Example #1
 def testNorm(self):
     """Test that we recover the correct values"""
     ds,target = reduction.applyNormalization(self.test_array,reference='bm1_counts',target=-1)
     result = ds.storage
     for i in range(4):
         # Convoluted syntax below to get all to work with nested lists
         testing = map(all,(result[i] == self.core_array2).tolist())
         self.failUnless(all(testing))
     self.failUnless(target == max(self.test_array.bm1_counts))
Example #2
 def testVar(self):
     """Test correct calculation of variances"""
     target = reduction.applyNormalization(self.test_array,reference='bm1_counts',target=-1)
     result = self.test_array.var
     # Check a few random points
     """If norm val 1 is 20 (the maximum), then the scale factor is 1.0 for frame 1
     with a variance of 1*20/400 = 0.05. If the observed intensity is 25, then the
     variance should be 0.05 * 25 * 25 + 25*1.0*1.0 = 31.25 + 25.0 = 56.25"""
     self.failUnless(result[1][1][1] == 56.25)
     """If norm val 0 is 10, scale is 2 for frame 0 and variance 2 * 20/(10*10) = 0.4
     So for observed intensity 12.5, variance is 0.4 * 12.5 * 12.5 + 12.5*2*2 =
     62.5 + 50 = 112.5"""
     self.failUnless(result[0][0][0] == 112.5)
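# The arithmetic in the docstrings above follows standard error propagation:
# for Poisson counts I scaled by s = target/norm, var(s) = target**2 * var(norm) / norm**4
# with var(norm) = norm, and the scaled variance is var(s)*I**2 + s**2*I.
# A minimal standalone check of those numbers (plain Python, independent of
# the reduction library; scaled_variance is an illustrative helper only):

def scaled_variance(intensity, norm, target):
    """Variance of intensity*scale, scale = target/norm, for Poisson counts."""
    scale = float(target) / norm
    var_scale = float(target) ** 2 * norm / norm ** 4  # var(norm) = norm for Poisson
    return var_scale * intensity ** 2 + scale ** 2 * intensity

assert abs(scaled_variance(25.0, 20.0, 20.0) - 56.25) < 1e-9   # frame 1 case above
assert abs(scaled_variance(12.5, 10.0, 20.0) - 112.5) < 1e-9   # frame 0 case above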
Example #3
def __run_script__(fns):

    from Reduction import reduction, AddCifMetadata
    from os.path import basename
    from os.path import join
    import os, re
    import time  #how fast are we going?
    from Formats import output

    elapsed = time.clock()
    print 'Started working at %f' % (time.clock() - elapsed)
    df.datasets.clear()

    # save user preferences
    prof_names, prof_values = save_user_prefs()

    # store current Git versions for data output
    code_versions = {
        "GUI": __UI_gitversion[4:-1],
        "Reduction library": reduction.gitversion[4:-1]
    }
    # check input
    if (fns is None or len(fns) == 0):
        print 'no input datasets'
        return

    # pre-check that we can write the result
#    output_destination = out_folder.value
    output_destination = get_save_path()
    if output_xyd.value or output_fxye.value:
        if not os.access(output_destination, os.W_OK):
            open_error("Unable to write to folder %s" % output_destination)
            return
    # check if input needs to be normalized
    if norm_apply.value:
        # norm_ref is the source of information for normalisation
        # norm_tar is the value norm_ref should become,
        # by multiplication.  If 'auto', the maximum value of norm_ref
        # for the first dataset is used, otherwise any number may be entered.
        norm_ref = str(norm_reference.value)
        if norm_ref.strip() == '':
            open_error(
                "You have asked to apply normalisation but not specified any normalisation reference"
            )
            return
        norm_tar = str(norm_target).lower()

        # check if normalization target needs to be determined
        if len(norm_tar) == 0:
            norm_ref = None
            norm_tar = None
            print 'WARNING: no reference for normalization was specified'
        elif norm_tar == 'auto':
            # flag value: take the maximum of norm_ref from the first dataset
            norm_tar = -1
            # look up where the reference values are stored
            location = norm_table[norm_ref]
            print 'normalisation reference is "' + norm_ref + '"; target will be taken from the first dataset'

        # use provided reference value
        else:
            norm_tar = float(norm_tar)

    else:
        norm_ref = None
        norm_tar = None

    # check if bkg-map needs to be loaded
    if bkg_apply.value:
        if not bkg_map.value:
            bkg = None
            print 'WARNING: no bkg-map was specified'
        else:
            bkg = Dataset(str(bkg_map.value))
    else:
        bkg = None

    # check if eff-map needs to be loaded
    if eff_apply.value:
        if not eff_map.value:
            eff = None
            print 'WARNING: no eff-map was specified'
        else:
            eff_map_canonical = get_calibration_path() + '/' + eff_map.value
            if eff_map_canonical[0:5] != 'file:':
                eff_map_canonical = 'file:' + eff_map_canonical
            if not eff_map_canonical in eff_map_cache:
                try:
                    eff_map_cache[
                        eff_map_canonical] = reduction.read_efficiency_cif(
                            eff_map_canonical)
                except:
                    open_error("Failed to read efficiency file %s" %
                               eff_map_canonical)
                    return
            else:
                print 'Found in cache ' + `eff_map_canonical`
            eff = eff_map_cache[eff_map_canonical]
    else:
        eff = None

    # check if vertical tube correction needs to be loaded
    if vtc_apply.value:
        if not vtc_file.value:
            vtc = None
            print 'WARNING: no vtc-file was specified'
        else:
            vtc = get_calibration_path() + '/' + str(vtc_file.value)
    else:
        vtc = None
    # check if horizontal tube correction needs to be loaded
    if htc_apply.value:
        if not htc_file.value:
            htc = None
            print 'WARNING: no htc-file was specified'
        else:
            htc = get_calibration_path() + '/' + str(htc_file.value)
    else:
        htc = None

    reduced_files = []
    # iterate through input datasets
    # note that the normalisation target (an arbitrary number) is set by
    # the first dataset unless it has already been specified.
    for fn in fns:
        # load dataset
        ds = df[fn]
        # extract basic metadata
        ds = AddCifMetadata.extract_metadata(ds, codeversions=code_versions)
        AddCifMetadata.store_reduction_preferences(ds, prof_names, prof_values)
        # remove redundant dimensions and convert to floating point
        rs = ds.get_reduced() * 1.0
        rs.copy_cif_metadata(ds)
        # check if normalized is required
        if norm_ref:
            ds, norm_tar = reduction.applyNormalization(
                rs, reference=norm_table[norm_ref], target=norm_tar)
        else:
            ds = rs
        if bkg:
            ds = reduction.getBackgroundCorrected(ds, bkg,
                                                  norm_table[norm_ref],
                                                  norm_tar)

        print 'Finished normalisation, background subtraction at %f' % (
            time.clock() - elapsed)
        # check if vertical tube correction is required
        if vtc:
            ds = reduction.getVerticallyCorrected(ds, vtc)
        print 'Finished vertical offset correction at %f' % (time.clock() -
                                                             elapsed)
        # check if efficiency correction is required
        if eff:
            ds = reduction.getEfficiencyCorrected(ds, eff)

        print 'Finished efficiency correction at %f' % (time.clock() - elapsed)
        # Before fiddling with axes, get the ideal stepsize
        stepsize = reduction.get_stepsize(ds)
        print 'Ideal stepsize determined to be %f' % stepsize
        # check if horizontal tube correction is required
        if htc:
            ds = reduction.getHorizontallyCorrected(ds, htc)

        print 'Finished horizontal correction at %f' % (time.clock() - elapsed)
        # Stitching. If we are recalculating gain, this is purely for
        # informational purposes. We don't want to take the 100x time penalty of
        # multiplying a 2D array by the gain factor for each tube, so we
        # stitch on a 1D array after doing the gain re-refinement.
        if ds.ndim > 2:
            stitched = reduction.getStitched(ds,
                                             ignore=str(asm_drop_frames.value))
        else:
            stitched = ds  # nothing to stitch: dataset is already a single frame
        # Display dataset
        print 'Finished stitching at %f' % (time.clock() - elapsed)
        Plot1.set_dataset(stitched)
        Plot1.title = stitched.title
        # check if we are recalculating gain
        if regain_apply.value:
            bottom = int(vig_lower_boundary.value)
            top = int(vig_upper_boundary.value)
            cs, gain, esds, chisquared, no_overlaps = reduction.do_overlap(
                ds,
                regain_iterno.value,
                bottom=bottom,
                top=top,
                exact_angles=htc,
                drop_frames=str(asm_drop_frames.value))
            if cs is not None:
                print 'Have new gains at %f' % (time.clock() - elapsed)
                fg = Dataset(gain)
                fg.var = esds
                # set horizontal axis (ideal values)
                # Plot4.set_dataset(Dataset(chisquared))   #chisquared history
                # Plot5.set_dataset(fg)   #final gain plot
            else:
                open_error(
                    "Cannot do gain recalculation as the scan ranges do not overlap."
                )
                return
        if not vig_apply_rescale.value:
            norm_const = -1.0
        else:
            norm_const = float(vig_rescale_target.value)
        # set the cluster value
        if str(vig_cluster.value) in ['Merge', 'Sum']:
            cluster = (stepsize * 0.6, str(vig_cluster.value))  # 60 percent of ideal
        else:
            cluster = (0.0, 'None')
        if not regain_apply.value:  #already done
            final_result = reduction.getVerticalIntegrated(
                stitched,
                axis=0,
                normalization=norm_const,
                cluster=cluster,
                bottom=int(vig_lower_boundary.value),
                top=int(vig_upper_boundary.value))
            print 'Finished vertical integration at %f' % (time.clock() -
                                                           elapsed)
        else:
            if str(vig_cluster.value) == 'Sum':  # simulate a sum for the gain-recalculated value
                cs *= no_overlaps
                info_string = "\nFinal values were multiplied by %d to simulate summation of individual points." % no_overlaps
                cs.add_metadata("_pd_proc_info_data_reduction",
                                info_string,
                                append=True)
            final_result = cs
        # Display reduced dataset
        send_to_plot(final_result, Plot2)
        if copy_acc.value:  #user wants us to accumulate it
            plh_copy_proc()
        # Output datasets
        # Calculate inserted string: %s for sample name, %t for temperature
        stem = str(output_stem.value)
        stem = re.sub(r'[^\w+=()*^@~:{}\[\].%-]', '_', stem)
        if '%s' in stem:
            samplename = final_result.harvest_metadata(
                "CIF")['_pd_spec_special_details']
            name_front = samplename.split()[0]
            stem = stem.replace('%s', name_front)
        if '%t' in stem:
            temperature = 'Unknown_temperature'
            stem = stem.replace('%t', temperature)
        print 'Filename stem is now ' + stem
        filename_base = join(get_save_path(),
                             basename(str(fn))[:-7] + '_' + stem)
        if output_xyd.value or output_fxye.value:  #write CIF if other files written
            output.write_cif_data(final_result, filename_base)
            reduced_files.append(filename_base + '.cif')
        if output_xyd.value:
            output.write_xyd_data(final_result,
                                  filename_base,
                                  codeversions=code_versions)
            reduced_files.append(filename_base + '.xyd')
        if output_fxye.value:
            output.write_fxye_data(final_result,
                                   filename_base,
                                   codeversions=code_versions)
            reduced_files.append(filename_base + '.xye')

        # ds.save_copy(join(str(out_folder.value), 'reduced_' + basename(str(fn))))
        print 'Finished writing data at %f' % (time.clock() - elapsed)

    if len(reduced_files) > 0:
        zip_files(reduced_files,
                  'Echidna_rd_' + str(int(time.time()))[2:] + '.zip')
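# The norm_ref/norm_tar handling above is a small three-way protocol: an empty
# target disables normalisation, 'auto' is flagged as -1 so the maximum of the
# reference in the first dataset is used, and anything else is parsed as an
# explicit number. A dependency-free sketch of that decision logic
# (resolve_norm_target is a hypothetical helper, not part of the Reduction library):

def resolve_norm_target(reference, target_text):
    """Return (reference, target): (None, None) disables normalisation,
    target -1 means 'determined from the first dataset', otherwise a float."""
    target_text = str(target_text).strip().lower()
    if not reference.strip() or not target_text:
        return None, None              # nothing to normalise against
    if target_text == 'auto':
        return reference, -1           # flag: resolved by applyNormalization
    return reference, float(target_text)

assert resolve_norm_target('bm1_counts', 'auto') == ('bm1_counts', -1)
assert resolve_norm_target('bm1_counts', '12000') == ('bm1_counts', 12000.0)
assert resolve_norm_target('bm1_counts', '') == (None, None)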
Example #4
def __run_script__(fns):
    
    from Reduction import reduction, AddCifMetadata
    from os.path import basename
    from os.path import join
    from Formats import output
    import re
    
    df.datasets.clear()
    
    # save user preferences
    prof_names,prof_values = save_user_prefs()

    # check input
    if (fns is None or len(fns) == 0) :
        print 'no input datasets'
        return

    # set the title for Plot2
    # Plot2.title = 'Plot 2'
    # check if input needs to be normalized
    if norm_apply.value:
        # norm_ref is the source of information for normalisation
        # norm_tar is the value norm_ref should become,
        # by multiplication.  If 'auto', the maximum value of norm_ref
        # for the first dataset is used, otherwise any number may be entered.
        norm_ref = str(norm_reference.value)
        norm_tar = norm_target

        # check if normalization target needs to be determined
        if len(norm_tar) == 0:
            norm_ref = None
            norm_tar = None
            print 'WARNING: no reference for normalization was specified'
        elif norm_tar == 'auto':
            # flag value: take the maximum of norm_ref from the first dataset
            norm_tar = -1
            # look up where the reference values are stored
            location = norm_table[norm_ref]
            print 'normalisation reference is "' + norm_ref + '"; target will be taken from the first dataset'
            
        # use provided reference value
        else:
            norm_tar = float(norm_tar)
            
    else:
        norm_ref = None
        norm_tar = None
    
    # check if bkg-map needs to be loaded
    if bkg_apply.value:
        if not bkg_map.value:
            bkg = None
            print 'WARNING: no bkg-map was specified'
        else:
            bkg = Dataset(str(bkg_map.value))
            # to avoid complaints in routines that expect it
            reduction.AddCifMetadata.add_metadata_methods(bkg)
    else:
        bkg = None
    
    # check if eff-map needs to be loaded
    if eff_apply.value:
        if not eff_map.value:
            eff = None
            print 'WARNING: no eff-map was specified'
        else:
            eff = Dataset(str(eff_map.value))
    else:
        eff = None
    # Check for rescale
    if vig_apply_rescale.value:
        vig_normalisation = float(vig_rescale_target.value)
    else:
        vig_normalisation = -1
    group_val = grouping_options[str(output_grouping.value)]
    # iterate through input datasets
    # note that the normalisation target (an arbitrary number) is set by
    # the first dataset unless it has already been specified.
    for fn in fns:
        # load dataset
        ds = df[fn]
        # extract basic metadata
        ds = reduction.AddCifMetadata.extract_metadata(ds)
        try:
            stth_value = sum(ds.stth)/len(ds.stth) # save for later
            all_stth = ds.stth[:] # also save for later
        except TypeError:
            stth_value = ds.stth
            all_stth = [stth_value]  # scalar stth: treat as a single value
        if ds.ndim > 3:
            rs = ds.get_reduced()
        else:
            rs = ds
        rs = rs * 1.0  #convert to float
        rs.copy_cif_metadata(ds)
        # check if normalized is required 
        if norm_ref:
            norm_tar = reduction.applyNormalization(rs, reference=norm_table[norm_ref], target=norm_tar)
        if bkg:
            rs = reduction.getBackgroundCorrected(rs, bkg, norm_table[norm_ref], norm_tar)
        # check if efficiency correction is required
        assert rs.dtype == Array([1.2,1.3]).dtype  # sanity check: rs must now be floating point
        if eff:
            ds = reduction.getEfficiencyCorrected(rs, eff)
        else:
            ds = rs
        # Calculate inserted string: %s for sample name, %t for temperature
        stem = str(output_stem.value)
        stem = re.sub(r'[^\w+=()*^@~:{}\[\].%-]','_',stem)
        if '%s' in stem:
            samplename = ds.harvest_metadata("CIF")['_pd_spec_special_details']
            name_front = re.sub(r'[^\w+=()*^@~:{}\[\].%-]','_',samplename)
            stem = stem.replace('%s',name_front)
        if '%t1' in stem:
            # get tc1
            temperature = df[fn]["/entry1/sample/tc1/sensor/sensorValueA"]
            print `temperature`
            try:
                avetemp = sum(temperature)/len(temperature)
            except TypeError:
                avetemp = temperature
            stem = stem.replace('%t1',"%.0fK" % avetemp)
        print 'Filename stem is now ' + stem
        # restrict output set of frames
        restrict_spec = str(output_restrict.value)
        if ':' in restrict_spec:
            first,last = map(int,restrict_spec.split(':'))
            start_frames = last
            current_frame_start = first
            frameno = first
        else:
            start_frames = len(ds)
            current_frame_start = 0
            frameno = 0
        # perform grouping of sequential input frames   
        while frameno <= start_frames:
            if group_val is None:
                target_val = ""
                final_frame = start_frames-1
                frameno = start_frames
            else:
                stth_value = all_stth[current_frame_start]
                target_val = ds[group_val][current_frame_start]
                try:
                    if ds[frameno][group_val] == target_val:
                        frameno += 1
                        continue
                except:   #Assume an exception is due to too large frameno
                    print 'Exiting frame loop due to error'
            # frameno is the first frame with the wrong values
            cs = ds.get_section([current_frame_start,0,0],[frameno-current_frame_start,ds.shape[1],ds.shape[2]])
            cs.copy_cif_metadata(ds)
            print 'Summing frames from %d to %d, shape %s, start 2th %f' % (current_frame_start,frameno-1,cs.shape,stth_value)
            if target_val != "":
                print 'Corresponding to a target value of ' + `target_val`
            # sum the input frames
            print 'cs axes: ' + cs.axes[0].title + ' ' + cs.axes[1].title + ' ' + cs.axes[2].title
            # es = cs.intg(axis=0)
            es = reduction.getSummed(cs,applyStth=stth_value)  # does axis correction as well
            es.copy_cif_metadata(cs)
            print 'es axes: ' + `es.axes[0].title` + ' ' + `es.axes[1].title`
            Plot1.set_dataset(es)
            cs = reduction.getVerticalIntegrated(es, axis=0, normalization=vig_normalisation,
                                                     bottom = int(vig_lower_boundary.value),
                                                     top=int(vig_upper_boundary.value))
            if target_val != "":
                cs.title = cs.title + "_" + str(target_val)
            try:
                send_to_plot(cs,Plot2,add=True,change_title=False)
            except IndexError:  #catch error from GPlot
                send_to_plot(cs,Plot2,add=False,change_title=True)
            # Output datasets
            filename_base = join(str(out_folder.value),basename(str(fn))[:-7]+'_'+stem+"_"+str(target_val))
            if output_cif.value:
                output.write_cif_data(cs,filename_base)
            if output_xyd.value:
                output.write_xyd_data(cs,filename_base)
            if output_fxye.value:
                output.write_fxye_data(cs,filename_base)
            #loop to next group of datasets
            current_frame_start = frameno
            frameno += 1
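# The frame loop above performs run-length grouping: consecutive frames whose
# group_val entry matches are summed into one section before integration. The
# core segmentation can be sketched independently (group_runs is an
# illustrative helper; the real loop also slices datasets and copies metadata):

def group_runs(values):
    """Yield (start, end) pairs (end exclusive) for maximal runs of equal values."""
    start = 0
    for i in range(1, len(values) + 1):
        if i == len(values) or values[i] != values[start]:
            yield start, i
            start = i

# e.g. frames measured at temperatures 10,10,10,20,20,30 form three groups
assert list(group_runs([10, 10, 10, 20, 20, 30])) == [(0, 3), (3, 5), (5, 6)]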
Example #5
def __run_script__(fns):
    
    from Reduction import reduction,AddCifMetadata
    from os.path import basename
    from os.path import join
    import os,re
    import time           #how fast are we going?
    from Formats import output
    
    num_step = 9
    prog_bar.max = len(fns) * num_step
    prog_bar.selection = 1

    elapsed = time.clock()
    print 'Started working at %f' % (time.clock()-elapsed)
    df.datasets.clear()
    
    # save user preferences
    prof_names,prof_values = save_user_prefs()

    # store current Git versions for data output
    code_versions = {"GUI":__UI_gitversion[4:-1],
                     "Reduction library":reduction.gitversion[4:-1]}
    # check input
    if (fns is None or len(fns) == 0) :
        print 'no input datasets'
        return

    # pre-check that we can write the result
    output_destination = out_folder.value
    if output_xyd.value or output_fxye.value:
        if not os.access(output_destination,os.W_OK):
            open_error("Unable to write to folder %s" % output_destination)
            return
    # check if input needs to be normalized
    if norm_apply.value:
        # norm_ref is the source of information for normalisation
        # norm_tar is the value norm_ref should become,
        # by multiplication.  If 'auto', the maximum value of norm_ref
        # for the first dataset is used, otherwise any number may be entered.
        norm_ref = str(norm_reference.value)
        if norm_ref.strip() == '':
            open_error("You have asked to apply normalisation but not specified any normalisation reference")
            return
        norm_tar = str(norm_target).lower()

        # check if normalization target needs to be determined
        if len(norm_tar) == 0:
            norm_ref = None
            norm_tar = None
            print 'WARNING: no reference for normalization was specified'
        elif norm_tar == 'auto':
            # flag value: take the maximum of norm_ref from the first dataset
            norm_tar = -1
            # look up where the reference values are stored
            location = norm_table[norm_ref]
            print 'normalisation reference is "' + norm_ref + '"; target will be taken from the first dataset'
            
        # use provided reference value
        else:
            norm_tar = float(norm_tar)
            
    else:
        norm_ref = None
        norm_tar = None
    
    # check if bkg-map needs to be loaded
    if bkg_apply.value:
        if not bkg_map.value:
            bkg = None
            print 'WARNING: no bkg-map was specified'
        else:
            bkg = Dataset(str(bkg_map.value)).get_reduced()
    else:
        bkg = None
    
    # check if eff-map needs to be loaded
    if eff_apply.value:
        if not eff_map.value:
            eff = None
            print 'WARNING: no eff-map was specified'
        else:
            eff_map_canonical = str(eff_map.value)
            if eff_map_canonical[0:5] != 'file:':
                eff_map_canonical = 'file:' + eff_map_canonical
            if not eff_map_canonical in eff_map_cache:
                try:
                    eff_map_cache[eff_map_canonical] = reduction.read_efficiency_cif(eff_map_canonical)
                except:
                    open_error("Failed to read efficiency file %s" % eff_map_canonical)
                    return
            else:
                print 'Found in cache ' + `eff_map_canonical`
            eff = eff_map_cache[eff_map_canonical]
    else:
        eff = None
    
    # check if vertical tube correction needs to be loaded
    if vtc_apply.value:
        if not vtc_file.value:
            vtc = None
            print 'WARNING: no vtc-file was specified'
        else:
            vtc = str(vtc_file.value)
    else:
        vtc = None
    # check if horizontal tube correction needs to be loaded
    if htc_apply.value:
        if not htc_file.value:
            htc = None
            print 'WARNING: no htc-file was specified'
        else:
            htc = str(htc_file.value)
    else:
        htc = None

    # check if gain correction needs to be loaded
    regain_data = []
    if regain_load.value:
        if not regain_load_filename.value:
            open_error("You have requested loading of gain correction from a file but no file has been specified")
            return
        rlf = str(regain_load_filename.value)
        regain_data = reduction.load_regain_values(rlf)
    # iterate through input datasets
    # note that the normalisation target (an arbitrary number) is set by
    # the first dataset unless it has already been specified.
    prog_bar.selection = 2
    fn_idx = 0
    for fn in fns:
        # load dataset
        ds = df[fn]
        if not norm_uniform.value:
            norm_tar = -1   #reinitialise
        try:
            prog_bar.selection = fn_idx * num_step
            # extract basic metadata
            print 'Code versions:' + `code_versions`
            ds = AddCifMetadata.extract_metadata(ds,codeversions=code_versions)
            AddCifMetadata.store_reduction_preferences(ds,prof_names,prof_values)
            # remove redundant dimensions and convert to floating point
            rs = ds.get_reduced()*1.0
            rs.copy_cif_metadata(ds)
            # check if normalized is required 
            if norm_ref:
                ds,norm_tar = reduction.applyNormalization(rs, reference=norm_table[norm_ref], target=norm_tar)
            else:
                ds = rs
            if bkg:
                AddCifMetadata.add_metadata_methods(bkg)
                ds = reduction.getBackgroundCorrected(ds, bkg, norm_table[norm_ref], norm_tar)
            print 'Finished normalisation, background subtraction at %f' % (time.clock()-elapsed)
            prog_bar.selection = fn_idx * num_step + 1
            # check that we have the necessary dimensions
            dims = ds.shape
            if dims[1] != 128:
                rebin_factor = int(dims[1]/128)
                print 'Need to rebin from %d to 128, factor of %d; stand by...' % (dims[1],rebin_factor)
                ds = reduction.rebin(ds,axis=1,factor=rebin_factor)
            else:
                print 'No need to rebin, dataset shape is ' + repr(dims)
            # check if vertical tube correction is required
            if vtc:
                ds = reduction.getVerticallyCorrected(ds, vtc)
            print 'Finished vertical offset correction at %f' % (time.clock()-elapsed)
            prog_bar.selection = fn_idx * num_step + 2
            # check if efficiency correction is required
            if eff:
                ds = reduction.getEfficiencyCorrected(ds, eff)
            
            print 'Finished efficiency correction at %f' % (time.clock()-elapsed)
            prog_bar.selection = fn_idx * num_step + 3
            # Before fiddling with axes, get the ideal stepsize
            stepsize = reduction.get_stepsize(ds)
            print 'Ideal stepsize determined to be %f' % stepsize
            prog_bar.selection = fn_idx * num_step + 4
            # check if horizontal tube correction is required
            if htc:
                ds = reduction.getHorizontallyCorrected(ds, htc)
    
            print 'Finished horizontal correction at %f' % (time.clock()-elapsed)
            prog_bar.selection = fn_idx * num_step + 5
            # Stitching. If we are recalculating gain, this is purely for
            # informational purposes. We don't want to take the 100x time penalty of
            # multiplying a 2D array by the gain factor for each tube, so we
            # stitch using a 1D array after doing the gain re-refinement.
            drop_tubes = str(asm_drop_tubes.value)
            if ds.ndim > 2:
                # See if we are ignoring any tubes
                stitched = reduction.getStitched(ds,ignore=str(asm_drop_frames.value),drop_tubes=drop_tubes)
            else:
                stitched = ds  # nothing to stitch: dataset is already a single frame
            # Display dataset
            print 'Finished stitching at %f' % (time.clock()-elapsed)
            prog_bar.selection = fn_idx * num_step + 6
            Plot1.set_dataset(stitched)
            Plot1.title = stitched.title
            n_logger.log_plot(Plot1, footer = Plot1.title)
            # check if we are recalculating gain 
            if regain_apply.value:
                bottom = int(vig_lower_boundary.value)
                top = int(vig_upper_boundary.value)
                dumpfile = None
                # if regain_dump_tubes.value:
                #     dumpfile = filename_base+".tubes"
                cs,gain,esds,chisquared,no_overlaps = reduction.do_overlap(ds,regain_iterno.value,bottom=bottom,top=top,
                                                                           exact_angles=htc,drop_frames=str(asm_drop_frames.value),drop_tubes=drop_tubes,use_gains=regain_data,dumpfile=dumpfile,
                                                                           do_sum=regain_sum.value)
                if cs is not None:
                    print 'Have new gains at %f' % (time.clock() - elapsed)
                    fg = Dataset(gain)
                    fg.var = esds**2
                    # set horizontal axis (ideal values)
                    Plot4.set_dataset(Dataset(chisquared))   #chisquared history
                    Plot5.set_dataset(fg)   #final gain plot
                    # now save the file if requested
                    if regain_store.value and not regain_load.value:
                        gain_comment = "Gains refined from file %s" % fn
                        reduction.store_regain_values(str(regain_store_filename.value),gain,gain_comment)
                else:
                    open_error("Cannot do gain recalculation as the scan ranges do not overlap.")
                    return
            if not vig_apply_rescale.value:
                norm_const = -1.0
            else:
                norm_const = float(vig_rescale_target.value)
            # set the cluster value
            if str(vig_cluster.value) in ['Merge','Sum']:
                cluster = (stepsize * 0.6,str(vig_cluster.value))  #60 percent of ideal
            else:
                cluster = (0.0,'None')
            if not regain_apply.value:  #already done
                final_result = reduction.getVerticalIntegrated(stitched, axis=0, normalization=norm_const,
                                                     cluster=cluster,bottom = int(vig_lower_boundary.value),
                                                     top=int(vig_upper_boundary.value))
                print 'Finished vertical integration at %f' % (time.clock()-elapsed)
            else:
                if str(vig_cluster.value) == 'Sum':  #simulate a sum for the gain recalculated value
                    cs *= no_overlaps
                    info_string = "\nFinal values were multiplied by %d to simulate summation of individual points." % no_overlaps
                    cs.add_metadata("_pd_proc_info_data_reduction",info_string,append=True)
                if norm_const > 0:  #rescale requested but not performed
                    reduction.rescale(cs,norm_const)
                final_result = cs
            prog_bar.selection = fn_idx * num_step + 7
            # Display reduced dataset
            send_to_plot(final_result,Plot2)
            n_logger.log_plot(Plot2, footer = Plot2.title)
            if copy_acc.value:   #user wants us to accumulate it
                plh_copy_proc()
            # Output datasets
            # Calculate inserted string: %s for sample name, %t for temperature
            stem = str(output_stem.value)
            stem = re.sub(r'[^\w+=()*^@~:{}\[\].%-]','_',stem)
            if '%s' in stem:
                samplename = final_result.harvest_metadata("CIF")['_pd_spec_special_details']
                name_front = samplename.split()[0]
                stem = stem.replace('%s',name_front)
            if '%t' in stem:
                temperature = 'Unknown_temperature'
                stem = stem.replace('%t',temperature)
            print 'Filename stem is now ' + stem
            filename_base = join(str(out_folder.value),basename(str(fn))[:-7] + '_' + stem)
            if output_xyd.value or output_fxye.value or output_topas.value:  #write CIF if other files written
                output.write_cif_data(final_result,filename_base)
            if output_xyd.value:
                naked_flag = output_naked.value
                output.write_xyd_data(final_result,filename_base,codeversions=code_versions,naked=naked_flag)
            if output_fxye.value:
                output.write_fxye_data(final_result,filename_base,codeversions=code_versions)
            if output_topas.value:
                output.write_xyd_data(final_result,filename_base,codeversions=code_versions,comment_char="!",extension='topas')
            # ds.save_copy(join(str(out_folder.value), 'reduced_' + basename(str(fn))))
            print 'Finished writing data at %f' % (time.clock()-elapsed)
            prog_bar.selection = fn_idx * num_step + 8
            fn_idx += 1
        finally:
            df[fn].close()
            prog_bar.selection = 0
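# The rebinning step above collapses the vertical axis to 128 pixels by an
# integer factor. A minimal sketch of that kind of factor rebin on nested
# lists, assuming adjacent bins are summed (reduction.rebin itself works on
# Gumtree datasets and also propagates variances; this shows only the core idea):

def rebin_columns(rows, factor):
    """Sum each group of `factor` adjacent columns, e.g. 256 -> 128 bins."""
    return [[sum(row[i:i + factor]) for i in range(0, len(row), factor)]
            for row in rows]

data = [[1, 1, 2, 2], [3, 3, 4, 4]]                # 2 x 4
assert rebin_columns(data, 2) == [[2, 4], [6, 8]]  # 2 x 2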
Example #6
def __run_script__(fns):
    
    from Reduction import reduction
    from os.path import basename
    from os.path import join
    import time           #how fast are we going?
    import AddCifMetadata,output
    
    elapsed = time.clock()
    print 'Started working on Split Scans at %f' % (time.clock()-elapsed)
    df.datasets.clear()
    
    # check input
    if (fns is None or len(fns) == 0) :
        print 'no input datasets'
        return

    # check if input needs to be normalized
    if norm_apply.value:
        # norm_ref is the source of information for normalisation
        # norm_tar is the value norm_ref should become,
        # by multiplication.  If 'auto', the maximum value of norm_ref
        # for the first dataset is used, otherwise any number may be entered.
        norm_ref = str(norm_reference.value)
        norm_tar = str(norm_target.value).lower()

        # check if normalization target needs to be determined
        if len(norm_tar) == 0:
            norm_ref = None
            norm_tar = None
            print 'WARNING: no reference for normalization was specified'
        elif norm_tar == 'auto':
            # flag value: take the maximum of norm_ref from the first dataset
            norm_tar = -1
            # look up where the reference values are stored
            location = norm_table[norm_ref]
            print 'normalisation reference is "' + norm_ref + '"; target will be taken from the first dataset'
            
        # use provided reference value
        else:
            norm_tar = float(norm_tar)
            
    else:
        norm_ref = None
        norm_tar = None
    
    # check if bkg-map needs to be loaded
    if bkg_apply.value:
        if not bkg_map.value:
            bkg = None
            print 'WARNING: no bkg-map was specified'
        else:
            bkg = Dataset(str(bkg_map.value))
    else:
        bkg = None
    
    # check if eff-map needs to be loaded
    if eff_apply.value:
        if not eff_map.value:
            eff = None
            print 'WARNING: no eff-map was specified'
        else:
            eff = reduction.read_efficiency_cif(str(eff_map.value))
            if eff.ndim != 2:
                raise AttributeError('eff.ndim != 2')
    else:
        eff = None
    
    # check if vertical tube correction needs to be loaded
    if vtc_apply.value:
        if not vtc_file.value:
            vtc = None
            print 'WARNING: no vtc-file was specified'
        else:
            vtc = str(vtc_file.value)
    else:
        vtc = None
    
    # check if horizontal tube correction needs to be loaded
    if htc_apply.value:
        if not htc_file.value:
            htc = None
            print 'WARNING: no htc-file was specified'
        else:
            htc = str(htc_file.value)
    else:
        htc = None
        
    # iterate through input datasets
    # note that the normalisation target (an arbitrary number) is set by
    # the first dataset unless it has already been specified.
    for fn in fns:
        # load dataset
        ds = df[fn]
        # extract basic metadata
        ds = reduction.AddCifMetadata.extract_metadata(ds)
        # remove redundant dimensions
        ds = ds.get_reduced()
        # check if normalized is required 
        if norm_ref:
            norm_tar = reduction.applyNormalization(ds, reference=norm_table[norm_ref], target=norm_tar)
        if bkg:
            ds = reduction.getBackgroundCorrected(ds, bkg, norm_table[norm_ref], norm_tar)
        
        print 'Finished normalisation, background subtraction at %f' % (time.clock()-elapsed)
        # check if vertical tube correction is required
        if vtc:
            ds = reduction.getVerticallyCorrected(ds, vtc)
        print 'Finished vertical offset correction at %f' % (time.clock()-elapsed)
        # check if efficiency correction is required
        if eff:
            ds = reduction.getEfficiencyCorrected(ds, eff)
        
        print 'Finished efficiency correction at %f' % (time.clock()-elapsed)
        # check if horizontal tube correction is required
        if htc:
            ds = reduction.getHorizontallyCorrected(ds, htc)

        print 'Finished horizontal correction at %f' % (time.clock()-elapsed)
        if vig_apply_rescale.value:
            ds = reduction.getVerticalIntegrated(ds, normalization=float(vig_rescale_target.value))
        else:
            ds = reduction.getVerticalIntegrated(ds)
        print 'Finished vertical integration at %f' % (time.clock()-elapsed)
        Plot1.clear()
        Plot1.set_dataset(ds)
        # output.write_cif_data(ds,join(str(out_folder.value), 'reduced_' + basename(str(fn))[:-7]))
        # Now write out each tube's data
        output.dump_tubes(ds,join(str(out_folder.value), 'split_' + basename(str(fn))[:-7]))
        print 'Finished writing data at %f' % (time.clock()-elapsed)
Example #7
def __run_script__(fns):
    global Plot4,Plot5,Plot6
    from Reduction import reduction,AddCifMetadata
 
    from os.path import basename
    from os.path import join
    import time           #how fast are we going?
    from Formats import output
    
    elapsed = time.clock()
    print 'Started working at %f' % (time.clock()-elapsed)
    df.datasets.clear()
    
    # check input
    if (fns is None or len(fns) == 0) :
        print 'no input datasets'
        return

    # check if input needs to be normalized
    if norm_apply.value:
        # norm_ref is the source of information for normalisation
        # norm_tar is the value norm_ref should become,
        # by multiplication.  If 'auto', the maximum value of norm_ref
        # for the first dataset is used, otherwise any number may be entered.
        norm_ref = str(norm_reference.value)
        norm_tar = str(norm_target.value).lower()

        # check if normalization target needs to be determined
        if len(norm_tar) == 0:
            norm_ref = None
            norm_tar = None
            print 'WARNING: no reference for normalization was specified'
        elif norm_tar == 'auto':
            # flag value: take the maximum of norm_ref from the first dataset
            norm_tar = -1
            # look up where the reference values are stored
            location = norm_table[norm_ref]
            print 'normalisation reference is "' + norm_ref + '"; target will be taken from the first dataset'
            
        # use provided reference value
        else:
            norm_tar = float(norm_tar)
            
    else:
        norm_ref = None
        norm_tar = None

    # check if eff-map needs to be loaded
    if eff_apply.value:
        if not eff_map.value:
            eff = None
            print 'WARNING: no eff-map was specified'
        else:
            eff_map_canonical = str(eff_map.value)
            if eff_map_canonical[0:5] != 'file:':
                eff_map_canonical = 'file:' + eff_map_canonical
            if not eff_map_canonical in eff_map_cache:
                eff_map_cache[eff_map_canonical] = reduction.read_efficiency_cif(eff_map_canonical)
            else:
                print 'Found in cache ' + `eff_map_canonical`
            eff = eff_map_cache[eff_map_canonical]
    else:
        eff = None
    
    # check if vertical tube correction needs to be loaded
    if vtc_apply.value:
        if not vtc_file.value:
            vtc = None
            print 'WARNING: no vtc-file was specified'
        else:
            vtc = str(vtc_file.value)
    else:
        vtc = None
    
    # iterate through input datasets
    # note that the normalisation target (an arbitrary number) is set by
    # the first dataset unless it has already been specified.
    for fn in fns:
        # load dataset
        ds = df[fn]
        # extract basic metadata
        ds = reduction.AddCifMetadata.extract_metadata(ds)
        # remove redundant dimensions
        rs = ds.get_reduced()
        rs.copy_cif_metadata(ds)
        # check if normalized is required 
        if norm_ref:
            ds,norm_tar = reduction.applyNormalization(rs, reference=norm_table[norm_ref], target=norm_tar)
        
        print 'Finished normalisation at %f' % (time.clock()-elapsed)
        # check if vertical tube correction is required
        if vtc:
            ds = reduction.getVerticallyCorrected(ds, vtc)
        print 'Finished vertical offset correction at %f' % (time.clock()-elapsed)
        # check if efficiency correction is required
        if eff:
            ds = reduction.getEfficiencyCorrected(ds, eff)
        
        print 'Finished efficiency correction at %f' % (time.clock()-elapsed)
        # check if we are recalculating gain
        if regain_apply.value:
            b = ds.intg(axis=1).get_reduced()  #reduce dimension
            ignore = regain_ignore.value    # number of leading tubes to ignore
            # Determine pixels per tube interval
            tube_pos = ds.axes[-1]
            tubesep = abs(tube_pos[0]-tube_pos[-1])/(len(tube_pos)-1)
            tube_steps = ds.axes[0]
            bin_size = abs(tube_steps[0]-tube_steps[-1])/(len(tube_steps)-1)
            pixel_step = int(round(tubesep/bin_size))
            bin_size = tubesep/pixel_step
            print '%f tube separation, %d steps before overlap, ideal binsize %f' % (tubesep,pixel_step,bin_size)
            # Reshape with individual sections summed
            c = b.reshape([b.shape[0]/pixel_step,pixel_step,b.shape[-1]])
            print `b.shape` + "->" + `c.shape`
            # sum the individual unoverlapped sections
            d = c.intg(axis=1)
            e = d.transpose()
            # we skip the first tubes' data as it is all zero
            # Get an initial average to start with
            bottom = vig_lower_boundary.value
            top = vig_upper_boundary.value
            resummed = ds[:,bottom:top,:]
            resummed = resummed.intg(axis=1).get_reduced()
            first_gain = array.ones(len(b.transpose())-ignore)
            first_ave,x,first_var = overlap.apply_gain(resummed.transpose()[ignore:,:],1.0/resummed.transpose().var[ignore:,:],pixel_step,first_gain, calc_var=True)
            if regain_unit_weights.value is True:
                weights = array.ones_like(e[ignore:])
            else:
                weights = 1.0/e[ignore:].var
            q = iterate_data(e[ignore:],weights,pixel_step=1,iter_no=int(regain_iterno.value))
            # Now we actually apply the vertical limits requested
            f,x, varf = overlap.apply_gain(resummed.transpose()[ignore:,:],1.0/resummed.transpose().var[ignore:,:],pixel_step,q[0],calc_var=True)
            # Get error for full dataset
            esds = overlap.calc_error_new(b.transpose()[ignore:,:],f,q[0],pixel_step)
            f = Dataset(f)
            f.title = "After scaling"
            f.var = varf
            # construct the ideal axes
            axis = arange(len(f))
            f.axes[0] = axis*bin_size + ds.axes[0][0] + ignore*pixel_step*bin_size
            f.copy_cif_metadata(ds)
            print `f.shape` + ' ' + `x.shape`
            Plot1.set_dataset(f)
            first_ave = Dataset(first_ave)
            first_ave.var = first_var
            first_ave.title = "Before scaling"
            first_ave.axes[0] = f.axes[0]
            Plot1.add_dataset(Dataset(first_ave))
            Plot4.set_dataset(Dataset(q[4]))
            fg = Dataset(q[0])
            fg.var = esds
            Plot5.set_dataset(fg)
            # show old esds
            fgold = Dataset(q[0])
            fgold.var = q[5]
            Plot5.add_dataset(fgold)
            residual_map = Dataset(q[3])
            try:
                Plot6.set_dataset(residual_map)
            except:
                pass
        print 'Finished regain calculation at %f' % (time.clock() - elapsed)
        # Output datasets (f only exists if the gain recalculation ran)
        if regain_apply.value:
            filename_base = join(str(out_folder.value),str(output_stem.value) + basename(str(fn))[:-7])
            if output_cif.value:
                output.write_cif_data(f,filename_base)
            if output_xyd.value:
                output.write_xyd_data(f,filename_base)
            if output_fxye.value:
                output.write_fxye_data(f,filename_base)
        print 'Finished writing data at %f' % (time.clock()-elapsed)
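# The geometry bookkeeping in the regain section above derives how many scan
# steps separate adjacent detector tubes, then adjusts the bin size so an
# integer number of bins spans one tube. The same arithmetic, standalone
# (overlap_geometry is an illustrative helper; the numbers are made up, not
# instrument values):

def overlap_geometry(tube_pos, tube_steps):
    """Return (tubesep, pixel_step, ideal_bin) as computed in the script."""
    tubesep = abs(tube_pos[0] - tube_pos[-1]) / (len(tube_pos) - 1)
    bin_size = abs(tube_steps[0] - tube_steps[-1]) / (len(tube_steps) - 1)
    pixel_step = int(round(tubesep / bin_size))
    return tubesep, pixel_step, tubesep / pixel_step

tube_pos = [i * 1.25 for i in range(128)]   # 128 tubes, 1.25 degrees apart
tube_steps = [i * 0.05 for i in range(25)]  # 25 scan steps of 0.05 degrees
tubesep, pixel_step, ideal = overlap_geometry(tube_pos, tube_steps)
assert pixel_step == 25 and abs(ideal - 0.05) < 1e-12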
Example #8
 def testMetadata(self):
     """Test that metadata is correctly produced"""
     target = reduction.applyNormalization(self.test_array,reference='bm1_counts',target=-1)
     pf = self.test_array.harvest_metadata("CIF")
     self.failUnless("Data normalised to %f" % float(target) in str(pf["_pd_proc_info_data_reduction"]))
Example #9
def __run_script__(fns):
    
    from Reduction import reduction
    from os.path import basename
    from os.path import join
    import time           #how fast are we going?
    import AddCifMetadata,output
    
    elapsed = time.clock()
    print 'Started working at %f' % (time.clock()-elapsed)
    df.datasets.clear()
    
    # check input
    if (fns is None or len(fns) == 0) :
        print 'no input datasets'
        return

    # check if input needs to be normalized
    if norm_apply.value:
        # norm_ref is the source of information for normalisation
        # norm_tar is the value norm_ref should become,
        # by multiplication.  If 'auto', the maximum value of norm_ref
        # for the first dataset is used, otherwise any number may be entered.
        norm_ref = str(norm_reference.value)
        norm_tar = str(norm_target.value).lower()

        # check if normalization target needs to be determined
        if len(norm_tar) == 0:
            norm_ref = None
            norm_tar = None
            print 'WARNING: no reference for normalization was specified'
        elif norm_tar == 'auto':
            # flag value: take the maximum of norm_ref from the first dataset
            norm_tar = -1
            # look up where the reference values are stored
            location = norm_table[norm_ref]
            print 'normalisation reference is "' + norm_ref + '"; target will be taken from the first dataset'
            
        # use provided reference value
        else:
            norm_tar = float(norm_tar)
            
    else:
        norm_ref = None
        norm_tar = None
    
    # check if bkg-map needs to be loaded
    if bkg_apply.value:
        if not bkg_map.value:
            bkg = None
            print 'WARNING: no bkg-map was specified'
        else:
            bkg = Dataset(str(bkg_map.value))
    else:
        bkg = None
    
    # check if eff-map needs to be loaded
    if eff_apply.value:
        if not eff_map.value:
            eff = None
            print 'WARNING: no eff-map was specified'
        else:
            if not eff_map.value in eff_map_cache:
                eff_map_cache[eff_map.value] = reduction.read_efficiency_cif(str(eff_map.value))
            else:
                print 'Found cached efficiency map ' + str(eff_map.value)
            eff = eff_map_cache[eff_map.value]
    else:
        eff = None
    
    # check if vertical tube correction needs to be loaded
    if vtc_apply.value:
        if not vtc_file.value:
            vtc = None
            print 'WARNING: no vtc-file was specified'
        else:
            vtc = str(vtc_file.value)
    else:
        vtc = None
    
    # check if horizontal tube correction needs to be loaded
    if htc_apply.value:
        if not htc_file.value:
            htc = None
            print 'WARNING: no htc-file was specified'
        else:
            htc = str(htc_file.value)
    else:
        htc = None
        
    # iterate through input datasets
    # note that the normalisation target (an arbitrary number) is set by
    # the first dataset unless it has already been specified.
    for fn in fns:
        # load dataset
        ds = df[fn]
        # extract basic metadata
        ds = reduction.AddCifMetadata.extract_metadata(ds)
        # remove redundant dimensions
        rs = ds.get_reduced()
        rs.copy_cif_metadata(ds)
        # check if normalized is required 
        if norm_ref:
            ds,norm_tar = reduction.applyNormalization(rs, reference=norm_table[norm_ref], target=norm_tar)
        if bkg:
            ds = reduction.getBackgroundCorrected(ds, bkg, norm_table[norm_ref], norm_tar)
        
        print 'Finished normalisation, background subtraction at %f' % (time.clock()-elapsed)
        # check if vertical tube correction is required
        if vtc:
            ds = reduction.getVerticallyCorrected(ds, vtc)
        print 'Finished vertical offset correction at %f' % (time.clock()-elapsed)
        # check if efficiency correction is required
        if eff:
            ds = reduction.getEfficiencyCorrected(ds, eff)
        
        print 'Finished efficiency correction at %f' % (time.clock()-elapsed)
        # check if horizontal tube correction is required
        if htc:
            ds = reduction.getHorizontallyCorrected(ds, htc)

        print 'Finished horizontal correction at %f' % (time.clock()-elapsed)

        # check if we are recalculating gain
        if regain_apply.value:
            print 'ds.has_key(ms): ' + `ds.__dict__.has_key('ms')`
            ds,gain,esds,chisquared = reduction.do_overlap(ds,regain_iterno.value)
            print 'Have new gains at %f' % (time.clock() - elapsed)
            Plot4 = Plot(title='Chi squared history')
            Plot5 = Plot(title='Final Gain')
            fg = Dataset(gain)
            fg.var = esds
            Plot4.set_dataset(Dataset(chisquared))   #chisquared history
            Plot5.set_dataset(fg)   #final gain plot
        # assemble dataset
        if ds.ndim > 2:
            asm_algo = str(asm_algorithm.value)
            if asm_algo == 'stitch frames':
                ds = reduction.getStitched(ds)
            elif asm_algo == 'sum frames':
                ds = reduction.getSummed(ds)
            else:
                print 'specify assemble algorithm'
                return
        # Display dataset
        print 'Finished stitching at %f' % (time.clock()-elapsed)
        Plot1.set_dataset(ds)
        Plot1.title = ds.title
        if vig_apply_rescale.value:
            ds = reduction.getVerticalIntegrated(ds, axis=0, normalization=float(vig_rescale_target.value),
                                                 cluster=float(vig_cluster.value))
        else:
            ds = reduction.getVerticalIntegrated(ds, axis=0, cluster=float(vig_cluster.value))
        print 'Finished vertical integration at %f' % (time.clock()-elapsed)
        # Display reduced dataset
        Plot2.set_dataset(ds)
        Plot2.title = ds.title
        # Output datasets
        filename_base = join(str(out_folder.value),str(output_stem.value) + basename(str(fn))[:-7])
        if output_cif.value:
            output.write_cif_data(ds,filename_base)
        if output_xyd.value:
            output.write_xyd_data(ds,filename_base)
        if output_fxye.value:
            output.write_fxye_data(ds,filename_base)
        # ds.save_copy(join(str(out_folder.value), 'reduced_' + basename(str(fn))))
        print 'Finished writing data at %f' % (time.clock()-elapsed)
Example #10
def __run_script__(fns):
    
    from Reduction import reduction, AddCifMetadata
    from os.path import basename
    from os.path import join
    from Formats import output
    
    df.datasets.clear()
    
    # save user preferences
    prof_names,prof_values = save_user_prefs()

    # check input
    if (fns is None or len(fns) == 0) :
        print 'no input datasets'
        return

    # Store vertical axis information
    rot_info = rot_table[str(rot_axis.value)][0]
    # check if input needs to be normalized
    if norm_apply.value:
        # norm_ref is the source of information for normalisation
        # norm_tar is the value norm_ref should become,
        # by multiplication.  If 'auto', the maximum value of norm_ref
        # for the first dataset is used, otherwise any number may be entered.
        norm_ref = str(norm_reference.value)
        norm_tar = norm_target

        # check if normalization target needs to be determined
        if len(norm_tar) == 0:
            norm_ref = None
            norm_tar = None
            print 'WARNING: no reference for normalization was specified'
        elif norm_tar == 'auto':
            # flag value: take the maximum of norm_ref from the first dataset
            norm_tar = -1
            # look up where the reference values are stored
            location = norm_table[norm_ref]
            print 'normalisation reference is "' + norm_ref + '"; target will be taken from the first dataset'
            
        # use provided reference value
        else:
            norm_tar = float(norm_tar)
            
    else:
        norm_ref = None
        norm_tar = None
    
    # check if bkg-map needs to be loaded
    if bkg_apply.value:
        if not bkg_map.value:
            bkg = None
            print 'WARNING: no bkg-map was specified'
        else:
            bkg = Dataset(str(bkg_map.value))
    else:
        bkg = None
    
    # check if eff-map needs to be loaded
    if eff_apply.value:
        if not eff_map.value:
            eff = None
            print 'WARNING: no eff-map was specified'
        else:
            eff = Dataset(str(eff_map.value))
    else:
        eff = None
    # iterate through input datasets
    # note that the normalisation target (an arbitrary number) is set by
    # the first dataset unless it has already been specified.
    for fn in fns:
        # load dataset
        ds = df[fn]
        # extract basic metadata
        ds = reduction.AddCifMetadata.extract_metadata(ds)
        rs = ds.get_reduced()
        rs = rs * 1.0 # convert to float
        rs.copy_cif_metadata(ds)
        # Get future axis values
        
        # check if normalized is required 
        if norm_ref:
            norm_tar = reduction.applyNormalization(rs, reference=norm_table[norm_ref], target=norm_tar)
        if bkg:
            rs = reduction.getBackgroundCorrected(rs, bkg, norm_table[norm_ref], norm_tar)
        # check if efficiency correction is required
        if eff:
            rs = reduction.getEfficiencyCorrected(rs, eff)
        # Sum all frames vertically after trimming according to requirements
        rs = rs[:,vig_lower_boundary.value:vig_upper_boundary.value,:]
        rs = rs.intg(axis=1).get_reduced()
        rs.copy_cif_metadata(ds)
        # create the axes
        units = rot_table[str(rot_axis.value)][2]
        try:
            rot_values = ds[rot_info]
        except:
            try:
                rot_values = SimpleData(ds.__iNXroot__.findContainerByPath(rot_info))
            except:
                rot_values = arange(rs.shape[1])
                units = 'Step Number'
        stth = ds.stth[0]
        vert_axis_name = rot_table[str(rot_axis.value)][1]
        rs.set_axes([rot_values,stth + ds.axes[2]],['Angle',vert_axis_name],['Degrees',units])
        Plot1.set_dataset(rs)
        Plot1.title = rs.title
        Plot1.x_label = 'Angle (degrees)'
        Plot1.y_label = vert_axis_name + ' (' + units + ')'
        # no output yet
        """   filename_base = join(str(out_folder.value),basename(str(fn))[:-7]+'_'+str(output_stem.value)+"_"+str(target_val))