Code example #1
File: tests.py  Project: Gumtree/Wombat_scripts
    def testBack(self):
        """Test that background subtraction yields correct values when no normalisation
        is necessary."""
        rs = reduction.getBackgroundCorrected(self.test_array, self.back_array)
        self.failUnless(rs.storage[0][0][0] == 15)
        self.failUnless(rs.storage[1][1][1] == 10)
        # testing variances
        self.failUnless(rs.var[0][0][0] == 35)
        self.failUnless(rs.var[1][1][1] == 40)
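The variance assertions follow from standard error propagation for a difference of independent counts: with Poisson statistics the variance of each pixel equals its count, so var(a - b) = var(a) + var(b). A minimal NumPy sketch of that bookkeeping (illustrative only; `Dataset.storage`/`Dataset.var` belong to the Gumtree API, and the input values below are one consistent choice, not the actual fixtures):

# Minimal sketch of background subtraction with variance propagation,
# assuming Poisson statistics (variance == counts); not the Gumtree API.
import numpy as np

def background_corrected(data, background):
    """Subtract background; variances add because the inputs are independent."""
    result = data - background
    variance = data + background   # var(a - b) = var(a) + var(b)
    return result, variance

# e.g. 25 - 10 = 15 with variance 25 + 10 = 35, as in the first assertion
result, variance = background_corrected(np.array([25.0]), np.array([10.0]))
print("%.1f %.1f" % (result[0], variance[0]))   # 15.0 35.0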
Code example #2
def __run_script__(fns):

    from Reduction import reduction, AddCifMetadata
    from os.path import basename
    from os.path import join
    import os, re
    import time  #how fast are we going?
    from Formats import output

    elapsed = time.clock()
    print 'Started working at %f' % (time.clock() - elapsed)
    df.datasets.clear()

    # save user preferences
    prof_names, prof_values = save_user_prefs()

    # store current Git versions for data output
    code_versions = {
        "GUI": __UI_gitversion[4:-1],
        "Reduction library": reduction.gitversion[4:-1]
    }
    # check input
    if (fns is None or len(fns) == 0):
        print 'no input datasets'
        return

    # pre-check that we can write the result
#    output_destination = out_folder.value
    output_destination = get_save_path()
    if output_xyd.value or output_fxye.value:
        if not os.access(output_destination, os.W_OK):
            open_error("Unable to write to folder %s" % output_destination)
            return
    # check if input needs to be normalized
    if norm_apply.value:
        # norm_ref is the source of information for normalisation
        # norm_tar is the value norm_ref should become,
        # by multiplication.  If 'auto', the maximum value of norm_ref
        # for the first dataset is used, otherwise any number may be entered.
        norm_ref = str(norm_reference.value)
        if norm_ref.strip() == '':
            open_error(
                "You have asked to apply normalisation but not specified any normalisation reference"
            )
            return
        norm_tar = str(norm_target).lower()

        # check if normalization target needs to be determined
        if len(norm_tar) == 0:
            norm_ref = None
            norm_tar = None
            print 'WARNING: no reference for normalization was specified'
        elif norm_tar == 'auto':
            # set flag
            norm_tar = -1
            # iterate through input datasets
            location = norm_table[norm_ref]
            print 'utilized reference value for "' + norm_ref + '" is:', norm_tar

        # use provided reference value
        else:
            norm_tar = float(norm_tar)

    else:
        norm_ref = None
        norm_tar = None

    # check if bkg-map needs to be loaded
    if bkg_apply.value:
        if not bkg_map.value:
            bkg = None
            print 'WARNING: no bkg-map was specified'
        else:
            bkg = Dataset(str(bkg_map.value))
    else:
        bkg = None

    # check if eff-map needs to be loaded
    if eff_apply.value:
        if not eff_map.value:
            eff = None
            print 'WARNING: no eff-map was specified'
        else:
            eff_map_canonical = get_calibration_path() + '/' + eff_map.value
            if eff_map_canonical[0:5] != 'file:':
                eff_map_canonical = 'file:' + eff_map_canonical
            if not eff_map_canonical in eff_map_cache:
                try:
                    eff_map_cache[eff_map_canonical] = \
                        reduction.read_efficiency_cif(eff_map_canonical)
                except:
                    open_error("Failed to read efficiency file %s" %
                               eff_map_canonical)
                    return
            else:
                print 'Found in cache ' + `eff_map_canonical`
            eff = eff_map_cache[eff_map_canonical]
    else:
        eff = None

    # check if vertical tube correction needs to be loaded
    if vtc_apply.value:
        if not vtc_file.value:
            vtc = None
            print 'WARNING: no vtc-file was specified'
        else:
            vtc = get_calibration_path() + '/' + str(vtc_file.value)
    else:
        vtc = None
    # check if horizontal tube correction needs to be loaded
    if htc_apply.value:
        if not htc_file.value:
            htc = None
            print 'WARNING: no htc-file was specified'
        else:
            htc = get_calibration_path() + '/' + str(htc_file.value)
    else:
        htc = None

    reduced_files = []
    # iterate through input datasets
    # note that the normalisation target (an arbitrary number) is set by
    # the first dataset unless it has already been specified.
    for fn in fns:
        # load dataset
        ds = df[fn]
        # extract basic metadata
        ds = AddCifMetadata.extract_metadata(ds, codeversions=code_versions)
        AddCifMetadata.store_reduction_preferences(ds, prof_names, prof_values)
        # remove redundant dimensions and convert to floating point
        rs = ds.get_reduced() * 1.0
        rs.copy_cif_metadata(ds)
        # check if normalisation is required
        if norm_ref:
            ds, norm_tar = reduction.applyNormalization(
                rs, reference=norm_table[norm_ref], target=norm_tar)
        else:
            ds = rs
        if bkg:
            ds = reduction.getBackgroundCorrected(ds, bkg,
                                                  norm_table[norm_ref],
                                                  norm_tar)

        print 'Finished normalisation, background subtraction at %f' % (
            time.clock() - elapsed)
        # check if vertical tube correction is required
        if vtc:
            ds = reduction.getVerticallyCorrected(ds, vtc)
        print 'Finished vertical offset correction at %f' % (time.clock() -
                                                             elapsed)
        # check if efficiency correction is required
        if eff:
            ds = reduction.getEfficiencyCorrected(ds, eff)

        print 'Finished efficiency correction at %f' % (time.clock() - elapsed)
        # Before fiddling with axes, get the ideal stepsize
        stepsize = reduction.get_stepsize(ds)
        print 'Ideal stepsize determined to be %f' % stepsize
        # check if horizontal tube correction is required
        if htc:
            ds = reduction.getHorizontallyCorrected(ds, htc)

        print 'Finished horizontal correction at %f' % (time.clock() - elapsed)
        # Stitching. If we are recalculating gain, this is purely for
        # informational purposes. We don't want to take the 100x time penalty of
        # multiplying a 2D array by the gain factor for each tube, so we
        # stitch on a 1D array after doing the gain re-refinement.
        if ds.ndim > 2:
            stitched = reduction.getStitched(ds,
                                             ignore=str(asm_drop_frames.value))
        else:
            stitched = ds  # already two-dimensional; nothing to stitch
        # Display dataset
        print 'Finished stitching at %f' % (time.clock() - elapsed)
        Plot1.set_dataset(stitched)
        Plot1.title = stitched.title
        # check if we are recalculating gain
        if regain_apply.value:
            bottom = int(vig_lower_boundary.value)
            top = int(vig_upper_boundary.value)
            cs, gain, esds, chisquared, no_overlaps = reduction.do_overlap(
                ds,
                regain_iterno.value,
                bottom=bottom,
                top=top,
                exact_angles=htc,
                drop_frames=str(asm_drop_frames.value))
            if cs is not None:
                print 'Have new gains at %f' % (time.clock() - elapsed)
                fg = Dataset(gain)
                fg.var = esds
                # set horizontal axis (ideal values)
                # Plot4.set_dataset(Dataset(chisquared))   #chisquared history
                # Plot5.set_dataset(fg)   #final gain plot
            else:
                open_error(
                    "Cannot do gain recalculation as the scan ranges do not overlap."
                )
                return
        if not vig_apply_rescale.value:
            norm_const = -1.0
        else:
            norm_const = float(vig_rescale_target.value)
        # set the cluster value
        if str(vig_cluster.value) in ['Merge', 'Sum']:
            cluster = (stepsize * 0.6, str(vig_cluster.value))  #60 percent of ideal
        else:
            cluster = (0.0, 'None')
        if not regain_apply.value:  #already done
            final_result = reduction.getVerticalIntegrated(
                stitched,
                axis=0,
                normalization=norm_const,
                cluster=cluster,
                bottom=int(vig_lower_boundary.value),
                top=int(vig_upper_boundary.value))
            print 'Finished vertical integration at %f' % (time.clock() -
                                                           elapsed)
        else:
            if str(vig_cluster.value) == 'Sum':  #simulate a sum for the gain recalculated value
                cs *= no_overlaps
                info_string = "\nFinal values were multiplied by %d to simulate summation of individual points." % no_overlaps
                cs.add_metadata("_pd_proc_info_data_reduction",
                                info_string,
                                append=True)
            final_result = cs
        # Display reduced dataset
        send_to_plot(final_result, Plot2)
        if copy_acc.value:  #user wants us to accumulate it
            plh_copy_proc()
        # Output datasets
        # Calculate inserted string: %s for sample name, %t for temperature
        stem = str(output_stem.value)
        stem = re.sub(r'[^\w+=()*^@~:{}\[\].%-]', '_', stem)
        if '%s' in stem:
            samplename = final_result.harvest_metadata(
                "CIF")['_pd_spec_special_details']
            name_front = samplename.split()[0]
            stem = stem.replace('%s', name_front)
        if '%t' in stem:
            temperature = 'Unknown_temperature'
            stem = stem.replace('%t', temperature)
        print 'Filename stem is now ' + stem
        filename_base = join(get_save_path(),
                             basename(str(fn))[:-7] + '_' + stem)
        if output_xyd.value or output_fxye.value:  #write CIF if other files written
            output.write_cif_data(final_result, filename_base)
            reduced_files.append(filename_base + '.cif')
        if output_xyd.value:
            output.write_xyd_data(final_result,
                                  filename_base,
                                  codeversions=code_versions)
            reduced_files.append(filename_base + '.xyd')
        if output_fxye.value:
            output.write_fxye_data(final_result,
                                   filename_base,
                                   codeversions=code_versions)
            reduced_files.append(filename_base + '.xye')

        # ds.save_copy(join(str(out_folder.value), 'reduced_' + basename(str(fn))))
        print 'Finished writing data at %f' % (time.clock() - elapsed)

    if len(reduced_files) > 0:
        zip_files(reduced_files,
                  'Echidna_rd_' + str(int(time.time()))[2:] + '.zip')
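The output-naming logic near the end of this script sanitises the user-supplied stem with `re.sub` and then expands the `%s` (sample name) and `%t` (temperature) placeholders. A standalone sketch of that substitution (here `samplename` is a plain argument standing in for the CIF `_pd_spec_special_details` field the script harvests):

# Sketch of the stem sanitisation and placeholder expansion used above.
import re

def expand_stem(stem, samplename, temperature='Unknown_temperature'):
    # anything outside the allowed character set becomes an underscore
    stem = re.sub(r'[^\w+=()*^@~:{}\[\].%-]', '_', stem)
    if '%s' in stem:
        stem = stem.replace('%s', samplename.split()[0])
    if '%t' in stem:
        stem = stem.replace('%t', temperature)
    return stem

print(expand_stem('%s run/1 %t', 'Si powder standard'))
# -> 'Si_run_1_Unknown_temperature'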
Code example #3
def __run_script__(fns):
    
    from Reduction import reduction, AddCifMetadata
    from os.path import basename
    from os.path import join
    from Formats import output
    import re
    
    df.datasets.clear()
    
    # save user preferences
    prof_names,prof_values = save_user_prefs()

    # check input
    if (fns is None or len(fns) == 0) :
        print 'no input datasets'
        return

    # set the title for Plot2
    # Plot2.title = 'Plot 2'
    # check if input needs to be normalized
    if norm_apply.value:
        # norm_ref is the source of information for normalisation
        # norm_tar is the value norm_ref should become,
        # by multiplication.  If 'auto', the maximum value of norm_ref
        # for the first dataset is used, otherwise any number may be entered.
        norm_ref = str(norm_reference.value)
        norm_tar = norm_target

        # check if normalization target needs to be determined
        if len(norm_tar) == 0:
            norm_ref = None
            norm_tar = None
            print 'WARNING: no reference for normalization was specified'
        elif norm_tar == 'auto':
            # set flag
            norm_tar = -1
            # iterate through input datasets
            location = norm_table[norm_ref]     
            print 'utilized reference value for "' + norm_ref + '" is:', norm_tar
            
        # use provided reference value
        else:
            norm_tar = float(norm_tar)
            
    else:
        norm_ref = None
        norm_tar = None
    
    # check if bkg-map needs to be loaded
    if bkg_apply.value:
        if not bkg_map.value:
            bkg = None
            print 'WARNING: no bkg-map was specified'
        else:
            bkg = Dataset(str(bkg_map.value))
            # to avoid complaints in routines that expect it
            reduction.AddCifMetadata.add_metadata_methods(bkg)
    else:
        bkg = None
    
    # check if eff-map needs to be loaded
    if eff_apply.value:
        if not eff_map.value:
            eff = None
            print 'WARNING: no eff-map was specified'
        else:
            eff = Dataset(str(eff_map.value))
    else:
        eff = None
    # Check for rescale
    if vig_apply_rescale.value:
        vig_normalisation = float(vig_rescale_target.value)
    else:
        vig_normalisation = -1
    group_val = grouping_options[str(output_grouping.value)]
    # iterate through input datasets
    # note that the normalisation target (an arbitrary number) is set by
    # the first dataset unless it has already been specified.
    for fn in fns:
        # load dataset
        ds = df[fn]
        # extract basic metadata
        ds = reduction.AddCifMetadata.extract_metadata(ds)
        try:
             stth_value = sum(ds.stth)/len(ds.stth) # save for later
             all_stth = ds.stth[:] # also save for later
        except TypeError:
            stth_value = ds.stth
        if ds.ndim > 3:
            rs = ds.get_reduced()
        else:
            rs = ds
        rs = rs * 1.0  #convert to float
        rs.copy_cif_metadata(ds)
        # check if normalisation is required
        if norm_ref:
            norm_tar = reduction.applyNormalization(rs, reference=norm_table[norm_ref], target=norm_tar)
        if bkg:
            rs = reduction.getBackgroundCorrected(rs, bkg, norm_table[norm_ref], norm_tar)
        # check if efficiency correction is required
        assert rs.dtype == Array([1.2,1.3]).dtype
        if eff:
            ds = reduction.getEfficiencyCorrected(rs, eff)
        else:
            ds = rs
        # Calculate inserted string: %s for sample name, %t for temperature
        stem = str(output_stem.value)
        stem = re.sub(r'[^\w+=()*^@~:{}\[\].%-]','_',stem)
        if '%s' in stem:
             samplename = ds.harvest_metadata("CIF")['_pd_spec_special_details']
             name_front = re.sub(r'[^\w+=()*^@~:{}\[\].%-]','_',samplename)
             stem = stem.replace('%s',name_front)
        if '%t1' in stem:
             # get tc1
             temperature = df[fn]["/entry1/sample/tc1/sensor/sensorValueA"]
             print `temperature`
             try:
                 avetemp = sum(temperature)/len(temperature)
             except TypeError:
                 avetemp = temperature
             stem = stem.replace('%t1',"%.0fK" % avetemp)
        print 'Filename stem is now ' + stem
        # restrict output set of frames
        restrict_spec = str(output_restrict.value)
        if ':' in restrict_spec:
            first,last = map(int,restrict_spec.split(':'))
            start_frames = last
            current_frame_start = first
            frameno = first
        else:
            start_frames = len(ds)
            current_frame_start = 0
            frameno = 0
        # perform grouping of sequential input frames   
        while frameno <= start_frames:
            if group_val is None:
                target_val = ""
                final_frame = start_frames-1
                frameno = start_frames
            else:
                stth_value = all_stth[current_frame_start]
                target_val = ds[group_val][current_frame_start]
                try:
                    if ds[frameno][group_val] == target_val:
                        frameno += 1
                        continue
                except:   #Assume an exception is due to too large frameno
                    print 'Exiting frame loop due to error'
            # frameno is the first frame with the wrong values
            cs = ds.get_section([current_frame_start,0,0],[frameno-current_frame_start,ds.shape[1],ds.shape[2]])
            cs.copy_cif_metadata(ds)
            print 'Summing frames from %d to %d, shape %s, start 2th %f' % (current_frame_start,frameno-1,cs.shape,stth_value)
            if target_val != "":
                print 'Corresponding to a target value of ' + `target_val`
            # sum the input frames
            print 'cs axes: ' + cs.axes[0].title + ' ' + cs.axes[1].title + ' ' + cs.axes[2].title
            # es = cs.intg(axis=0)
            es = reduction.getSummed(cs,applyStth=stth_value)  # does axis correction as well
            es.copy_cif_metadata(cs)
            print 'es axes: ' + `es.axes[0].title` + es.axes[1].title
            Plot1.set_dataset(es)
            cs = reduction.getVerticalIntegrated(es, axis=0, normalization=vig_normalisation,
                                                     bottom = int(vig_lower_boundary.value),
                                                     top=int(vig_upper_boundary.value))
            if target_val != "":
                cs.title = cs.title + "_" + str(target_val)
            try:
                send_to_plot(cs,Plot2,add=True,change_title=False)
            except IndexError:  #catch error from GPlot
                send_to_plot(cs,Plot2,add=False,change_title=True)
            # Output datasets
            filename_base = join(str(out_folder.value),basename(str(fn))[:-7]+'_'+stem+"_"+str(target_val))
            if output_cif.value:
                output.write_cif_data(cs,filename_base)
            if output_xyd.value:
                output.write_xyd_data(cs,filename_base)
            if output_fxye.value:
                output.write_fxye_data(cs,filename_base)
            #loop to next group of datasets
            current_frame_start = frameno
            frameno += 1
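The `while frameno <= start_frames` loop above scans the frames once and cuts a new section each time the grouping value changes. The same run-length grouping reduced to plain lists (a hypothetical helper, not part of the Reduction library):

# Run-length grouping of sequential frames by a metadata value,
# mirroring the frame loop above on plain Python lists.
def group_runs(values):
    """Yield (start, end_exclusive) index pairs for runs of equal values."""
    start = 0
    for i in range(1, len(values) + 1):
        if i == len(values) or values[i] != values[start]:
            yield start, i
            start = i

temps = [10, 10, 10, 50, 50, 90]
for start, end in group_runs(temps):
    print("frames %d-%d at value %s" % (start, end - 1, temps[start]))
# frames 0-2 at value 10, frames 3-4 at value 50, frames 5-5 at value 90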
Code example #4
def __run_script__(fns):
    
    from Reduction import reduction,AddCifMetadata
    from os.path import basename
    from os.path import join
    import os,re
    import time           #how fast are we going?
    from Formats import output
    
    num_step = 9
    prog_bar.max = len(fns) * num_step
    prog_bar.selection = 1

    elapsed = time.clock()
    print 'Started working at %f' % (time.clock()-elapsed)
    df.datasets.clear()
    
    # save user preferences
    prof_names,prof_values = save_user_prefs()

    # store current Git versions for data output
    code_versions = {"GUI":__UI_gitversion[4:-1],
                     "Reduction library":reduction.gitversion[4:-1]}
    # check input
    if (fns is None or len(fns) == 0) :
        print 'no input datasets'
        return

    # pre-check that we can write the result
    output_destination = out_folder.value
    if output_xyd.value or output_fxye.value:
        if not os.access(output_destination,os.W_OK):
            open_error("Unable to write to folder %s" % output_destination)
            return
    # check if input needs to be normalized
    if norm_apply.value:
        # norm_ref is the source of information for normalisation
        # norm_tar is the value norm_ref should become,
        # by multiplication.  If 'auto', the maximum value of norm_ref
        # for the first dataset is used, otherwise any number may be entered.
        norm_ref = str(norm_reference.value)
        if norm_ref.strip() == '':
            open_error("You have asked to apply normalisation but not specified any normalisation reference")
            return
        norm_tar = str(norm_target).lower()

        # check if normalization target needs to be determined
        if len(norm_tar) == 0:
            norm_ref = None
            norm_tar = None
            print 'WARNING: no reference for normalization was specified'
        elif norm_tar == 'auto':
            # set flag
            norm_tar = -1
            # iterate through input datasets
            location = norm_table[norm_ref]     
            print 'utilized reference value for "' + norm_ref + '" is:', norm_tar
            
        # use provided reference value
        else:
            norm_tar = float(norm_tar)
            
    else:
        norm_ref = None
        norm_tar = None
    
    # check if bkg-map needs to be loaded
    if bkg_apply.value:
        if not bkg_map.value:
            bkg = None
            print 'WARNING: no bkg-map was specified'
        else:
            bkg = Dataset(str(bkg_map.value)).get_reduced()
    else:
        bkg = None
    
    # check if eff-map needs to be loaded
    if eff_apply.value:
        if not eff_map.value:
            eff = None
            print 'WARNING: no eff-map was specified'
        else:
            eff_map_canonical = str(eff_map.value)
            if eff_map_canonical[0:5] != 'file:':
                eff_map_canonical = 'file:' + eff_map_canonical
            if not eff_map_canonical in eff_map_cache:
                try:
                    eff_map_cache[eff_map_canonical] = reduction.read_efficiency_cif(eff_map_canonical)
                except:
                    open_error("Failed to read efficiency file %s" % eff_map_canonical)
                    return
            else:
                print 'Found in cache ' + `eff_map_canonical`
            eff = eff_map_cache[eff_map_canonical]
    else:
        eff = None
    
    # check if vertical tube correction needs to be loaded
    if vtc_apply.value:
        if not vtc_file.value:
            vtc = None
            print 'WARNING: no vtc-file was specified'
        else:
            vtc = str(vtc_file.value)
    else:
        vtc = None
    # check if horizontal tube correction needs to be loaded
    if htc_apply.value:
        if not htc_file.value:
            htc = None
            print 'WARNING: no htc-file was specified'
        else:
            htc = str(htc_file.value)
    else:
        htc = None

    # check if gain correction needs to be loaded
    regain_data = []
    if regain_load.value:
        if not regain_load_filename.value:
            open_error("You have requested loading of gain correction from a file but no file has been specified")
            return
        rlf = str(regain_load_filename.value)
        regain_data = reduction.load_regain_values(rlf)
    # iterate through input datasets
    # note that the normalisation target (an arbitrary number) is set by
    # the first dataset unless it has already been specified.
    prog_bar.selection = 2
    fn_idx = 0
    for fn in fns:
        # load dataset
        ds = df[fn]
        if not norm_uniform.value:
            norm_tar = -1   #reinitialise
        try:
            prog_bar.selection = fn_idx * num_step
            # extract basic metadata
            print 'Code versions:' + `code_versions`
            ds = AddCifMetadata.extract_metadata(ds,codeversions=code_versions)
            AddCifMetadata.store_reduction_preferences(ds,prof_names,prof_values)
            # remove redundant dimensions and convert to floating point
            rs = ds.get_reduced()*1.0
            rs.copy_cif_metadata(ds)
            # check if normalisation is required
            if norm_ref:
                ds,norm_tar = reduction.applyNormalization(rs, reference=norm_table[norm_ref], target=norm_tar)
            else:
                ds = rs
            if bkg:
                AddCifMetadata.add_metadata_methods(bkg)
                ds = reduction.getBackgroundCorrected(ds, bkg, norm_table[norm_ref], norm_tar)
            print 'Finished normalisation, background subtraction at %f' % (time.clock()-elapsed)
            prog_bar.selection = fn_idx * num_step + 1
            # check that we have the necessary dimensions
            dims = ds.shape
            if dims[1] != 128:
                rebin_factor = int(dims[1]/128)
                print 'Need to rebin from %d to 128, factor of %d; stand by...' % (dims[1],rebin_factor)
                ds = reduction.rebin(ds,axis=1,factor=rebin_factor)
            else:
                print 'No need to rebin, dataset shape is ' + repr(dims)
            # check if vertical tube correction is required
            if vtc:
                ds = reduction.getVerticallyCorrected(ds, vtc)
            print 'Finished vertical offset correction at %f' % (time.clock()-elapsed)
            prog_bar.selection = fn_idx * num_step + 2
            # check if efficiency correction is required
            if eff:
                ds = reduction.getEfficiencyCorrected(ds, eff)
            
            print 'Finished efficiency correction at %f' % (time.clock()-elapsed)
            prog_bar.selection = fn_idx * num_step + 3
            # Before fiddling with axes, get the ideal stepsize
            stepsize = reduction.get_stepsize(ds)
            print 'Ideal stepsize determined to be %f' % stepsize
            prog_bar.selection = fn_idx * num_step + 4
            # check if horizontal tube correction is required
            if htc:
                ds = reduction.getHorizontallyCorrected(ds, htc)
    
            print 'Finished horizontal correction at %f' % (time.clock()-elapsed)
            prog_bar.selection = fn_idx * num_step + 5
            # Stitching. If we are recalculating gain, this is purely for
            # informational purposes. We don't want to take the 100x time penalty of
            # multiplying a 2D array by the gain factor for each tube, so we
            # stitch using a 1D array after doing the gain re-refinement.
            drop_tubes = str(asm_drop_tubes.value)
            if ds.ndim > 2:
                # See if we are ignoring any tubes
                stitched = reduction.getStitched(ds,ignore=str(asm_drop_frames.value),drop_tubes=drop_tubes)
            else:
                stitched = ds  # already two-dimensional; nothing to stitch
            # Display dataset
            print 'Finished stitching at %f' % (time.clock()-elapsed)
            prog_bar.selection = fn_idx * num_step + 6
            Plot1.set_dataset(stitched)
            Plot1.title = stitched.title
            n_logger.log_plot(Plot1, footer = Plot1.title)
            # check if we are recalculating gain 
            if regain_apply.value:
                bottom = int(vig_lower_boundary.value)
                top = int(vig_upper_boundary.value)
                dumpfile = None
                # if regain_dump_tubes.value:
                #     dumpfile = filename_base+".tubes"
                cs,gain,esds,chisquared,no_overlaps = reduction.do_overlap(ds,regain_iterno.value,bottom=bottom,top=top,
                                                                           exact_angles=htc,drop_frames=str(asm_drop_frames.value),drop_tubes=drop_tubes,use_gains=regain_data,dumpfile=dumpfile,
                                                                           do_sum=regain_sum.value)
                if cs is not None:
                    print 'Have new gains at %f' % (time.clock() - elapsed)
                    fg = Dataset(gain)
                    fg.var = esds**2
                    # set horizontal axis (ideal values)
                    Plot4.set_dataset(Dataset(chisquared))   #chisquared history
                    Plot5.set_dataset(fg)   #final gain plot
                    # now save the file if requested
                    if regain_store.value and not regain_load.value:
                        gain_comment = "Gains refined from file %s" % fn
                        reduction.store_regain_values(str(regain_store_filename.value),gain,gain_comment)
                else:
                    open_error("Cannot do gain recalculation as the scan ranges do not overlap.")
                    return
            if not vig_apply_rescale.value:
                norm_const = -1.0
            else:
                norm_const = float(vig_rescale_target.value)
            # set the cluster value
            if str(vig_cluster.value) in ['Merge','Sum']:
                cluster = (stepsize * 0.6,str(vig_cluster.value))  #60 percent of ideal
            else:
                cluster = (0.0,'None')
            if not regain_apply.value:  #already done
                final_result = reduction.getVerticalIntegrated(stitched, axis=0, normalization=norm_const,
                                                     cluster=cluster,bottom = int(vig_lower_boundary.value),
                                                     top=int(vig_upper_boundary.value))
                print 'Finished vertical integration at %f' % (time.clock()-elapsed)
            else:
                if str(vig_cluster.value) == 'Sum':  #simulate a sum for the gain recalculated value
                    cs *= no_overlaps
                    info_string = "\nFinal values were multiplied by %d to simulate summation of individual points." % no_overlaps
                    cs.add_metadata("_pd_proc_info_data_reduction",info_string,append=True)
                if norm_const > 0:  #rescale requested but not performed
                    reduction.rescale(cs,norm_const)
                final_result = cs
            prog_bar.selection = fn_idx * num_step + 7
            # Display reduced dataset
            send_to_plot(final_result,Plot2)
            n_logger.log_plot(Plot2, footer = Plot2.title)
            if copy_acc.value:   #user wants us to accumulate it
                plh_copy_proc()
            # Output datasets
            # Calculate inserted string: %s for sample name, %t for temperature
            stem = str(output_stem.value)
            stem = re.sub(r'[^\w+=()*^@~:{}\[\].%-]','_',stem)
            if '%s' in stem:
                 samplename = final_result.harvest_metadata("CIF")['_pd_spec_special_details']
                 name_front = samplename.split()[0]
                 stem = stem.replace('%s',name_front)
            if '%t' in stem:
                 temperature = 'Unknown_temperature'
                 stem = stem.replace('%t',temperature)
            print 'Filename stem is now ' + stem
            filename_base = join(str(out_folder.value),basename(str(fn))[:-7] + '_' + stem)
            if output_xyd.value or output_fxye.value or output_topas.value:  #write CIF if other files written
                output.write_cif_data(final_result,filename_base)
            if output_xyd.value:
                add_header = output_naked.value
                output.write_xyd_data(final_result,filename_base,codeversions=code_versions,naked=add_header)
            if output_fxye.value:
                output.write_fxye_data(final_result,filename_base,codeversions=code_versions)
            if output_topas.value:
                output.write_xyd_data(final_result,filename_base,codeversions=code_versions,comment_char="!",extension='topas')
            # ds.save_copy(join(str(out_folder.value), 'reduced_' + basename(str(fn))))
            print 'Finished writing data at %f' % (time.clock()-elapsed)
            prog_bar.selection = fn_idx * num_step + 8
            fn_idx += 1
        finally:
            df[fn].close()
            prog_bar.selection = 0
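This script canonicalises the efficiency-map path to a `file:` URL and keeps parsed maps in `eff_map_cache`, so reprocessing further files skips the CIF parse. The caching pattern in isolation (the lambda loader is a stand-in for `reduction.read_efficiency_cif`):

# Cache pattern from the eff-map block above, with a dummy loader.
eff_map_cache = {}

def load_efficiency(path, loader):
    # canonicalise to a 'file:' URL so cache keys are uniform
    canonical = path if path.startswith('file:') else 'file:' + path
    if canonical not in eff_map_cache:
        eff_map_cache[canonical] = loader(canonical)
    else:
        print('Found in cache ' + repr(canonical))
    return eff_map_cache[canonical]

eff = load_efficiency('/tmp/eff.cif', lambda url: {'source': url})
eff = load_efficiency('file:/tmp/eff.cif', lambda url: {'source': url})  # cache hit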
Code example #5
File: SplitScans.py  Project: Gumtree/Echidna_scripts
def __run_script__(fns):
    
    from Reduction import reduction
    from os.path import basename
    from os.path import join
    import time           #how fast are we going?
    import AddCifMetadata,output
    
    elapsed = time.clock()
    print 'Started working on Split Scans at %f' % (time.clock()-elapsed)
    df.datasets.clear()
    
    # check input
    if (fns is None or len(fns) == 0) :
        print 'no input datasets'
        return

    # check if input needs to be normalized
    if norm_apply.value:
        # norm_ref is the source of information for normalisation
        # norm_tar is the value norm_ref should become,
        # by multiplication.  If 'auto', the maximum value of norm_ref
        # for the first dataset is used, otherwise any number may be entered.
        norm_ref = str(norm_reference.value)
        norm_tar = str(norm_target.value).lower()

        # check if normalization target needs to be determined
        if len(norm_tar) == 0:
            norm_ref = None
            norm_tar = None
            print 'WARNING: no reference for normalization was specified'
        elif norm_tar == 'auto':
            # set flag
            norm_tar = -1
            # iterate through input datasets
            location = norm_table[norm_ref]     
            print 'utilized reference value for "' + norm_ref + '" is:', norm_tar
            
        # use provided reference value
        else:
            norm_tar = float(norm_tar)
            
    else:
        norm_ref = None
        norm_tar = None
    
    # check if bkg-map needs to be loaded
    if bkg_apply.value:
        if not bkg_map.value:
            bkg = None
            print 'WARNING: no bkg-map was specified'
        else:
            bkg = Dataset(str(bkg_map.value))
    else:
        bkg = None
    
    # check if eff-map needs to be loaded
    if eff_apply.value:
        if not eff_map.value:
            eff = None
            print 'WARNING: no eff-map was specified'
        else:
            eff = reduction.read_efficiency_cif(str(eff_map.value))
            if eff.ndim != 2:
                raise AttributeError('eff.ndim != 2')
    else:
        eff = None
    
    # check if vertical tube correction needs to be loaded
    if vtc_apply.value:
        if not vtc_file.value:
            vtc = None
            print 'WARNING: no vtc-file was specified'
        else:
            vtc = str(vtc_file.value)
    else:
        vtc = None
    
    # check if horizontal tube correction needs to be loaded
    if htc_apply.value:
        if not htc_file.value:
            htc = None
            print 'WARNING: no htc-file was specified'
        else:
            htc = str(htc_file.value)
    else:
        htc = None
        
    # iterate through input datasets
    # note that the normalisation target (an arbitrary number) is set by
    # the first dataset unless it has already been specified.
    for fn in fns:
        # load dataset
        ds = df[fn]
        # extract basic metadata
        ds = reduction.AddCifMetadata.extract_metadata(ds)
        # remove redundant dimensions
        ds = ds.get_reduced()
        # check if normalisation is required
        if norm_ref:
            norm_tar = reduction.applyNormalization(ds, reference=norm_table[norm_ref], target=norm_tar)
        if bkg:
            ds = reduction.getBackgroundCorrected(ds, bkg, norm_table[norm_ref], norm_tar)
        
        print 'Finished normalisation, background subtraction at %f' % (time.clock()-elapsed)
        # check if vertical tube correction is required
        if vtc:
            ds = reduction.getVerticallyCorrected(ds, vtc)
        print 'Finished vertical offset correction at %f' % (time.clock()-elapsed)
        # check if efficiency correction is required
        if eff:
            ds = reduction.getEfficiencyCorrected(ds, eff)
        
        print 'Finished efficiency correction at %f' % (time.clock()-elapsed)
        # check if horizontal tube correction is required
        if htc:
            ds = reduction.getHorizontallyCorrected(ds, htc)

        print 'Finished horizontal correction at %f' % (time.clock()-elapsed)
        if vig_apply_rescale.value:
            ds = reduction.getVerticalIntegrated(ds, normalization=float(vig_rescale_target.value))
        else:
            ds = reduction.getVerticalIntegrated(ds)
        print 'Finished vertical integration at %f' % (time.clock()-elapsed)
        Plot1.clear()
        Plot1.set_dataset(ds)
        # output.write_cif_data(ds,join(str(out_folder.value), 'reduced_' + basename(str(fn))[:-7]))
        # Now write out each tube's data
        output.dump_tubes(ds,join(str(out_folder.value), 'split_' + basename(str(fn))[:-7]))
        print 'Finished writing data at %f' % (time.clock()-elapsed)
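Every script in this listing parses the normalisation target the same way: an empty string disables normalisation, 'auto' is recorded as a -1 sentinel so the first dataset's maximum monitor value can be adopted later, and anything else is read as an explicit number. A compact sketch of that convention (hypothetical helper, not library code):

# The norm_ref / norm_tar convention shared by these scripts:
# '' disables normalisation, 'auto' becomes the -1 sentinel, numbers pass through.
def parse_norm_target(norm_ref, raw_target):
    target = str(raw_target).strip().lower()
    if not target:
        print('WARNING: no reference for normalization was specified')
        return None, None
    if target == 'auto':
        return norm_ref, -1   # -1: take the maximum from the first dataset
    return norm_ref, float(target)

print(parse_norm_target('bm1_counts', 'auto'))   # ('bm1_counts', -1)
print(parse_norm_target('bm1_counts', '5.0'))    # ('bm1_counts', 5.0)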
Code example #6
File: tests.py  Project: Gumtree/Wombat_scripts
    def testNormBack(self):
        """Test that normalisation is correctly performed"""
        rs = reduction.getBackgroundCorrected(self.test_array, self.back_array,
                                              norm_ref='bm1_counts', norm_target=5.0)
        self.assertEqual(rs.storage[0][0][0], self.core_array2[0][0] - self.back_vals[0][0]/2)
        self.assertEqual(rs.storage[1][1][1], self.core_array2[1][1] - self.back_vals[1][1]*1.25)
        self.assertEqual(rs.storage[2][1][1], self.core_array2[1][1] - self.back_array[1][1]/2)
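The /2 and *1.25 factors in these assertions are what you get when each background frame is rescaled to the common normalisation target before subtraction; for a target of 5.0, background monitor counts of 10 and 4 would give exactly those ratios (the actual fixture values are defined elsewhere in tests.py). A hedged sketch of the arithmetic, not the library routine itself:

# Sketch: background rescaled to the normalisation target before subtraction.
# Monitor values are illustrative, not the real fixtures.
def subtract_scaled_background(data, bkg, bkg_monitor, target=5.0):
    scale = float(target) / bkg_monitor
    return data - bkg * scale

print(subtract_scaled_background(20.0, 8.0, bkg_monitor=10.0))  # 20 - 8/2    = 16.0
print(subtract_scaled_background(20.0, 8.0, bkg_monitor=4.0))   # 20 - 8*1.25 = 10.0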
Code example #7
File: tests.py  Project: Gumtree/Wombat_scripts
    def testBackMeta(self):
        """Test that the metadata is inserted and returned correctly"""
        rs = reduction.getBackgroundCorrected(self.test_array, self.back_array,
                                              norm_ref='bm1_counts', norm_target=5.0)
        pf = rs.harvest_metadata("CIF")
        self.failUnless("subtracted using" in str(pf["_pd_proc_info_data_reduction"]))
        self.failUnless("normalising to %f using monitor bm1_counts" % 5.0
                        in str(pf["_pd_proc_info_data_reduction"]))
Code example #8
File: tests.py  Project: jamesrhester/Echidna_scripts
    def testNormBack(self):
        """Test that normalisation is correctly performed"""
        rs = reduction.getBackgroundCorrected(self.test_array, self.back_array,
                                              norm_ref='bm1_counts', norm_target=5.0)
        self.failUnless(rs.storage[0][0][0] == self.test_array[0][0][0] - self.back_array[0][0][0]/2)
        self.failUnless(rs.storage[1][1][1] == self.test_array[1][1][1] - self.back_array[1][1][1]*1.25)
        self.failUnless(rs.storage[2][1][1] == self.test_array[2][1][1] - self.back_array[2][1][1]/2)
Code example #9
def __run_script__(fns):
    
    from Reduction import reduction
    from os.path import basename
    from os.path import join
    import time           #how fast are we going?
    import AddCifMetadata,output
    
    elapsed = time.clock()
    print 'Started working at %f' % (time.clock()-elapsed)
    df.datasets.clear()
    
    # check input
    if (fns is None or len(fns) == 0) :
        print 'no input datasets'
        return

    # check if input needs to be normalized
    if norm_apply.value:
        # norm_ref is the source of information for normalisation
        # norm_tar is the value norm_ref should become,
        # by multiplication.  If 'auto', the maximum value of norm_ref
        # for the first dataset is used, otherwise any number may be entered.
        norm_ref = str(norm_reference.value)
        norm_tar = str(norm_target.value).lower()

        # check if normalization target needs to be determined
        if len(norm_tar) == 0:
            norm_ref = None
            norm_tar = None
            print 'WARNING: no reference for normalization was specified'
        elif norm_tar == 'auto':
            # set flag
            norm_tar = -1
            # iterate through input datasets
            location = norm_table[norm_ref]     
            print 'utilized reference value for "' + norm_ref + '" is:', norm_tar
            
        # use provided reference value
        else:
            norm_tar = float(norm_tar)
            
    else:
        norm_ref = None
        norm_tar = None
    
    # check if bkg-map needs to be loaded
    if bkg_apply.value:
        if not bkg_map.value:
            bkg = None
            print 'WARNING: no bkg-map was specified'
        else:
            bkg = Dataset(str(bkg_map.value))
    else:
        bkg = None
    
    # check if eff-map needs to be loaded
    if eff_apply.value:
        if not eff_map.value:
            eff = None
            print 'WARNING: no eff-map was specified'
        else:
            if not eff_map.value in eff_map_cache:
                eff_map_cache[eff_map.value] = reduction.read_efficiency_cif(str(eff_map.value))
            else:
                print 'Found cached efficiency map ' + str(eff_map.value)
            eff = eff_map_cache[eff_map.value]
    else:
        eff = None
    
    # check if vertical tube correction needs to be loaded
    if vtc_apply.value:
        if not vtc_file.value:
            vtc = None
            print 'WARNING: no vtc-file was specified'
        else:
            vtc = str(vtc_file.value)
    else:
        vtc = None
    
    # check if horizontal tube correction needs to be loaded
    if htc_apply.value:
        if not htc_file.value:
            htc = None
            print 'WARNING: no htc-file was specified'
        else:
            htc = str(htc_file.value)
    else:
        htc = None
        
    # iterate through input datasets
    # note that the normalisation target (an arbitrary number) is set by
    # the first dataset unless it has already been specified.
    for fn in fns:
        # load dataset
        ds = df[fn]
        # extract basic metadata
        ds = reduction.AddCifMetadata.extract_metadata(ds)
        # remove redundant dimensions
        rs = ds.get_reduced()
        rs.copy_cif_metadata(ds)
        # check if normalisation is required
        if norm_ref:
            ds,norm_tar = reduction.applyNormalization(rs, reference=norm_table[norm_ref], target=norm_tar)
        if bkg:
            ds = reduction.getBackgroundCorrected(ds, bkg, norm_table[norm_ref], norm_tar)
        
        print 'Finished normalisation, background subtraction at %f' % (time.clock()-elapsed)
        # check if vertical tube correction is required
        if vtc:
            ds = reduction.getVerticallyCorrected(ds, vtc)
        print 'Finished vertical offset correction at %f' % (time.clock()-elapsed)
        # check if efficiency correction is required
        if eff:
            ds = reduction.getEfficiencyCorrected(ds, eff)
        
        print 'Finished efficiency correction at %f' % (time.clock()-elapsed)
        # check if horizontal tube correction is required
        if htc:
            ds = reduction.getHorizontallyCorrected(ds, htc)

        print 'Finished horizontal correction at %f' % (time.clock()-elapsed)

        # check if we are recalculating gain
        if regain_apply.value:
            print 'ds.has_key(ms): ' + `ds.__dict__.has_key('ms')`
            ds,gain,esds,chisquared = reduction.do_overlap(ds,regain_iterno.value)
            print 'Have new gains at %f' % (time.clock() - elapsed)
            Plot4 = Plot(title='Chi squared history')
            Plot5 = Plot(title='Final Gain')
            fg = Dataset(gain)
            fg.var = esds
            Plot4.set_dataset(Dataset(chisquared))   #chisquared history
            Plot5.set_dataset(fg)   #final gain plot
        # assemble dataset
        if ds.ndim > 2:
            asm_algo = str(asm_algorithm.value)
            if asm_algo == 'stitch frames':
                ds = reduction.getStitched(ds)
            elif asm_algo == 'sum frames':
                ds = reduction.getSummed(ds)
            else:
                print 'specify assemble algorithm'
                return
        # Display dataset
        print 'Finished stitching at %f' % (time.clock()-elapsed)
        Plot1.set_dataset(ds)
        Plot1.title = ds.title
        if vig_apply_rescale.value:
            ds = reduction.getVerticalIntegrated(ds, axis=0, normalization=float(vig_rescale_target.value),
                                                 cluster=float(vig_cluster.value))
        else:
            ds = reduction.getVerticalIntegrated(ds, axis=0, cluster=float(vig_cluster.value))
        print 'Finished vertical integration at %f' % (time.clock()-elapsed)
        # Display reduced dataset
        Plot2.set_dataset(ds)
        Plot2.title = ds.title
        # Output datasets
        filename_base = join(str(out_folder.value),str(output_stem.value) + basename(str(fn))[:-7])
        if output_cif.value:
            output.write_cif_data(ds,filename_base)
        if output_xyd.value:
            output.write_xyd_data(ds,filename_base)
        if output_fxye.value:
            output.write_fxye_data(ds,filename_base)
        # ds.save_copy(join(str(out_folder.value), 'reduced_' + basename(str(fn))))
        print 'Finished writing data at %f' % (time.clock()-elapsed)
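This script selects between stitching and summing frames with an if/elif chain on `asm_algorithm.value`. A table-driven version of the same dispatch (sketch only; the lambdas stand in for `reduction.getStitched` and `reduction.getSummed`):

# Table-driven version of the stitch/sum selection above.
def assemble(ds, algorithm, assemblers):
    try:
        return assemblers[algorithm](ds)
    except KeyError:
        print('specify assemble algorithm')
        return None

assemblers = {
    'stitch frames': lambda ds: 'stitched(%s)' % ds,
    'sum frames': lambda ds: 'summed(%s)' % ds,
}
print(assemble('ds', 'sum frames', assemblers))   # summed(ds)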
Code example #10
def __run_script__(fns):
    
    from Reduction import reduction, AddCifMetadata
    from os.path import basename
    from os.path import join
    from Formats import output
    
    df.datasets.clear()
    
    # save user preferences
    prof_names,prof_values = save_user_prefs()

    # check input
    if (fns is None or len(fns) == 0) :
        print 'no input datasets'
        return

    # Store vertical axis information
    rot_info = rot_table[str(rot_axis.value)][0]
    # check if input needs to be normalized
    if norm_apply.value:
        # norm_ref is the source of information for normalisation
        # norm_tar is the value norm_ref should become,
        # by multiplication.  If 'auto', the maximum value of norm_ref
        # for the first dataset is used, otherwise any number may be entered.
        norm_ref = str(norm_reference.value)
        norm_tar = norm_target

        # check if normalization target needs to be determined
        if len(norm_tar) == 0:
            norm_ref = None
            norm_tar = None
            print 'WARNING: no reference for normalization was specified'
        elif norm_tar == 'auto':
            # set flag
            norm_tar = -1
            # iterate through input datasets
            location = norm_table[norm_ref]     
            print 'utilized reference value for "' + norm_ref + '" is:', norm_tar
            
        # use provided reference value
        else:
            norm_tar = float(norm_tar)
            
    else:
        norm_ref = None
        norm_tar = None
    
    # check if bkg-map needs to be loaded
    if bkg_apply.value:
        if not bkg_map.value:
            bkg = None
            print 'WARNING: no bkg-map was specified'
        else:
            bkg = Dataset(str(bkg_map.value))
    else:
        bkg = None
    
    # check if eff-map needs to be loaded
    if eff_apply.value:
        if not eff_map.value:
            eff = None
            print 'WARNING: no eff-map was specified'
        else:
            eff = Dataset(str(eff_map.value))
    else:
        eff = None
    # iterate through input datasets
    # note that the normalisation target (an arbitrary number) is set by
    # the first dataset unless it has already been specified.
    for fn in fns:
        # load dataset
        ds = df[fn]
        # extract basic metadata
        ds = reduction.AddCifMetadata.extract_metadata(ds)
        rs = ds.get_reduced()
        rs = rs * 1.0 # convert to float
        rs.copy_cif_metadata(ds)
        # Get future axis values
        
        # check if normalisation is required
        if norm_ref:
            norm_tar = reduction.applyNormalization(rs, reference=norm_table[norm_ref], target=norm_tar)
        if bkg:
            rs = reduction.getBackgroundCorrected(rs, bkg, norm_table[norm_ref], norm_tar)
        # check if efficiency correction is required
        if eff:
            rs = reduction.getEfficiencyCorrected(rs, eff)
        # Sum all frames vertically after trimming according to requirements
        rs = rs[:,vig_lower_boundary.value:vig_upper_boundary.value,:]
        rs = rs.intg(axis=1).get_reduced()
        rs.copy_cif_metadata(ds)
        # create the axes
        units = rot_table[str(rot_axis.value)][2]
        try:
            rot_values = ds[rot_info]
        except:
            try:
                rot_values = SimpleData(ds.__iNXroot__.findContainerByPath(rot_info))
            except:
                rot_values = arange(rs.shape[1])
                units = 'Step Number'
        stth = ds.stth[0]
        vert_axis_name = rot_table[str(rot_axis.value)][1]
        rs.set_axes([rot_values,stth + ds.axes[2]],['Angle',vert_axis_name],['Degrees',units])
        Plot1.set_dataset(rs)
        Plot1.title = rs.title
        Plot1.x_label = 'Angle (degrees)'
        Plot1.y_label = vert_axis_name + ' (' + units + ')'
        # no output yet
        """   filename_base = join(str(out_folder.value),basename(str(fn))[:-7]+'_'+str(output_stem.value)+"_"+str(target_val))