def eff_show_proc():
    from Reduction import reduction
    if eff_map.value not in eff_map_cache:
        eff_map_cache[eff_map.value] = reduction.read_efficiency_cif(eff_map.value)
    else:
        print 'Found in cache ' + repr(eff_map_cache[eff_map.value])
    Plot1.set_dataset(eff_map_cache[eff_map.value][0])
    Plot1.title = 'Efficiency map'
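
These eff_show_proc variants all share a cache-on-miss pattern: look the key up in a module-level dict, load and store on a miss, reuse on a hit. A minimal standalone sketch of that pattern (plain Python 2; load_map here is a hypothetical stand-in for reduction.read_efficiency_cif and the GUI objects):

_cache = {}

def load_map(path):
    # Placeholder for the real, expensive loader.
    return 'map data for ' + path

def get_map(path):
    if path not in _cache:   # miss: load and remember
        _cache[path] = load_map(path)
    else:                    # hit: reuse the stored object
        print 'Found in cache ' + repr(_cache[path])
    return _cache[path]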
Example #2
def eff_show_proc():
    from Reduction import reduction
    eff_map_canonical = eff_map.value
    if eff_map.value[0:5] != 'file:':
        eff_map_canonical = 'file:' + eff_map.value
    if eff_map_canonical not in eff_map_cache:
        eff_map_cache[eff_map_canonical] = reduction.read_efficiency_cif(eff_map_canonical)
    else:
        print 'Found in cache ' + repr(eff_map_cache[eff_map_canonical])
    Plot1.clear()
    Plot1.set_dataset(eff_map_cache[eff_map_canonical][0])
    Plot1.title = 'Efficiency map'  #add info to this title!
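
Examples #2 and #3 canonicalise the map path into a 'file:' URL before using it as the cache key, so the same file referenced with and without the scheme shares one cache entry. The same normalisation as a tiny standalone helper (the function name is ours, not from these scripts):

def canonicalise(path):
    # Prefix bare paths with the 'file:' scheme; leave URLs untouched.
    if path[0:5] != 'file:':
        return 'file:' + path
    return path

print canonicalise('/data/eff.cif')       # file:/data/eff.cif
print canonicalise('file:/data/eff.cif')  # unchanged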
Example #3
def eff_show_proc():
    from Reduction import reduction
    eff_map_canonical = get_calibration_path() + '/' + eff_map.value
    if eff_map_canonical[0:5] != 'file:':
        eff_map_canonical = 'file:' + eff_map_canonical
    if eff_map_canonical not in eff_map_cache:
        eff_map_cache[eff_map_canonical] = reduction.read_efficiency_cif(
            eff_map_canonical)
    else:
        print 'Found in cache ' + repr(eff_map_cache[eff_map_canonical])
    Plot1.clear()
    Plot1.set_dataset(eff_map_cache[eff_map_canonical][0])
    Plot1.title = 'Efficiency map'  #add info to this title!
Example #4
def eff_show_proc():
    from Reduction import reduction
    #print "Map cache is " + repr(eff_map_cache)
    #print "Our current key is " + repr(eff_map.value)
    #print "Our value is " + repr(eff_map_cache.get(eff_map.value))
    if eff_map.value not in eff_map_cache:
        eff_map_cache[eff_map.value] = reduction.read_efficiency_cif(eff_map.value)
    else:
        print 'Found in cache ' + repr(eff_map_cache[eff_map.value])
    Plot1.clear()
    # print 'Plotting ' + repr(eff_map_cache[eff_map.value])
    Plot1.set_dataset(eff_map_cache[eff_map.value])
    Plot1.title = 'Efficiency map'  #add info to this title!
Example #5
def __run_script__(fns):

    from Reduction import reduction, AddCifMetadata
    from os.path import basename
    from os.path import join
    import os, re
    import time  #how fast are we going?
    from Formats import output

    elapsed = time.clock()
    print 'Started working at %f' % (time.clock() - elapsed)
    df.datasets.clear()

    # save user preferences
    prof_names, prof_values = save_user_prefs()

    # store current Git versions for data output
    code_versions = {
        "GUI": __UI_gitversion[4:-1],
        "Reduction library": reduction.gitversion[4:-1]
    }
    # check input
    if (fns is None or len(fns) == 0):
        print 'no input datasets'
        return

    # pre-check that we can write the result
#    output_destination = out_folder.value
    output_destination = get_save_path()
    if output_xyd.value or output_fxye.value:
        if not os.access(output_destination, os.W_OK):
            open_error("Unable to write to folder %s" % output_destination)
            return
    # check if input needs to be normalized
    if norm_apply.value:
        # norm_ref is the source of information for normalisation
        # norm_tar is the value norm_ref should become,
        # by multiplication.  If 'auto', the maximum value of norm_ref
        # for the first dataset is used, otherwise any number may be entered.
        norm_ref = str(norm_reference.value)
        if norm_ref.strip() == '':
            open_error(
                "You have asked to apply normalisation but not specified any normalisation reference"
            )
            return
        norm_tar = str(norm_target.value).lower()

        # check if normalization target needs to be determined
        if len(norm_tar) == 0:
            norm_ref = None
            norm_tar = None
            print 'WARNING: no normalization target was specified'
        elif norm_tar == 'auto':
            # set flag
            norm_tar = -1
            # look up the reference location now so an unknown name fails early
            location = norm_table[norm_ref]
            print 'normalisation target for "' + norm_ref + '" will be taken from the first dataset'

        # use provided reference value
        else:
            norm_tar = float(norm_tar)

    else:
        norm_ref = None
        norm_tar = None

    # check if bkg-map needs to be loaded
    if bkg_apply.value:
        if not bkg_map.value:
            bkg = None
            print 'WARNING: no bkg-map was specified'
        else:
            bkg = Dataset(str(bkg_map.value))
    else:
        bkg = None

    # check if eff-map needs to be loaded
    if eff_apply.value:
        if not eff_map.value:
            eff = None
            print 'WARNING: no eff-map was specified'
        else:
            eff_map_canonical = get_calibration_path() + '/' + eff_map.value
            if eff_map_canonical[0:5] != 'file:':
                eff_map_canonical = 'file:' + eff_map_canonical
            if eff_map_canonical not in eff_map_cache:
                try:
                    eff_map_cache[eff_map_canonical] = reduction.read_efficiency_cif(
                        eff_map_canonical)
                except Exception:
                    open_error("Failed to read efficiency file %s" %
                               eff_map_canonical)
                    return
            else:
                print 'Found in cache ' + repr(eff_map_canonical)
            eff = eff_map_cache[eff_map_canonical]
    else:
        eff = None

    # check if vertical tube correction needs to be loaded
    if vtc_apply.value:
        if not vtc_file.value:
            vtc = None
            print 'WARNING: no vtc-file was specified'
        else:
            vtc = get_calibration_path() + '/' + str(vtc_file.value)
    else:
        vtc = None
    # check if horizontal tube correction needs to be loaded
    if htc_apply.value:
        if not htc_file.value:
            htc = None
            print 'WARNING: no htc-file was specified'
        else:
            htc = get_calibration_path() + '/' + str(htc_file.value)
    else:
        htc = None

    reduced_files = []
    # iterate through input datasets
    # note that the normalisation target (an arbitrary number) is set by
    # the first dataset unless it has already been specified.
    for fn in fns:
        # load dataset
        ds = df[fn]
        # extract basic metadata
        ds = AddCifMetadata.extract_metadata(ds, codeversions=code_versions)
        AddCifMetadata.store_reduction_preferences(ds, prof_names, prof_values)
        # remove redundant dimensions and convert to floating point
        rs = ds.get_reduced() * 1.0
        rs.copy_cif_metadata(ds)
        # check if normalisation is required
        if norm_ref:
            ds, norm_tar = reduction.applyNormalization(
                rs, reference=norm_table[norm_ref], target=norm_tar)
        else:
            ds = rs
        if bkg:
            ds = reduction.getBackgroundCorrected(ds, bkg,
                                                  norm_table[norm_ref],
                                                  norm_tar)

        print 'Finished normalisation, background subtraction at %f' % (
            time.clock() - elapsed)
        # check if vertical tube correction is required
        if vtc:
            ds = reduction.getVerticallyCorrected(ds, vtc)
        print 'Finished vertical offset correction at %f' % (time.clock() -
                                                             elapsed)
        # check if efficiency correction is required
        if eff:
            ds = reduction.getEfficiencyCorrected(ds, eff)

        print 'Finished efficiency correction at %f' % (time.clock() - elapsed)
        # Before fiddling with axes, get the ideal stepsize
        stepsize = reduction.get_stepsize(ds)
        print 'Ideal stepsize determined to be %f' % stepsize
        # check if horizontal tube correction is required
        if htc:
            ds = reduction.getHorizontallyCorrected(ds, htc)

        print 'Finished horizontal correction at %f' % (time.clock() - elapsed)
        # Stitching. If we are recalculating gain, this is purely for
        # informational purposes. We don't want to take the 100x time penalty of
        # multiplying a 2D array by the gain factor for each tube, so we
        # stitch on a 1D array after doing the gain re-refinement.
        if ds.ndim > 2:
            stitched = reduction.getStitched(ds,
                                             ignore=str(asm_drop_frames.value))
        else:
            stitched = ds  # already a single frame; nothing to stitch
        # Display dataset
        print 'Finished stitching at %f' % (time.clock() - elapsed)
        Plot1.set_dataset(stitched)
        Plot1.title = stitched.title
        # check if we are recalculating gain
        if regain_apply.value:
            bottom = int(vig_lower_boundary.value)
            top = int(vig_upper_boundary.value)
            cs, gain, esds, chisquared, no_overlaps = reduction.do_overlap(
                ds,
                regain_iterno.value,
                bottom=bottom,
                top=top,
                exact_angles=htc,
                drop_frames=str(asm_drop_frames.value))
            if cs is not None:
                print 'Have new gains at %f' % (time.clock() - elapsed)
                fg = Dataset(gain)
                fg.var = esds
                # set horizontal axis (ideal values)
                # Plot4.set_dataset(Dataset(chisquared))   #chisquared history
                # Plot5.set_dataset(fg)   #final gain plot
            else:
                open_error(
                    "Cannot do gain recalculation as the scan ranges do not overlap."
                )
                return
        if not vig_apply_rescale.value:
            norm_const = -1.0
        else:
            norm_const = float(vig_rescale_target.value)
        # set the cluster value
        if str(vig_cluster.value) in ['Merge', 'Sum']:
            cluster = (stepsize * 0.6, str(vig_cluster.value))  # 60 percent of ideal
        else:
            cluster = (0.0, 'None')
        if not regain_apply.value:  #already done
            final_result = reduction.getVerticalIntegrated(
                stitched,
                axis=0,
                normalization=norm_const,
                cluster=cluster,
                bottom=int(vig_lower_boundary.value),
                top=int(vig_upper_boundary.value))
            print 'Finished vertical integration at %f' % (time.clock() -
                                                           elapsed)
        else:
            if str(vig_cluster.value) == 'Sum':  # simulate a sum for the gain-recalculated value
                cs *= no_overlaps
                info_string = "\nFinal values were multiplied by %d to simulate summation of individual points." % no_overlaps
                cs.add_metadata("_pd_proc_info_data_reduction",
                                info_string,
                                append=True)
            final_result = cs
        # Display reduced dataset
        send_to_plot(final_result, Plot2)
        if copy_acc.value:  #user wants us to accumulate it
            plh_copy_proc()
        # Output datasets
        # Calculate inserted string: %s for sample name, %t for temperature
        stem = str(output_stem.value)
        stem = re.sub(r'[^\w+=()*^@~:{}\[\].%-]', '_', stem)
        if '%s' in stem:
            samplename = final_result.harvest_metadata(
                "CIF")['_pd_spec_special_details']
            name_front = samplename.split()[0]
            stem = stem.replace('%s', name_front)
        if '%t' in stem:
            temperature = 'Unknown_temperature'
            stem = stem.replace('%t', temperature)
        print 'Filename stem is now ' + stem
        filename_base = join(get_save_path(),
                             basename(str(fn))[:-7] + '_' + stem)
        if output_xyd.value or output_fxye.value:  #write CIF if other files written
            output.write_cif_data(final_result, filename_base)
            reduced_files.append(filename_base + '.cif')
        if output_xyd.value:
            output.write_xyd_data(final_result,
                                  filename_base,
                                  codeversions=code_versions)
            reduced_files.append(filename_base + '.xyd')
        if output_fxye.value:
            output.write_fxye_data(final_result,
                                   filename_base,
                                   codeversions=code_versions)
            reduced_files.append(filename_base + '.xye')

        # ds.save_copy(join(str(out_folder.value), 'reduced_' + basename(str(fn))))
        print 'Finished writing data at %f' % (time.clock() - elapsed)

    if len(reduced_files) > 0:
        zip_files(reduced_files,
                  'Echidna_rd_' + str(int(time.time()))[2:] + '.zip')
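
The output-naming step above sanitises the user-supplied stem with re.sub and then substitutes %s (first word of the sample name) and %t (temperature). That naming logic in isolation, as a runnable sketch (the sample values are invented for illustration):

import re

def make_stem(stem, samplename='Si_standard powder', temperature='300K'):
    # Replace anything outside the allowed character set with '_'.
    stem = re.sub(r'[^\w+=()*^@~:{}\[\].%-]', '_', stem)
    if '%s' in stem:
        stem = stem.replace('%s', samplename.split()[0])  # first word only
    if '%t' in stem:
        stem = stem.replace('%t', temperature)
    return stem

print make_stem('%s at %t/run 1')   # Si_standard_at_300K_run_1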
Example #6
def __run_script__(fns):
    
    from Reduction import reduction,AddCifMetadata
    from os.path import basename
    from os.path import join
    import os,re
    import time           #how fast are we going?
    from Formats import output
    
    num_step = 9
    prog_bar.max = len(fns) * num_step
    prog_bar.selection = 1

    elapsed = time.clock()
    print 'Started working at %f' % (time.clock()-elapsed)
    df.datasets.clear()
    
    # save user preferences
    prof_names,prof_values = save_user_prefs()

    # store current Git versions for data output
    code_versions = {"GUI":__UI_gitversion[4:-1],
                     "Reduction library":reduction.gitversion[4:-1]}
    # check input
    if (fns is None or len(fns) == 0) :
        print 'no input datasets'
        return

    # pre-check that we can write the result
    output_destination = out_folder.value
    if output_xyd.value or output_fxye.value:
        if not os.access(output_destination,os.W_OK):
            open_error("Unable to write to folder %s" % output_destination)
            return
    # check if input needs to be normalized
    if norm_apply.value:
        # norm_ref is the source of information for normalisation
        # norm_tar is the value norm_ref should become,
        # by multiplication.  If 'auto', the maximum value of norm_ref
        # for the first dataset is used, otherwise any number may be entered.
        norm_ref = str(norm_reference.value)
        if norm_ref.strip() == '':
            open_error("You have asked to apply normalisation but not specified any normalisation reference")
            return
        norm_tar = str(norm_target.value).lower()

        # check if normalization target needs to be determined
        if len(norm_tar) == 0:
            norm_ref = None
            norm_tar = None
            print 'WARNING: no normalization target was specified'
        elif norm_tar == 'auto':
            # set flag
            norm_tar = -1
            # look up the reference location now so an unknown name fails early
            location = norm_table[norm_ref]
            print 'normalisation target for "' + norm_ref + '" will be taken from the first dataset'
            
        # use provided reference value
        else:
            norm_tar = float(norm_tar)
            
    else:
        norm_ref = None
        norm_tar = None
    
    # check if bkg-map needs to be loaded
    if bkg_apply.value:
        if not bkg_map.value:
            bkg = None
            print 'WARNING: no bkg-map was specified'
        else:
            bkg = Dataset(str(bkg_map.value)).get_reduced()
    else:
        bkg = None
    
    # check if eff-map needs to be loaded
    if eff_apply.value:
        if not eff_map.value:
            eff = None
            print 'WARNING: no eff-map was specified'
        else:
            eff_map_canonical = str(eff_map.value)
            if eff_map_canonical[0:5] != 'file:':
                eff_map_canonical = 'file:' + eff_map_canonical
            if eff_map_canonical not in eff_map_cache:
                try:
                    eff_map_cache[eff_map_canonical] = reduction.read_efficiency_cif(eff_map_canonical)
                except Exception:
                    open_error("Failed to read efficiency file %s" % eff_map_canonical)
                    return
            else:
                print 'Found in cache ' + repr(eff_map_canonical)
            eff = eff_map_cache[eff_map_canonical]
    else:
        eff = None
    
    # check if vertical tube correction needs to be loaded
    if vtc_apply.value:
        if not vtc_file.value:
            vtc = None
            print 'WARNING: no vtc-file was specified'
        else:
            vtc = str(vtc_file.value)
    else:
        vtc = None
    # check if horizontal tube correction needs to be loaded
    if htc_apply.value:
        if not htc_file.value:
            htc = None
            print 'WARNING: no htc-file was specified'
        else:
            htc = str(htc_file.value)
    else:
        htc = None

    # check if gain correction needs to be loaded
    regain_data = []
    if regain_load.value:
        if not regain_load_filename.value:
            open_error("You have requested loading of gain correction from a file but no file has been specified")
            return
        rlf = str(regain_load_filename.value)
        regain_data = reduction.load_regain_values(rlf)
    # iterate through input datasets
    # note that the normalisation target (an arbitrary number) is set by
    # the first dataset unless it has already been specified.
    prog_bar.selection = 2
    fn_idx = 0
    for fn in fns:
        # load dataset
        ds = df[fn]
        if not norm_uniform.value:
            norm_tar = -1   #reinitialise
        try:
            prog_bar.selection = fn_idx * num_step
            # extract basic metadata
            print 'Code versions: ' + repr(code_versions)
            ds = AddCifMetadata.extract_metadata(ds,codeversions=code_versions)
            AddCifMetadata.store_reduction_preferences(ds,prof_names,prof_values)
            # remove redundant dimensions and convert to floating point
            rs = ds.get_reduced()*1.0
            rs.copy_cif_metadata(ds)
            # check if normalisation is required
            if norm_ref:
                ds,norm_tar = reduction.applyNormalization(rs, reference=norm_table[norm_ref], target=norm_tar)
            else:
                ds = rs
            if bkg:
                AddCifMetadata.add_metadata_methods(bkg)
                ds = reduction.getBackgroundCorrected(ds, bkg, norm_table[norm_ref], norm_tar)
            print 'Finished normalisation, background subtraction at %f' % (time.clock()-elapsed)
            prog_bar.selection = fn_idx * num_step + 1
            # check that we have the necessary dimensions
            dims = ds.shape
            if dims[1] != 128:
                rebin_factor = int(dims[1]/128)
                print 'Need to rebin from %d to 128, factor of %d; stand by...' % (dims[1],rebin_factor)
                ds = reduction.rebin(ds,axis=1,factor=rebin_factor)
            else:
                print 'No need to rebin, dataset shape is ' + repr(dims)
            # check if vertical tube correction is required
            if vtc:
                ds = reduction.getVerticallyCorrected(ds, vtc)
            print 'Finished vertical offset correction at %f' % (time.clock()-elapsed)
            prog_bar.selection = fn_idx * num_step + 2
            # check if efficiency correction is required
            if eff:
                ds = reduction.getEfficiencyCorrected(ds, eff)
            
            print 'Finished efficiency correction at %f' % (time.clock()-elapsed)
            prog_bar.selection = fn_idx * num_step + 3
            # Before fiddling with axes, get the ideal stepsize
            stepsize = reduction.get_stepsize(ds)
            print 'Ideal stepsize determined to be %f' % stepsize
            prog_bar.selection = fn_idx * num_step + 4
            # check if horizontal tube correction is required
            if htc:
                ds = reduction.getHorizontallyCorrected(ds, htc)
    
            print 'Finished horizontal correction at %f' % (time.clock()-elapsed)
            prog_bar.selection = fn_idx * num_step + 5
            # Stitching. If we are recalculating gain, this is purely for
            # informational purposes. We don't want to take the 100x time penalty of
            # multiplying a 2D array by the gain factor for each tube, so we
            # stitch using a 1D array after doing the gain re-refinement.
            drop_tubes = str(asm_drop_tubes.value)
            if ds.ndim > 2:
                # See if we are ignoring any tubes
                stitched = reduction.getStitched(ds, ignore=str(asm_drop_frames.value), drop_tubes=drop_tubes)
            else:
                stitched = ds  # already a single frame; nothing to stitch
            # Display dataset
            print 'Finished stitching at %f' % (time.clock()-elapsed)
            prog_bar.selection = fn_idx * num_step + 6
            Plot1.set_dataset(stitched)
            Plot1.title = stitched.title
            n_logger.log_plot(Plot1, footer = Plot1.title)
            # check if we are recalculating gain 
            if regain_apply.value:
                bottom = int(vig_lower_boundary.value)
                top = int(vig_upper_boundary.value)
                dumpfile = None
                # if regain_dump_tubes.value:
                #     dumpfile = filename_base + ".tubes"
                cs, gain, esds, chisquared, no_overlaps = reduction.do_overlap(
                    ds, regain_iterno.value, bottom=bottom, top=top,
                    exact_angles=htc, drop_frames=str(asm_drop_frames.value),
                    drop_tubes=drop_tubes, use_gains=regain_data,
                    dumpfile=dumpfile, do_sum=regain_sum.value)
                if cs is not None:
                    print 'Have new gains at %f' % (time.clock() - elapsed)
                    fg = Dataset(gain)
                    fg.var = esds**2
                    # set horizontal axis (ideal values)
                    Plot4.set_dataset(Dataset(chisquared))   #chisquared history
                    Plot5.set_dataset(fg)   #final gain plot
                    # now save the file if requested
                    if regain_store.value and not regain_load.value:
                        gain_comment = "Gains refined from file %s" % fn
                        reduction.store_regain_values(str(regain_store_filename.value), gain, gain_comment)
                else:
                    open_error("Cannot do gain recalculation as the scan ranges do not overlap.")
                    return
            if not vig_apply_rescale.value:
                norm_const = -1.0
            else:
                norm_const = float(vig_rescale_target.value)
            # set the cluster value
            if str(vig_cluster.value) in ['Merge','Sum']:
                cluster = (stepsize * 0.6,str(vig_cluster.value))  #60 percent of ideal
            else:
                cluster = (0.0,'None')
            if not regain_apply.value:  #already done
                final_result = reduction.getVerticalIntegrated(stitched, axis=0, normalization=norm_const,
                                                     cluster=cluster,bottom = int(vig_lower_boundary.value),
                                                     top=int(vig_upper_boundary.value))
                print 'Finished vertical integration at %f' % (time.clock()-elapsed)
            else:
                if str(vig_cluster.value) == 'Sum':  #simulate a sum for the gain recalculated value
                    cs *= no_overlaps
                    info_string = "\nFinal values were multiplied by %d to simulate summation of individual points." % no_overlaps
                    cs.add_metadata("_pd_proc_info_data_reduction",info_string,append=True)
                if norm_const > 0:  #rescale requested but not performed
                    reduction.rescale(cs,norm_const)
                final_result = cs
            prog_bar.selection = fn_idx * num_step + 7
            # Display reduced dataset
            send_to_plot(final_result,Plot2)
            n_logger.log_plot(Plot2, footer = Plot2.title)
            if copy_acc.value:   #user wants us to accumulate it
                plh_copy_proc()
            # Output datasets
            # Calculate inserted string: %s for sample name, %t for temperature
            stem = str(output_stem.value)
            stem = re.sub(r'[^\w+=()*^@~:{}\[\].%-]','_',stem)
            if '%s' in stem:
                samplename = final_result.harvest_metadata("CIF")['_pd_spec_special_details']
                name_front = samplename.split()[0]
                stem = stem.replace('%s', name_front)
            if '%t' in stem:
                temperature = 'Unknown_temperature'
                stem = stem.replace('%t', temperature)
            print 'Filename stem is now ' + stem
            filename_base = join(str(out_folder.value),basename(str(fn))[:-7] + '_' + stem)
            if output_xyd.value or output_fxye.value or output_topas.value:  #write CIF if other files written
                output.write_cif_data(final_result,filename_base)
            if output_xyd.value:
                add_header = output_naked.value
                output.write_xyd_data(final_result,filename_base,codeversions=code_versions,naked=add_header)
            if output_fxye.value:
                output.write_fxye_data(final_result,filename_base,codeversions=code_versions)
            if output_topas.value:
                output.write_xyd_data(final_result,filename_base,codeversions=code_versions,comment_char="!",extension='topas')
            # ds.save_copy(join(str(out_folder.value), 'reduced_' + basename(str(fn))))
            print 'Finished writing data at %f' % (time.clock()-elapsed)
            prog_bar.selection = fn_idx * num_step + 8
            fn_idx += 1
        finally:
            df[fn].close()
            prog_bar.selection = 0
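
Example #6 adds a guard that rebins the vertical axis down to 128 pixels when the raw data is larger, after computing an integer rebin factor. A minimal pure-Python sketch of integer-factor rebinning (the real reduction.rebin works on Gumtree datasets; summing adjacent bins is an assumption here):

def rebin_axis1(rows, factor):
    # Collapse each group of 'factor' adjacent values into their sum.
    out = []
    for row in rows:
        out.append([sum(row[i:i + factor]) for i in range(0, len(row), factor)])
    return out

data = [[1, 2, 3, 4], [5, 6, 7, 8]]
print rebin_axis1(data, 2)   # [[3, 7], [11, 15]]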
Example #7
def __run_script__(fns):
    
    from Reduction import reduction
    from os.path import basename
    from os.path import join
    import time           #how fast are we going?
    import AddCifMetadata,output
    
    elapsed = time.clock()
    print 'Started working on Split Scans at %f' % (time.clock()-elapsed)
    df.datasets.clear()
    
    # check input
    if (fns is None or len(fns) == 0) :
        print 'no input datasets'
        return

    # check if input needs to be normalized
    if norm_apply.value:
        # norm_ref is the source of information for normalisation
        # norm_tar is the value norm_ref should become,
        # by multiplication.  If 'auto', the maximum value of norm_ref
        # for the first dataset is used, otherwise any number may be entered.
        norm_ref = str(norm_reference.value)
        norm_tar = str(norm_target.value).lower()

        # check if normalization target needs to be determined
        if len(norm_tar) == 0:
            norm_ref = None
            norm_tar = None
            print 'WARNING: no normalization target was specified'
        elif norm_tar == 'auto':
            # set flag
            norm_tar = -1
            # look up the reference location now so an unknown name fails early
            location = norm_table[norm_ref]
            print 'normalisation target for "' + norm_ref + '" will be taken from the first dataset'
            
        # use provided reference value
        else:
            norm_tar = float(norm_tar)
            
    else:
        norm_ref = None
        norm_tar = None
    
    # check if bkg-map needs to be loaded
    if bkg_apply.value:
        if not bkg_map.value:
            bkg = None
            print 'WARNING: no bkg-map was specified'
        else:
            bkg = Dataset(str(bkg_map.value))
    else:
        bkg = None
    
    # check if eff-map needs to be loaded
    if eff_apply.value:
        if not eff_map.value:
            eff = None
            print 'WARNING: no eff-map was specified'
        else:
            eff = reduction.read_efficiency_cif(str(eff_map.value))
            if eff.ndim != 2:
                raise AttributeError('eff.ndim != 2')
    else:
        eff = None
    
    # check if vertical tube correction needs to be loaded
    if vtc_apply.value:
        if not vtc_file.value:
            vtc = None
            print 'WARNING: no vtc-file was specified'
        else:
            vtc = str(vtc_file.value)
    else:
        vtc = None
    
    # check if horizontal tube correction needs to be loaded
    if htc_apply.value:
        if not htc_file.value:
            htc = None
            print 'WARNING: no htc-file was specified'
        else:
            htc = str(htc_file.value)
    else:
        htc = None
        
    # iterate through input datasets
    # note that the normalisation target (an arbitrary number) is set by
    # the first dataset unless it has already been specified.
    for fn in fns:
        # load dataset
        ds = df[fn]
        # extract basic metadata
        ds = reduction.AddCifMetadata.extract_metadata(ds)
        # remove redundant dimensions
        ds = ds.get_reduced()
        # check if normalisation is required
        if norm_ref:
            norm_tar = reduction.applyNormalization(ds, reference=norm_table[norm_ref], target=norm_tar)
        if bkg:
            ds = reduction.getBackgroundCorrected(ds, bkg, norm_table[norm_ref], norm_tar)
        
        print 'Finished normalisation, background subtraction at %f' % (time.clock()-elapsed)
        # check if vertical tube correction is required
        if vtc:
            ds = reduction.getVerticallyCorrected(ds, vtc)
        print 'Finished vertical offset correction at %f' % (time.clock()-elapsed)
        # check if efficiency correction is required
        if eff:
            ds = reduction.getEfficiencyCorrected(ds, eff)
        
        print 'Finished efficiency correction at %f' % (time.clock()-elapsed)
        # check if horizontal tube correction is required
        if htc:
            ds = reduction.getHorizontallyCorrected(ds, htc)

        print 'Finished horizontal correction at %f' % (time.clock()-elapsed)
        if vig_apply_rescale.value:
            ds = reduction.getVerticalIntegrated(ds, normalization=float(vig_rescale_target.value))
        else:
            ds = reduction.getVerticalIntegrated(ds)
        print 'Finished vertical integration at %f' % (time.clock()-elapsed)
        Plot1.clear()
        Plot1.set_dataset(ds)
        # output.write_cif_data(ds,join(str(out_folder.value), 'reduced_' + basename(str(fn))[:-7]))
        # Now write out each tube's data
        output.dump_tubes(ds,join(str(out_folder.value), 'split_' + basename(str(fn))[:-7]))
        print 'Finished writing data at %f' % (time.clock()-elapsed)
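
All of these scripts parse the normalisation target the same way: an empty string disables normalisation, 'auto' becomes a -1 flag meaning "take the maximum from the first dataset", and anything else is read as a number. That convention as a standalone helper (the function name is ours):

def parse_norm_target(text):
    # Returns None (disabled), -1 (the 'auto' flag), or a float target.
    text = str(text).strip().lower()
    if len(text) == 0:
        print 'WARNING: no normalization target was specified'
        return None
    elif text == 'auto':
        return -1   # flag: use the maximum of the first dataset
    else:
        return float(text)

print parse_norm_target('auto')    # -1
print parse_norm_target('10000')   # 10000.0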
Example #8
def __run_script__(fns):
    global Plot4,Plot5,Plot6
    from Reduction import reduction,AddCifMetadata
 
    from os.path import basename
    from os.path import join
    import time           #how fast are we going?
    from Formats import output
    
    elapsed = time.clock()
    print 'Started working at %f' % (time.clock()-elapsed)
    df.datasets.clear()
    
    # check input
    if (fns is None or len(fns) == 0) :
        print 'no input datasets'
        return

    # check if input needs to be normalized
    if norm_apply.value:
        # norm_ref is the source of information for normalisation
        # norm_tar is the value norm_ref should become,
        # by multiplication.  If 'auto', the maximum value of norm_ref
        # for the first dataset is used, otherwise any number may be entered.
        norm_ref = str(norm_reference.value)
        norm_tar = str(norm_target.value).lower()

        # check if normalization target needs to be determined
        if len(norm_tar) == 0:
            norm_ref = None
            norm_tar = None
            print 'WARNING: no normalization target was specified'
        elif norm_tar == 'auto':
            # set flag
            norm_tar = -1
            # look up the reference location now so an unknown name fails early
            location = norm_table[norm_ref]
            print 'normalisation target for "' + norm_ref + '" will be taken from the first dataset'
            
        # use provided reference value
        else:
            norm_tar = float(norm_tar)
            
    else:
        norm_ref = None
        norm_tar = None

    # check if eff-map needs to be loaded
    if eff_apply.value:
        if not eff_map.value:
            eff = None
            print 'WARNING: no eff-map was specified'
        else:
            eff_map_canonical = str(eff_map.value)
            if eff_map_canonical[0:5] != 'file:':
                eff_map_canonical = 'file:' + eff_map_canonical
            if eff_map_canonical not in eff_map_cache:
                eff_map_cache[eff_map_canonical] = reduction.read_efficiency_cif(eff_map_canonical)
            else:
                print 'Found in cache ' + repr(eff_map_canonical)
            eff = eff_map_cache[eff_map_canonical]
    else:
        eff = None
    
    # check if vertical tube correction needs to be loaded
    if vtc_apply.value:
        if not vtc_file.value:
            vtc = None
            print 'WARNING: no vtc-file was specified'
        else:
            vtc = str(vtc_file.value)
    else:
        vtc = None
    
    # iterate through input datasets
    # note that the normalisation target (an arbitrary number) is set by
    # the first dataset unless it has already been specified.
    for fn in fns:
        # load dataset
        ds = df[fn]
        # extract basic metadata
        ds = reduction.AddCifMetadata.extract_metadata(ds)
        # remove redundant dimensions
        rs = ds.get_reduced()
        rs.copy_cif_metadata(ds)
        # check if normalisation is required
        if norm_ref:
            ds,norm_tar = reduction.applyNormalization(rs, reference=norm_table[norm_ref], target=norm_tar)
        
        print 'Finished normalisation at %f' % (time.clock()-elapsed)
        # check if vertical tube correction is required
        if vtc:
            ds = reduction.getVerticallyCorrected(ds, vtc)
        print 'Finished vertical offset correction at %f' % (time.clock()-elapsed)
        # check if efficiency correction is required
        if eff:
            ds = reduction.getEfficiencyCorrected(ds, eff)
        
        print 'Finished efficiency correction at %f' % (time.clock()-elapsed)
        # check if we are recalculating gain
        if regain_apply.value:
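            # NB: array, overlap, iterate_data and arange used below are not
            # imported in this snippet; they appear to be supplied by the
            # surrounding Gumtree/gumpy scripting environment.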
            b = ds.intg(axis=1).get_reduced()  #reduce dimension
            ignore = regain_ignore.value    # number of leading tubes to ignore
            # Determine pixels per tube interval
            tube_pos = ds.axes[-1]
            tubesep = abs(tube_pos[0]-tube_pos[-1])/(len(tube_pos)-1)
            tube_steps = ds.axes[0]
            bin_size = abs(tube_steps[0]-tube_steps[-1])/(len(tube_steps)-1)
            pixel_step = int(round(tubesep/bin_size))
            bin_size = tubesep/pixel_step
            print '%f tube separation, %d steps before overlap, ideal binsize %f' % (tubesep,pixel_step,bin_size)
            # Reshape with individual sections summed
            c = b.reshape([b.shape[0]/pixel_step,pixel_step,b.shape[-1]])
            print repr(b.shape) + ' -> ' + repr(c.shape)
            # sum the individual unoverlapped sections
            d = c.intg(axis=1)
            e = d.transpose()
            # we skip the first tubes' data as it is all zero
            # Get an initial average to start with
            bottom = vig_lower_boundary.value
            top = vig_upper_boundary.value
            resummed = ds[:,bottom:top,:]
            resummed = resummed.intg(axis=1).get_reduced()
            first_gain = array.ones(len(b.transpose())-ignore)
            first_ave, x, first_var = overlap.apply_gain(
                resummed.transpose()[ignore:, :],
                1.0/resummed.transpose().var[ignore:, :],
                pixel_step, first_gain, calc_var=True)
            if regain_unit_weights.value is True:
                weights = array.ones_like(e[ignore:])
            else:
                weights = 1.0/e[ignore:].var
            q = iterate_data(e[ignore:], weights, pixel_step=1, iter_no=int(regain_iterno.value))
            # Now we actually apply the vertical limits requested
            f, x, varf = overlap.apply_gain(
                resummed.transpose()[ignore:, :],
                1.0/resummed.transpose().var[ignore:, :],
                pixel_step, q[0], calc_var=True)
            # Get error for full dataset
            esds = overlap.calc_error_new(b.transpose()[ignore:, :], f, q[0], pixel_step)
            f = Dataset(f)
            f.title = "After scaling"
            f.var = varf
            # construct the ideal axes
            axis = arange(len(f))
            f.axes[0] = axis*bin_size + ds.axes[0][0] + ignore*pixel_step*bin_size
            f.copy_cif_metadata(ds)
            print repr(f.shape) + ' ' + repr(x.shape)
            Plot1.set_dataset(f)
            first_ave = Dataset(first_ave)
            first_ave.var = first_var
            first_ave.title = "Before scaling"
            first_ave.axes[0] = f.axes[0]
            Plot1.add_dataset(Dataset(first_ave))
            Plot4.set_dataset(Dataset(q[4]))
            fg = Dataset(q[0])
            fg.var = esds
            Plot5.set_dataset(fg)
            # show old esds
            fgold = Dataset(q[0])
            fgold.var = q[5]
            Plot5.add_dataset(fgold)
            residual_map = Dataset(q[3])
            try:
                Plot6.set_dataset(residual_map)
            except:
                pass
        print 'Finished regain calculation at %f' % (time.clock() - elapsed)
        # Output datasets
        filename_base = join(str(out_folder.value),str(output_stem.value) + basename(str(fn))[:-7])
        if output_cif.value:
            output.write_cif_data(f,filename_base)
        if output_xyd.value:
            output.write_xyd_data(f,filename_base)
        if output_fxye.value:
            output.write_fxye_data(f,filename_base)
        print 'Finished writing data at %f' % (time.clock()-elapsed)
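
The regain section above derives how many scan steps fit between neighbouring tubes from the two axis spacings, then recomputes an exactly commensurate bin size. That arithmetic is easy to check in isolation (the axis values below are invented for illustration):

# Invented values: tube centres 1.25 degrees apart, scan steps of 0.125.
tube_pos = [0.0, 1.25, 2.5, 3.75]
tube_steps = [0.0, 0.125, 0.25, 0.375, 0.5]

tubesep = abs(tube_pos[0] - tube_pos[-1]) / (len(tube_pos) - 1)
bin_size = abs(tube_steps[0] - tube_steps[-1]) / (len(tube_steps) - 1)
pixel_step = int(round(tubesep / bin_size))   # steps before tubes overlap
bin_size = tubesep / pixel_step               # ideal, exactly commensurate
print '%f tube separation, %d steps before overlap, ideal binsize %f' % (
    tubesep, pixel_step, bin_size)
# -> 1.250000 tube separation, 10 steps before overlap, ideal binsize 0.125000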
def __run_script__(fns):
    
    from Reduction import reduction
    from os.path import basename
    from os.path import join
    import time           #how fast are we going?
    import AddCifMetadata,output
    
    elapsed = time.clock()
    print 'Started working at %f' % (time.clock()-elapsed)
    df.datasets.clear()
    
    # check input
    if (fns is None or len(fns) == 0) :
        print 'no input datasets'
        return

    # check if input needs to be normalized
    if norm_apply.value:
        # norm_ref is the source of information for normalisation
        # norm_tar is the value norm_ref should become,
        # by multiplication.  If 'auto', the maximum value of norm_ref
        # for the first dataset is used, otherwise any number may be entered.
        norm_ref = str(norm_reference.value)
        norm_tar = str(norm_target.value).lower()

        # check if normalization target needs to be determined
        if len(norm_tar) == 0:
            norm_ref = None
            norm_tar = None
            print 'WARNING: no normalization target was specified'
        elif norm_tar == 'auto':
            # set flag
            norm_tar = -1
            # look up the reference location now so an unknown name fails early
            location = norm_table[norm_ref]
            print 'normalisation target for "' + norm_ref + '" will be taken from the first dataset'
            
        # use provided reference value
        else:
            norm_tar = float(norm_tar)
            
    else:
        norm_ref = None
        norm_tar = None
    
    # check if bkg-map needs to be loaded
    if bkg_apply.value:
        if not bkg_map.value:
            bkg = None
            print 'WARNING: no bkg-map was specified'
        else:
            bkg = Dataset(str(bkg_map.value))
    else:
        bkg = None
    
    # check if eff-map needs to be loaded
    if eff_apply.value:
        if not eff_map.value:
            eff = None
            print 'WARNING: no eff-map was specified'
        else:
            if eff_map.value not in eff_map_cache:
                eff_map_cache[eff_map.value] = reduction.read_efficiency_cif(str(eff_map.value))
            else:
                print 'Found cached efficiency map ' + str(eff_map.value)
            eff = eff_map_cache[eff_map.value]
    else:
        eff = None
    
    # check if vertical tube correction needs to be loaded
    if vtc_apply.value:
        if not vtc_file.value:
            vtc = None
            print 'WARNING: no vtc-file was specified'
        else:
            vtc = str(vtc_file.value)
    else:
        vtc = None
    
    # check if horizontal tube correction needs to be loaded
    if htc_apply.value:
        if not htc_file.value:
            htc = None
            print 'WARNING: no htc-file was specified'
        else:
            htc = str(htc_file.value)
    else:
        htc = None
        
    # iterate through input datasets
    # note that the normalisation target (an arbitrary number) is set by
    # the first dataset unless it has already been specified.
    for fn in fns:
        # load dataset
        ds = df[fn]
        # extract basic metadata
        ds = reduction.AddCifMetadata.extract_metadata(ds)
        # remove redundant dimensions
        rs = ds.get_reduced()
        rs.copy_cif_metadata(ds)
        # check if normalisation is required
        if norm_ref:
            ds,norm_tar = reduction.applyNormalization(rs, reference=norm_table[norm_ref], target=norm_tar)
        if bkg:
            ds = reduction.getBackgroundCorrected(ds, bkg, norm_table[norm_ref], norm_tar)
        
        print 'Finished normalisation, background subtraction at %f' % (time.clock()-elapsed)
        # check if vertical tube correction is required
        if vtc:
            ds = reduction.getVerticallyCorrected(ds, vtc)
        print 'Finished vertical offset correction at %f' % (time.clock()-elapsed)
        # check if efficiency correction is required
        if eff:
            ds = reduction.getEfficiencyCorrected(ds, eff)
        
        print 'Finished efficiency correction at %f' % (time.clock()-elapsed)
        # check if horizontal tube correction is required
        if htc:
            ds = reduction.getHorizontallyCorrected(ds, htc)

        print 'Finished horizontal correction at %f' % (time.clock()-elapsed)

        # check if we are recalculating gain
        if regain_apply.value:
            print 'ds.has_key(ms): ' + repr('ms' in ds.__dict__)
            ds, gain, esds, chisquared = reduction.do_overlap(ds, regain_iterno.value)
            print 'Have new gains at %f' % (time.clock() - elapsed)
            Plot4 = Plot(title='Chi squared history')
            Plot5 = Plot(title='Final Gain')
            fg = Dataset(gain)
            fg.var = esds
            Plot4.set_dataset(Dataset(chisquared))   #chisquared history
            Plot5.set_dataset(fg)   #final gain plot
        # assemble dataset
        if ds.ndim > 2:
            asm_algo = str(asm_algorithm.value)
            if asm_algo == 'stitch frames':
                ds = reduction.getStitched(ds)
            elif asm_algo == 'sum frames':
                ds = reduction.getSummed(ds)
            else:
                print 'specify assemble algorithm'
                return
        # Display dataset
        print 'Finished stitching at %f' % (time.clock()-elapsed)
        Plot1.set_dataset(ds)
        Plot1.title = ds.title
        if vig_apply_rescale.value:
            ds = reduction.getVerticalIntegrated(ds, axis=0, normalization=float(vig_rescale_target.value),
                                                 cluster=float(vig_cluster.value))
        else:
            ds = reduction.getVerticalIntegrated(ds, axis=0, cluster=float(vig_cluster.value))
        print 'Finished vertical integration at %f' % (time.clock()-elapsed)
        # Display reduced dataset
        Plot2.set_dataset(ds)
        Plot2.title = ds.title
        # Output datasets
        filename_base = join(str(out_folder.value),str(output_stem.value) + basename(str(fn))[:-7])
        if output_cif.value:
            output.write_cif_data(ds,filename_base)
        if output_xyd.value:
            output.write_xyd_data(ds,filename_base)
        if output_fxye.value:
            output.write_fxye_data(ds,filename_base)
        # ds.save_copy(join(str(out_folder.value), 'reduced_' + basename(str(fn))))
        print 'Finished writing data at %f' % (time.clock()-elapsed)