def __run_script__(fns):
    
    from Reduction import reduction, AddCifMetadata
    from os.path import basename
    from os.path import join
    from Formats import output
    import re
    
    df.datasets.clear()
    
    # save user preferences
    prof_names,prof_values = save_user_prefs()

    # check input
    if fns is None or len(fns) == 0:
        print 'no input datasets'
        return

    # set the title for Plot2
    # Plot2.title = 'Plot 2'
    # check if input needs to be normalized
    if norm_apply.value:
        # norm_ref is the source of information for normalisation
        # norm_tar is the value norm_ref should become,
        # by multiplication.  If 'auto', the maximum value of norm_ref
        # for the first dataset is used, otherwise any number may be entered.
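        # A minimal sketch of the scaling applied downstream, following the
        # description above (hypothetical names, for illustration only):
        #   scale = norm_tar / float(reference_counts)   # reference_counts from norm_ref
        #   rs = rs * scale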
        norm_ref = str(norm_reference.value)
        norm_tar = str(norm_target.value).lower()

        # check if normalization target needs to be determined
        if len(norm_tar) == 0:
            norm_ref = None
            norm_tar = None
            print 'WARNING: no reference for normalization was specified'
        elif norm_tar == 'auto':
            # flag value; the actual target is taken from the first dataset
            norm_tar = -1
            location = norm_table[norm_ref]
            print 'reference value for "' + norm_ref + '" will be determined from the first dataset'
            
        # use provided reference value
        else:
            norm_tar = float(norm_tar)
            
    else:
        norm_ref = None
        norm_tar = None
    
    # check if bkg-map needs to be loaded
    if bkg_apply.value:
        if not bkg_map.value:
            bkg = None
            print 'WARNING: no bkg-map was specified'
        else:
            bkg = Dataset(str(bkg_map.value))
            # to avoid complaints in routines that expect it
            AddCifMetadata.add_metadata_methods(bkg)
    else:
        bkg = None
    
    # check if eff-map needs to be loaded
    if eff_apply.value:
        if not eff_map.value:
            eff = None
            print 'WARNING: no eff-map was specified'
        else:
            eff = Dataset(str(eff_map.value))
    else:
        eff = None
    # Check for rescale
    if vig_apply_rescale.value:
        vig_normalisation = float(vig_rescale_target.value)
    else:
        vig_normalisation = -1
    group_val = grouping_options[str(output_grouping.value)]
    # iterate through input datasets
    # note that the normalisation target (an arbitrary number) is set by
    # the first dataset unless it has already been specified.
    for fn in fns:
        # load dataset
        ds = df[fn]
        # extract basic metadata
        ds = AddCifMetadata.extract_metadata(ds)
        try:
            stth_value = sum(ds.stth)/len(ds.stth) # save for later
            all_stth = ds.stth[:] # also save for later
        except TypeError:
            # scalar stth: wrap it so the grouping code below still works
            stth_value = ds.stth
            all_stth = [ds.stth]
        if ds.ndim > 3:
            rs = ds.get_reduced()
        else:
            rs = ds
        rs = rs * 1.0  #convert to float
        rs.copy_cif_metadata(ds)
        # check if normalization is required
        if norm_ref:
            norm_tar = reduction.applyNormalization(rs, reference=norm_table[norm_ref], target=norm_tar)
        if bkg:
            rs = reduction.getBackgroundCorrected(rs, bkg, norm_table[norm_ref], norm_tar)
        # sanity check: the data should be floating point by now
        assert rs.dtype == Array([1.2,1.3]).dtype
        # check if efficiency correction is required
        if eff:
            ds = reduction.getEfficiencyCorrected(rs, eff)
        else:
            ds = rs
        # Calculate inserted string: %s for sample name, %t1 for temperature
        stem = str(output_stem.value)
        stem = re.sub(r'[^\w+=()*^@~:{}\[\].%-]','_',stem)
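        # Example (hypothetical values): an output_stem of 'run_%s_%t1' for a
        # sample named 'Fe3O4 std' at ~300 K becomes 'run_Fe3O4_std_300K'
        # after the substitutions below.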
        if '%s' in stem:
            samplename = ds.harvest_metadata("CIF")['_pd_spec_special_details']
            name_front = re.sub(r'[^\w+=()*^@~:{}\[\].%-]','_',samplename)
            stem = stem.replace('%s',name_front)
        if '%t1' in stem:
            # get tc1 sensor values
            temperature = df[fn]["/entry1/sample/tc1/sensor/sensorValueA"]
            print `temperature`
            try:
                avetemp = sum(temperature)/len(temperature)
            except TypeError:
                avetemp = temperature
            stem = stem.replace('%t1',"%.0fK" % avetemp)
        print 'Filename stem is now ' + stem
        # restrict output set of frames
        restrict_spec = str(output_restrict.value)
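        # Example (hypothetical bounds): a restriction of '5:20' starts the
        # frame loop at frame 5 and stops at frame 20; any value without a
        # colon processes every frame.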
        if ':' in restrict_spec:
            first,last = map(int,restrict_spec.split(':'))
            start_frames = last
            current_frame_start = first
            frameno = first
        else:
            start_frames = len(ds)
            current_frame_start = 0
            frameno = 0
        # perform grouping of sequential input frames   
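        # Sketch of the grouping pass: frameno advances while the group_val
        # metadata matches the value at the start of the current run, and the
        # run [current_frame_start, frameno) is then summed into one pattern.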
        while frameno <= start_frames:
            if group_val is None:
                target_val = ""
                final_frame = start_frames-1
                frameno = start_frames
            else:
                stth_value = all_stth[current_frame_start]
                target_val = ds[group_val][current_frame_start]
                try:
                    if ds[frameno][group_val] == target_val:
                        frameno += 1
                        continue
                except:   #Assume an exception is due to too large frameno
                    print 'Exiting frame loop due to error'
            # frameno is the first frame with the wrong values
            cs = ds.get_section([current_frame_start,0,0],[frameno-current_frame_start,ds.shape[1],ds.shape[2]])
            cs.copy_cif_metadata(ds)
            print 'Summing frames from %d to %d, shape %s, start 2th %f' % (current_frame_start,frameno-1,cs.shape,stth_value)
            if target_val != "":
                print 'Corresponding to a target value of ' + `target_val`
            # sum the input frames
            print 'cs axes: ' + cs.axes[0].title + ' ' + cs.axes[1].title + ' ' + cs.axes[2].title
            # es = cs.intg(axis=0)
            es = reduction.getSummed(cs,applyStth=stth_value)  # does axis correction as well
            es.copy_cif_metadata(cs)
            print 'es axes: ' + es.axes[0].title + ' ' + es.axes[1].title
            Plot1.set_dataset(es)
            cs = reduction.getVerticalIntegrated(es, axis=0, normalization=vig_normalisation,
                                                     bottom = int(vig_lower_boundary.value),
                                                     top=int(vig_upper_boundary.value))
            if target_val != "":
                cs.title = cs.title + "_" + str(target_val)
            try:
                send_to_plot(cs,Plot2,add=True,change_title=False)
            except IndexError:  #catch error from GPlot
                send_to_plot(cs,Plot2,add=False,change_title=True)
            # Output datasets
            # [:-7] strips the extension (assumed 7 characters, e.g. '.nx.hdf')
            filename_base = join(str(out_folder.value),basename(str(fn))[:-7]+'_'+stem+"_"+str(target_val))
            if output_cif.value:
                output.write_cif_data(cs,filename_base)
            if output_xyd.value:
                output.write_xyd_data(cs,filename_base)
            if output_fxye.value:
                output.write_fxye_data(cs,filename_base)
            #loop to next group of datasets
            current_frame_start = frameno
            frameno += 1
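# Hedged usage sketch: in the workflow that drives this script, __run_script__
# is presumably handed a list of data file names, e.g.
#   __run_script__(['ECH0012345.nx.hdf'])   # hypothetical file name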
def __run_script__(fns):
    
    from Reduction import reduction
    from os.path import basename
    from os.path import join
    import time           #how fast are we going?
    import AddCifMetadata,output
    
    elapsed = time.clock()
    print 'Started working at %f' % (time.clock()-elapsed)
    df.datasets.clear()
    
    # check input
    if fns is None or len(fns) == 0:
        print 'no input datasets'
        return

    # check if input needs to be normalized
    if norm_apply.value:
        # norm_ref is the source of information for normalisation
        # norm_tar is the value norm_ref should become,
        # by multiplication.  If 'auto', the maximum value of norm_ref
        # for the first dataset is used, otherwise any number may be entered.
        norm_ref = str(norm_reference.value)
        norm_tar = str(norm_target.value).lower()

        # check if normalization target needs to be determined
        if len(norm_tar) == 0:
            norm_ref = None
            norm_tar = None
            print 'WARNING: no reference for normalization was specified'
        elif norm_tar == 'auto':
            # flag value; the actual target is taken from the first dataset
            norm_tar = -1
            location = norm_table[norm_ref]
            print 'reference value for "' + norm_ref + '" will be determined from the first dataset'
            
        # use provided reference value
        else:
            norm_tar = float(norm_tar)
            
    else:
        norm_ref = None
        norm_tar = None
    
    # check if bkg-map needs to be loaded
    if bkg_apply.value:
        if not bkg_map.value:
            bkg = None
            print 'WARNING: no bkg-map was specified'
        else:
            bkg = Dataset(str(bkg_map.value))
    else:
        bkg = None
    
    # check if eff-map needs to be loaded
    if eff_apply.value:
        if not eff_map.value:
            eff = None
            print 'WARNING: no eff-map was specified'
        else:
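            # cache parsed efficiency maps by file name so repeated
            # reductions avoid re-reading the CIF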
            if eff_map.value not in eff_map_cache:
                eff_map_cache[eff_map.value] = reduction.read_efficiency_cif(str(eff_map.value))
            else:
                print 'Found cached efficiency map ' + str(eff_map.value)
            eff = eff_map_cache[eff_map.value]
    else:
        eff = None
    
    # check if vertical tube correction needs to be loaded
    if vtc_apply.value:
        if not vtc_file.value:
            vtc = None
            print 'WARNING: no vtc-file was specified'
        else:
            vtc = str(vtc_file.value)
    else:
        vtc = None
    
    # check if horizontal tube correction needs to be loaded
    if htc_apply.value:
        if not htc_file.value:
            htc = None
            print 'WARNING: no htc-file was specified'
        else:
            htc = str(htc_file.value)
    else:
        htc = None
        
    # iterate through input datasets
    # note that the normalisation target (an arbitrary number) is set by
    # the first dataset unless it has already been specified.
    for fn in fns:
        # load dataset
        ds = df[fn]
        # extract basic metadata
        ds = AddCifMetadata.extract_metadata(ds)
        # remove redundant dimensions
        rs = ds.get_reduced()
        rs.copy_cif_metadata(ds)
        # check if normalization is required
        if norm_ref:
            ds,norm_tar = reduction.applyNormalization(rs, reference=norm_table[norm_ref], target=norm_tar)
        if bkg:
            ds = reduction.getBackgroundCorrected(ds, bkg, norm_table[norm_ref], norm_tar)
        
        print 'Finished normalisation, background subtraction at %f' % (time.clock()-elapsed)
        # check if vertical tube correction is required
        if vtc:
            ds = reduction.getVerticallyCorrected(ds, vtc)
        print 'Finished vertical offset correction at %f' % (time.clock()-elapsed)
        # check if efficiency correction is required
        if eff:
            ds = reduction.getEfficiencyCorrected(ds, eff)
        
        print 'Finished efficiency correction at %f' % (time.clock()-elapsed)
        # check if horizontal tube correction is required
        if htc:
            ds = reduction.getHorizontallyCorrected(ds, htc)

        print 'Finished horizontal correction at %f' % (time.clock()-elapsed)

        # check if we are recalculating gain
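        # do_overlap refits per-tube gains (regain_iterno.value is assumed to
        # be an iteration limit); the chi-squared history plotted below should
        # flatten as the fit converges.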
        if regain_apply.value:
            print 'ds.has_key(ms): ' + `ds.__dict__.has_key('ms')`
            ds,gain,esds,chisquared = reduction.do_overlap(ds,regain_iterno.value)
            print 'Have new gains at %f' % (time.clock() - elapsed)
            Plot4 = Plot(title='Chi squared history')
            Plot5 = Plot(title='Final Gain')
            fg = Dataset(gain)
            fg.var = esds
            Plot4.set_dataset(Dataset(chisquared))   # chi-squared history
            Plot5.set_dataset(fg)                    # final gain plot
        # assemble dataset
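        # 'stitch frames' presumably places each frame at its own two-theta
        # range, while 'sum frames' co-adds overlapping frames (assumed from
        # the routine names).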
        if ds.ndim > 2:
            asm_algo = str(asm_algorithm.value)
            if asm_algo == 'stitch frames':
                ds = reduction.getStitched(ds)
            elif asm_algo == 'sum frames':
                ds = reduction.getSummed(ds)
            else:
                print 'specify assemble algorithm'
                return
        # Display dataset
        print 'Finished stitching at %f' % (time.clock()-elapsed)
        Plot1.set_dataset(ds)
        Plot1.title = ds.title
        if vig_apply_rescale.value:
            ds = reduction.getVerticalIntegrated(ds, axis=0, normalization=float(vig_rescale_target.value),
                                                 cluster=float(vig_cluster.value))
        else:
            ds = reduction.getVerticalIntegrated(ds, axis=0, cluster=float(vig_cluster.value))
        print 'Finished vertical integration at %f' % (time.clock()-elapsed)
        # Display reduced dataset
        Plot2.set_dataset(ds)
        Plot2.title = ds.title
        # Output datasets
        # [:-7] strips the extension (assumed 7 characters, e.g. '.nx.hdf')
        filename_base = join(str(out_folder.value),str(output_stem.value) + basename(str(fn))[:-7])
        if output_cif.value:
            output.write_cif_data(ds,filename_base)
        if output_xyd.value:
            output.write_xyd_data(ds,filename_base)
        if output_fxye.value:
            output.write_fxye_data(ds,filename_base)
        # ds.save_copy(join(str(out_folder.value), 'reduced_' + basename(str(fn))))
        print 'Finished writing data at %f' % (time.clock()-elapsed)