Example #1
0
def __run_script__(fns):

    from Reduction import reduction, AddCifMetadata
    from os.path import basename
    from os.path import join
    import os, re
    import time  #how fast are we going?
    from Formats import output

    elapsed = time.clock()
    print 'Started working at %f' % (time.clock() - elapsed)
    df.datasets.clear()

    # save user preferences
    prof_names, prof_values = save_user_prefs()

    # store current Git versions for data output
    code_versions = {
        "GUI": __UI_gitversion[4:-1],
        "Reduction library": reduction.gitversion[4:-1]
    }
    # check input
    if (fns is None or len(fns) == 0):
        print 'no input datasets'
        return

    # pre-check that we can write the result
#    output_destination = out_folder.value
    output_destination = get_save_path()
    if output_xyd.value or output_fxye.value:
        if not os.access(output_destination, os.W_OK):
            open_error("Unable to write to folder %s" % output_destination)
            return
    # check if input needs to be normalized
    if norm_apply.value:
        # norm_ref is the source of information for normalisation
        # norm_tar is the value norm_ref should become,
        # by multiplication.  If 'auto', the maximum value of norm_ref
        # for the first dataset is used, otherwise any number may be entered.
        norm_ref = str(norm_reference.value)
        if norm_ref.strip() == '':
            open_error(
                "You have asked to apply normalisation but not specified any normalisation reference"
            )
            return
        norm_tar = str(norm_target).lower()

        # check if normalization target needs to be determined
        if len(norm_tar) == 0:
            norm_ref = None
            norm_tar = None
            print 'WARNING: no reference for normalization was specified'
        elif norm_tar == 'auto':
            # set flag
            norm_tar = -1
            # iterate through input datasets
            location = norm_table[norm_ref]
            print 'utilized reference value for "' + norm_ref + '" is:', norm_tar

        # use provided reference value
        else:
            norm_tar = float(norm_tar)

    else:
        norm_ref = None
        norm_tar = None

    # check if bkg-map needs to be loaded
    if bkg_apply.value:
        if not bkg_map.value:
            bkg = None
            print 'WARNING: no bkg-map was specified'
        else:
            bkg = Dataset(str(bkg_map.value))
    else:
        bkg = None

    # check if eff-map needs to be loaded
    if eff_apply.value:
        if not eff_map.value:
            eff = None
            print 'WARNING: no eff-map was specified'
        else:
            eff_map_canonical = get_calibration_path() + '/' + eff_map.value
            if eff_map_canonical[0:5] != 'file:':
                eff_map_canonical = 'file:' + eff_map_canonical
            if not eff_map_canonical in eff_map_cache:
                try:
                    eff_map_cache[
                        eff_map_canonical] = reduction.read_efficiency_cif(
                            eff_map_canonical)
                except:
                    open_error("Failed to read efficiency file %s" %
                               eff_map_canonical)
                    return
            else:
                print 'Found in cache ' + ` eff_map_canonical `
        eff = eff_map_cache[eff_map_canonical]
    else:
        eff = None

    # check if vertical tube correction needs to be loaded
    if vtc_apply.value:
        if not vtc_file.value:
            vtc = None
            print 'WARNING: no vtc-file was specified'
        else:
            vtc = get_calibration_path() + '/' + str(vtc_file.value)
    else:
        vtc = None
    # check if horizontal tube correction needs to be loaded
    if htc_apply.value:
        if not htc_file.value:
            htc = None
            print 'WARNING: no htc-file was specified'
        else:
            htc = get_calibration_path() + '/' + str(htc_file.value)
    else:
        htc = None

    reduced_files = []
    # iterate through input datasets
    # note that the normalisation target (an arbitrary number) is set by
    # the first dataset unless it has already been specified.
    for fn in fns:
        # load dataset
        ds = df[fn]
        # extract basic metadata
        ds = AddCifMetadata.extract_metadata(ds, codeversions=code_versions)
        AddCifMetadata.store_reduction_preferences(ds, prof_names, prof_values)
        # remove redundant dimensions and convert to floating point
        rs = ds.get_reduced() * 1.0
        rs.copy_cif_metadata(ds)
        # check if normalized is required
        if norm_ref:
            ds, norm_tar = reduction.applyNormalization(
                rs, reference=norm_table[norm_ref], target=norm_tar)
        else:
            ds = rs
        if bkg:
            ds = reduction.getBackgroundCorrected(ds, bkg,
                                                  norm_table[norm_ref],
                                                  norm_tar)

        print 'Finished normalisation, background subtraction at %f' % (
            time.clock() - elapsed)
        # check if vertical tube correction is required
        if vtc:
            ds = reduction.getVerticallyCorrected(ds, vtc)
        print 'Finished vertical offset correction at %f' % (time.clock() -
                                                             elapsed)
        # check if efficiency correction is required
        if eff:
            ds = reduction.getEfficiencyCorrected(ds, eff)

        print 'Finished efficiency correction at %f' % (time.clock() - elapsed)
        # Before fiddling with axes, get the ideal stepsize
        stepsize = reduction.get_stepsize(ds)
        print 'Ideal stepsize determined to be %f' % stepsize
        # check if horizontal tube correction is required
        if htc:
            ds = reduction.getHorizontallyCorrected(ds, htc)

        print 'Finished horizontal correction at %f' % (time.clock() - elapsed)
        # Stitching. If we are recalculating gain, this is purely for
        # informational purposes. We don't want to take the 100x time penalty of
        # multiplying a 2D array by the gain factor for each tube, so we
        # stitch on a 1D array after doing the gain re-refinement.
        if ds.ndim > 2:
            stitched = reduction.getStitched(ds,
                                             ignore=str(asm_drop_frames.value))
        # Display dataset
        print 'Finished stitching at %f' % (time.clock() - elapsed)
        Plot1.set_dataset(stitched)
        Plot1.title = stitched.title
        # check if we are recalculating gain
        if regain_apply.value:
            bottom = int(vig_lower_boundary.value)
            top = int(vig_upper_boundary.value)
            cs, gain, esds, chisquared, no_overlaps = reduction.do_overlap(
                ds,
                regain_iterno.value,
                bottom=bottom,
                top=top,
                exact_angles=htc,
                drop_frames=str(asm_drop_frames.value))
            if cs is not None:
                print 'Have new gains at %f' % (time.clock() - elapsed)
                fg = Dataset(gain)
                fg.var = esds
            # set horizontal axis (ideal values)


#               Plot4.set_dataset(Dataset(chisquared))   #chisquared history
#               Plot5.set_dataset(fg)   #final gain plot
            else:
                open_error(
                    "Cannot do gain recalculation as the scan ranges do not overlap."
                )
                return
        if not vig_apply_rescale.value:
            norm_const = -1.0
        else:
            norm_const = float(vig_rescale_target.value)
        # set the cluster value
        if str(vig_cluster.value) in ['Merge', 'Sum']:
            cluster = (stepsize * 0.6, str(vig_cluster.value)
                       )  #60 percent of ideal
        else:
            cluster = (0.0, 'None')
        if not regain_apply.value:  #already done
            final_result = reduction.getVerticalIntegrated(
                stitched,
                axis=0,
                normalization=norm_const,
                cluster=cluster,
                bottom=int(vig_lower_boundary.value),
                top=int(vig_upper_boundary.value))
            print 'Finished vertical integration at %f' % (time.clock() -
                                                           elapsed)
        else:
            if str(vig_cluster.value
                   ) == 'Sum':  #simulate a sum for the gain recalculated value
                cs *= no_overlaps
                info_string = "\nFinal values were multiplied by %d to simulate summation of individual points." % no_overlaps
                cs.add_metadata("_pd_proc_info_data_reduction",
                                info_string,
                                append=True)
            final_result = cs
        # Display reduced dataset
        send_to_plot(final_result, Plot2)
        if copy_acc.value:  #user wants us to accumulate it
            plh_copy_proc()
        # Output datasets
        # Calculate inserted string: %s for sample name, %t for temperature
        stem = str(output_stem.value)
        stem = re.sub(r'[^\w+=()*^@~:{}\[\].%-]', '_', stem)
        if '%s' in stem:
            samplename = final_result.harvest_metadata(
                "CIF")['_pd_spec_special_details']
            name_front = samplename.split()[0]
            stem = stem.replace('%s', name_front)
        if '%t' in stem:
            temperature = 'Unknown_temperature'
            stem = stem.replace('%t', temperature)
        print 'Filename stem is now ' + stem
        filename_base = join(get_save_path(),
                             basename(str(fn))[:-7] + '_' + stem)
        if output_xyd.value or output_fxye.value:  #write CIF if other files written
            output.write_cif_data(final_result, filename_base)
            reduced_files.append(filename_base + '.cif')
        if output_xyd.value:
            output.write_xyd_data(final_result,
                                  filename_base,
                                  codeversions=code_versions)
            reduced_files.append(filename_base + '.xyd')
        if output_fxye.value:
            output.write_fxye_data(final_result,
                                   filename_base,
                                   codeversions=code_versions)
            reduced_files.append(filename_base + '.xye')

        # ds.save_copy(join(str(out_folder.value), 'reduced_' + basename(str(fn))))
        print 'Finished writing data at %f' % (time.clock() - elapsed)

    if len(reduced_files) > 0:
        zip_files(reduced_files,
                  'Echidna_rd_' + str(int(time.time()))[2:] + '.zip')