Example No. 1
def calc_eff_naive(vanad,backgr,norm_ref="bm3_counts",var_cutoff=3.0,low_frame=0,
                   high_frame=967):
    """Calculate efficiencies given vanadium and background hdf files. 

    The approach of this simple version is, after subtracting background, to find the sum of
    all the frames and assume that this is proportional to the gain.

    norm_ref is the source of normalisation counts for putting each frame and each dataset onto a
    common scale. Pixels deviating from the mean summed intensity by more than var_cutoff
    standard deviations will not contribute.
    """

    import os,stat,datetime,time,math
    starttime = time.time()
    omega = vanad["mom"][0]  # for reference
    takeoff = vanad["mtth"][0]
    # TODO: intelligent determination of Wombat wavelength
    #crystal = AddCifMetadata.pick_hkl(omega-takeoff/2.0,"335")  #post April 2009 used 335 only
    #
    # Get important information from the basic files
    #
    # Get file times from timestamps as older NeXuS files had bad values here
    #
    #wl = AddCifMetadata.calc_wavelength(crystal,takeoff)
    vtime = os.stat(vanad.location)[stat.ST_CTIME]
    vtime = datetime.datetime.fromtimestamp(vtime)
    vtime = vtime.strftime("%Y-%m-%dT%H:%M:%S%z")
    btime = os.stat(backgr.location)[stat.ST_CTIME]
    btime = datetime.datetime.fromtimestamp(btime)
    btime = btime.strftime("%Y-%m-%dT%H:%M:%S%z")
    # This step required to insert our metadata hooks into the dataset object
    AddCifMetadata.add_metadata_methods(vanad)
    AddCifMetadata.add_metadata_methods(backgr)
    # Fail early
    print 'Using %s and %s' % (str(vanad.location),str(backgr.location))
    # Normalise (if requested), then subtract the background
    check_val = backgr[8,64,64]   # store for checking later
    if norm_ref is not None:
        norm_target = reduction.applyNormalization(vanad,norm_ref,-1)
        nn = reduction.applyNormalization(backgr,norm_ref,norm_target)
        print 'Normalising background to %f' % norm_target
    pure_vanad = (vanad - backgr).get_reduced()    #remove the annoying 2nd dimension
    # drop any frames that have been requested
    if low_frame != 0 or high_frame < len(pure_vanad):
        pure_vanad = pure_vanad[low_frame:high_frame]
        print 'Only using part of supplied data: %d to %d, new length %d' % (low_frame,high_frame,len(pure_vanad))
    # pure_vanad.copy_cif_metadata(vanad)
    print 'Check: %f, %f -> %f' % (vanad[8,64,64],check_val,pure_vanad[8,64,64])
    # This is purely for the fudge map
    d1,d2,d3,fudge_map = nonzero_gain(pure_vanad)
    pure_vanad = pure_vanad.intg(axis=0)   # sum over the detector step axis
    # calculate typical variability across the detector
    ave = pure_vanad.sum()/pure_vanad.size
    stdev = math.sqrt(((pure_vanad-ave)**2).sum()/pure_vanad.size)
    whi = ave + var_cutoff*stdev
    wlo = ave - var_cutoff*stdev
    eff_array = array.zeros_like(pure_vanad)
    # 'and' is not element-wise on arrays, so apply the two cutoffs as
    # successive masked assignments
    eff_array[pure_vanad < whi] = pure_vanad
    eff_array[pure_vanad < wlo] = 0
    # invert only where a value survived the cutoffs, avoiding division by zero
    eff_array[eff_array > 0] = 1.0/eff_array
    eff_error = pure_vanad.var * (eff_array**4)
    ave_eff = eff_array.sum()/(eff_array.size)
    eff_array /= ave_eff
    eff_error /= ave_eff**2
    # pixel OK map...anything less than var_cutoff from the average
    pix_ok_map = array.ones_like(eff_array)
    # zero the two cutoff regions separately ('or' is not element-wise on arrays)
    pix_ok_map[pure_vanad > whi] = 0.0
    pix_ok_map[pure_vanad < wlo] = 0.0
    print "Variance not OK pixels %d" % (pix_ok_map.size - pix_ok_map.sum())
    final_map = Dataset(eff_array)
    final_map.var = eff_error
    return final_map, pix_ok_map, fudge_map
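
# ---------------------------------------------------------------------------
# Illustration only (not part of the routine above): a minimal plain-NumPy
# sketch of the same naive approach, assuming numpy arrays with a
# (frames, y, x) layout rather than gumpy Datasets.  The function and
# argument names are invented for the sketch.
import numpy as np

def naive_efficiency(vanad, backgr, var_cutoff=3.0):
    """Sum frames of (vanad - backgr), treat the sum as the gain, invert."""
    summed = (vanad - backgr).sum(axis=0)        # total counts per pixel
    ave = summed.mean()
    stdev = summed.std()
    # only pixels within var_cutoff standard deviations of the mean contribute
    ok = (summed > ave - var_cutoff * stdev) & (summed < ave + var_cutoff * stdev)
    good = ok & (summed > 0)
    eff = np.zeros_like(summed, dtype=float)
    eff[good] = 1.0 / summed[good]
    eff /= eff[good].mean()                      # normalise mean efficiency to 1
    return eff, ok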
Example No. 2
def calc_eff_mark2(vanad,backgr,norm_ref="bm3_counts",bottom = 0, top = 127,
                   low_frame=0,high_frame=967):
    """Calculate efficiencies given vanadium and background hdf files. 

    The approach of this new version is to calculate relative efficiencies for each pixel at each step,
    then average them at the end.  This allows us to account for variations in illumination as
    a function of time, and allows us to remove V coherent peaks.  It also gives us a 
    decent estimate of the error.

    norm_ref is the source of normalisation counts for putting each frame and each dataset onto a
    common scale. Top and bottom are the upper and lower limits for a sensible signal. """

    import os,stat,datetime,time,sys
    starttime = time.time()
    omega = vanad["mom"][0]  # for reference
    takeoff = vanad["mtth"][0]
    # TODO: intelligent determination of Wombat wavelength
    #crystal = AddCifMetadata.pick_hkl(omega-takeoff/2.0,"335")  #post April 2009 used 335 only
    #
    # Get important information from the basic files
    #
    # Get file times from timestamps as older NeXuS files had bad values here
    #
    #wl = AddCifMetadata.calc_wavelength(crystal,takeoff)
    vtime = os.stat(vanad.location)[stat.ST_CTIME]
    vtime = datetime.datetime.fromtimestamp(vtime)
    vtime = vtime.strftime("%Y-%m-%dT%H:%M:%S%z")
    btime = os.stat(backgr.location)[stat.ST_CTIME]
    btime = datetime.datetime.fromtimestamp(btime)
    btime = btime.strftime("%Y-%m-%dT%H:%M:%S%z")
    # This step required to insert our metadata hooks into the dataset object
    AddCifMetadata.add_metadata_methods(vanad)
    AddCifMetadata.add_metadata_methods(backgr)
    # Fail early
    print 'Using %s and %s' % (str(vanad.location),str(backgr.location))
    # Normalise (if requested), then subtract the background
    check_val = backgr[8,64,64]   # store for checking later
    if norm_ref is not None:
        norm_target = reduction.applyNormalization(vanad,norm_ref,-1)
        nn = reduction.applyNormalization(backgr,norm_ref,norm_target)
        print 'Normalising background to %f' % norm_target
    pure_vanad = (vanad - backgr).get_reduced()    #remove the annoying 2nd dimension
    # drop any frames that have been requested
    if low_frame != 0 or high_frame < len(pure_vanad):
        pure_vanad = pure_vanad[low_frame:high_frame]
        print 'Only using part of supplied data: %d to %d, new length %d' % (low_frame,high_frame,len(pure_vanad))
    stth = pure_vanad.stth   #store for later
    # pure_vanad.copy_cif_metadata(vanad)
    print 'Check: %f, %f -> %f' % (vanad[8,64,64],check_val,pure_vanad[8,64,64])
    nosteps = pure_vanad.shape[0]
    # generate a rough correction
    simple_vanad = pure_vanad.intg(axis=0)   # sum over the detector step axis
    # calculate typical variability across the detector
    eff_array = array.zeros_like(simple_vanad)
    eff_array[simple_vanad > 10] = simple_vanad
    eff_array = eff_array*eff_array.size/eff_array.sum()
    eff_array[eff_array > 0] = 1.0/eff_array   # invert only where counts were accepted above
    # apply this temporary correction to the last frame, which we expect to have
    # the most peaks, as no V peaks will be at a low enough angle to fall off the
    # detector during scanning. If this assumption is incorrect, a more
    # rigorous routine could do this twice, for the first and last frames
    frame_last = (pure_vanad.storage[nosteps-1]*eff_array).intg(axis=0)  #sum vertically
    print 'Final frame max, min values after correction: %f %f' % (max(frame_last),min(frame_last))
    # find the peaks, get a background too
    peak_list,back_lev = peak_find(frame_last)
    # Prepare return information
    peak_pos = [(stth[nosteps-1]+a*0.125,b*0.125) for (a,b) in peak_list]
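    # (a,b above are in detector-tube units; multiplying by 0.125 converts
    # them to degrees, the angular spacing per tube assumed by this code)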
    info_list = "List of peaks found and purged:\n Position  Purge range\n"
    for pos,fwhm in peak_pos:
        info_list += "%8.2f  %8.2f\n" % (pos,fwhm)
    # Remove these peaks from all frames.  Peak positions above are in
    # degrees. The step size is...
    step_size = (stth[nosteps-1]-stth[0])/(nosteps-1)
    print 'Found step size of %f' % step_size
    # Remove all peaks from the pure data
    purged = peak_scrub(pure_vanad,peak_list,step_size,start_at_end=True)
    # Get gain based on pixels above quarter background
    eff_array,eff_error,non_zero_contribs,fudge_map = nonzero_gain(purged,back_lev/(pure_vanad.shape[1]*4))
    final_map = Dataset(eff_array)
    final_map.var = eff_error
    return final_map,non_zero_contribs,fudge_map,frame_last  #last frame, for reference
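
# ---------------------------------------------------------------------------
# Illustration only: the per-step idea described in the docstring above,
# sketched in plain numpy.  Each frame is normalised by its own mean, giving
# one relative-gain observation per pixel per step; averaging over steps then
# yields the gain and an error estimate.  Names and the (steps, y, x) layout
# are assumptions for the sketch.
import numpy as np

def per_step_gain(pure_vanad):
    frame_means = pure_vanad.mean(axis=(1, 2), keepdims=True)  # one mean per step
    rel_gain = pure_vanad / frame_means     # gain observation per pixel per step
    gain = rel_gain.mean(axis=0)            # average the observations
    gain_var = rel_gain.var(axis=0, ddof=1) / len(pure_vanad)  # variance of the mean
    eff = np.ones_like(gain)
    eff[gain > 0] = 1.0 / gain[gain > 0]    # efficiency is the inverse gain
    eff_var = gain_var * eff ** 4           # var(1/g) ~ var(g)/g**4
    return eff, eff_var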
Example No. 3
def calc_eff_mark2(vanad,backgr,v_off,edge=[(0,50),(1,25)],norm_ref="bm3_counts",bottom = 24, top = 104, 
    detail=None,splice=None):
    """Calculate efficiencies given vanadium and background hdf files.  If detail is
    some integer, detailed calculations for that tube will be displayed. Edge is a
    list of tuples ((tube_no,step),) before which the tube is assumed to be blocked and therefore
    data are unreliable. All efficiencies in this area are set to 1.  A value for step larger
    than the total steps will result in zero efficiency for this tube overall. A splicing operation
    merges files in backgr by substituting the first splice steps of the first file with
    the first splice steps of the second file.

    The approach of this new version is to calculate relative efficiencies for each pixel at each step,
    then average them at the end.  This allows us to account for variations in illumination as
    a function of time, and to simply remove V coherent peaks rather than replace them with 
    neighbouring values.  It also gives us a decent estimate of the error.

    norm_ref is the source of normalisation counts for putting each frame and each dataset onto a
    common scale. Top and bottom are the upper and lower limits for a sensible signal."""

    import os,stat,datetime
    omega = vanad.mom[0]  # for reference
    takeoff = vanad.mtth[0]
    crystal = AddCifMetadata.pick_hkl(omega-takeoff/2.0,"335")  #post April 2009 used 335 only
    #
    # Get important information from the basic files
    #
    # Get file times from timestamps as older NeXuS files had bad values here
    #
    wl = AddCifMetadata.calc_wavelength(crystal,takeoff)
    vtime = os.stat(vanad.location)[stat.ST_CTIME]
    vtime = datetime.datetime.fromtimestamp(vtime)
    vtime = vtime.strftime("%Y-%m-%dT%H:%M:%S%z")
    btime = os.stat(backgr.location)[stat.ST_CTIME]
    btime = datetime.datetime.fromtimestamp(btime)
    btime = btime.strftime("%Y-%m-%dT%H:%M:%S%z")
    v_loc = str(vanad.location)
    b_loc = str(backgr.location)
    # Check for missed steps
    b_missing = determine_missed_steps(backgr,tolerance=0.5)
    v_missing = determine_missed_steps(vanad,tolerance=0.85)
    all_missing = set(b_missing) | set(v_missing)
    # This step required to insert our metadata hooks into the dataset object
    AddCifMetadata.add_metadata_methods(vanad)
    AddCifMetadata.add_metadata_methods(backgr)
    # Fail early
    print 'Using %s and %s' % (v_loc,b_loc)
    # Subtract the background
    vanad,norm_target = reduction.applyNormalization(vanad,norm_ref,-1)
    # store for checking later
    check_val = backgr[12,64,64]
    backgr,nn = reduction.applyNormalization(backgr,norm_ref,norm_target)
    # 
    print 'Normalising background to %f'  % norm_target
    pure_vanad = (vanad - backgr).get_reduced()    #remove the annoying 2nd dimension
    pure_vanad.copy_cif_metadata(vanad)
    print 'Check: %f, %f -> %f' % (vanad[12,64,64],check_val,pure_vanad[12,64,64])
    #
    # move the vertical pixels to correct positions
    #
    pure_vanad = reduction.getVerticallyCorrected(pure_vanad,v_off)
    nosteps = pure_vanad.shape[0]
    # now we have to get some efficiency numbers out.  We will have nosteps 
    # observations of each value, if nothing is blocked or scrubbed.   We obtain a
    # relative efficiency for every pixel at each height, and then average to
    # get a mean efficiency together with a standard deviation.
    #
    # We output a multiplier used for normalisation, so we need the inverse
    # of the observed relative value
    #
    # remember the structure of our data: the leftmost index is the vertical
    # pixel number, the right is the angle,
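    # Concretely: for pixel p at step s with counts c[s,p] and step average
    # <c>[s], each gain observation is g[s,p] = c[s,p]/<c>[s], and the output
    # multiplier is 1/mean_s(g[s,p])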
    eff_array = array.zeros(pure_vanad[0].shape)
    eff_error = array.zeros(pure_vanad[0].shape)
    # keep a track of excluded tubes by step to work around lack of count_zero
    # support
    tube_count = array.ones(pure_vanad.shape[0]) * pure_vanad.shape[-1]
    # Now zero out blocked areas. The first bs steps are blocked on tube bt.
    # We are assuming no overlap with V peaks later
    for bt,bs in edge:
        pure_vanad[0:bs,:,bt] = 0
        tube_count[0:bs] = tube_count[0:bs] - 1
    # Now zero out vanadium peaks
    sv_boundaries = scrub_vanad_pos(pure_vanad,takeoff,crystal,nosteps=nosteps)
    for bt,bstart,bfinish in sv_boundaries:
        pure_vanad[bstart:bfinish,:,bt] = 0
        tube_count[bstart:bfinish] = tube_count[bstart:bfinish] - 1
    # Now remove any steps (for all tubes) where there was no signal
    # Gumpy doesn't provide delete method, so we create a new array
    new_shape = pure_vanad.shape
    new_shape[0] = new_shape[0]-len(all_missing)
    new_vanad = zeros(new_shape)
    new_tubecount = array.zeros(new_shape[0])
    keepers = list(set(range(nosteps))-all_missing)
    keepers.sort()   #to put things in the right order
    new_stepno = 0
    for keeper in keepers:
        new_vanad[new_stepno,:,:] = pure_vanad[keeper,:,:]
        new_tubecount[new_stepno] = tube_count[keeper]
        new_stepno += 1
    # And move everything back to the original names...
    pure_vanad = new_vanad
    tube_count = new_tubecount
    print 'Removed %d steps due to low monitor counts: %s' % (len(all_missing),`all_missing`)
    # Now zero out excluded regions
    pure_vanad = pure_vanad[:,bottom:top,:]
    print "Tube count by step: " + `tube_count.tolist()`
    # For each detector position, calculate a factor relative to the mean observed intensity
    # at that step.
    step_sum = pure_vanad.sum(0) #total counts at each step - meaning is different to numpy
    average = step_sum/(tube_count * (top - bottom))  #average value for gain normalisation
    print "Average intensity seen at each step: " + `average.tolist()`
    # No broadcasting, have to be clever.  We have to keep our storage in
    # gumpy, not Jython, so we avoid creating large jython lists by not
    # using map.
    step_gain = ones(pure_vanad.shape)
    for new,old,av in zip(range(len(step_gain)),pure_vanad,average):
        step_gain[new] = old/av
    step_gain = step_gain.transpose()  # so now have [tubeno,vertical,step]
    # Now each point in step gain is the gain of this pixel at that step, using
    # the total counts at that step as normalisation
    # We add the individual observations to obtain the total gain...
    # Note that we have to reshape in order to make the arrays an array of vectors so that
    # mean and covariance will work correctly.  After the reshape + transpose below, we
    # have shape[1]*shape[2] vectors that are shape[0] (ie number of steps) long.
    gain_as_vectors = step_gain.reshape([step_gain.shape[0],step_gain.shape[1]*step_gain.shape[2]]) 
    gain_as_vectors = gain_as_vectors.transpose()
    # count the non-zero contributions
    nonzero_contribs = zeros(gain_as_vectors.shape,dtype=float)
    nonzero_contribs[gain_as_vectors>0] = 1.0
    nz_sum = nonzero_contribs.sum(axis=0)
    gain_sum = gain_as_vectors.sum(axis=0)
    total_gain = array.ones_like(gain_sum)
    total_gain[nz_sum>0] = gain_sum/nz_sum
    final_gain = total_gain.reshape([step_gain.shape[1],step_gain.shape[2]])
    print 'We have total gain: ' + `final_gain`
    print 'Shape ' + `final_gain.shape`
    print 'Min observations per point: %f' % nz_sum.min()
    print 'nz_sum beginning: ' + str(nz_sum.storage[0:200])
    import time
    elapsed = time.clock()
    # efficiency speedup; we would like to write
    # eff_array[:,bottom:top] = 1.0/final_gain
    # but anything in gumpy with square brackets goes crazy slow.
    eff_array_sect = eff_array.get_section([0,bottom],[eff_array.shape[0],top-bottom])
    eas_iter = eff_array_sect.item_iter()
    fgi = final_gain.item_iter()
    while eas_iter.has_next():
        eas_iter.set_next(1.0/fgi.next())
    print 'Efficiency array setting took %f' % (time.clock() - elapsed)
    print 'Check: element (64,64) is %f' % (eff_array[64,64])
    # Calculate the covariance of the final sum as the covariance of the
    # series of observations, divided by the number of observations
    cov_array = zeros(gain_as_vectors.shape,dtype=float)
    # Following is necessary to match dimensions
    total_gain = total_gain.reshape([total_gain.shape[0],1])
    print 'Shapes: ' + `cov_array[:,0].shape` + `gain_as_vectors[:,0].shape` + `total_gain.shape`
    for step in xrange(gain_as_vectors.shape[1]):
        # print 'Covariance step %d' % step
        cov_array[:,step] = (gain_as_vectors[:,step] - total_gain)**2
    # Now ignore the points that are not observed before summing
    cov_array[gain_as_vectors<=0] = 0
    cov_sum = cov_array.sum(axis=0)
    cov_result = cov_sum/(nz_sum - 1)
    covariances = cov_result.reshape([step_gain.shape[1],step_gain.shape[2]])
    print 'We have covariances too! ' + `covariances.shape`
    print 'Writing to eff_error, shape ' + `eff_error[:,bottom:top].shape`
    #   eff_error[tube_no] = (variance*(inverse_val**4))
    # We want to write...
    # eff_error[:,bottom:top] = covariances*(eff_array[:,bottom:top]**4)
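    # (the fourth power comes from first-order error propagation through
    #  f(g) = 1/g: var(f) ~ var(g)/g**4 = var(g)*f**4)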
    # but for a speed-up we write...
    eff_error_sect = eff_error.get_section([0,bottom],[eff_error.shape[0],top-bottom])
    easi = eff_error_sect.item_iter()
    covi = covariances.item_iter()
    effi = eff_array_sect.item_iter()
    while easi.has_next():
        easi.set_next(covi.next()*(effi.next()**4))
    # pixel maps: ok_pixels flags anything with positive efficiency;
    # pix_ok_map flags pixels whose variance exceeds the efficiency
    # (this latter cutoff is arbitrary)
    ok_pixels = zeros(eff_array.shape,dtype=int)
    ok_pixels[eff_array>0]=1
    pix_ok_map = zeros(eff_error.shape,dtype=int)
    pix_ok_map[eff_error > eff_array]=1
    print "OK pixels %d" % ok_pixels.sum() 
    print "Variance not OK pixels %d" % pix_ok_map.sum()
    # Now fix our output arrays to put dodgy pixels to one
    eff_array[eff_error>eff_array] = 1.0
    if splice: 
        backgr_str = backgr[0]+" + " + backgr[1]
        add_str = "data from %s up to step %d replaced with data from %s" % (backgr[0],splice,backgr[1])
    else: 
        backgr_str = backgr
        add_str = ""
    # create blocked tube information table
    ttable = ""
    for btube,bstep in edge:
        ttable = ttable + "  %5d%5d\n" % (btube,bstep)
    return {"_[local]_efficiency_data":eff_array.transpose(),
            "_[local]_efficiency_variance":eff_error.transpose(),
            "contributors":pix_ok_map,
            "_[local]_efficiency_raw_datafile":os.path.basename(v_loc),
            "_[local]_efficiency_raw_timestamp":vtime,
            "_[local]_efficiency_background_datafile":os.path.basename(b_loc),
            "_[local]_efficiency_background_timestamp":btime,
            "_[local]_efficiency_determination_material":"Vanadium",
            "_[local]_efficiency_determination_method":"From flood field produced by 6mm V rod",
            "_[local]_efficiency_pd_instr_2theta_monochr_pre":takeoff,
            "_[local]_efficiency_determination_wavelength":wl,
            "_[local]_efficiency_monochr_omega":omega,
            "_[local]_efficiency_diffrn_radiation_monochromator":crystal,
            "_pd_proc_info_data_reduction":
             "Flood field data lower than values in following table assumed obscured:\n  Tube   Step\n " + ttable + add_str
            }
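
# ---------------------------------------------------------------------------
# Illustration only: the masked averaging used above, in plain numpy.  Zeroed
# observations (blocked edges, scrubbed V peaks, missing steps) are excluded
# by counting the non-zero contributions per pixel, mirroring the nz_sum /
# gain_sum / cov_sum logic; names and the (steps, pixels) layout are
# assumptions for the sketch.
import numpy as np

def masked_gain(step_gain):
    """step_gain: (steps, pixels) relative-gain observations, 0 = excluded."""
    observed = step_gain > 0
    nz = observed.sum(axis=0)                # observations per pixel
    gain = np.ones(step_gain.shape[1])
    gain[nz > 0] = step_gain.sum(axis=0)[nz > 0] / nz[nz > 0]
    # sample variance over the observed steps only (cf. cov_result above)
    dev = np.where(observed, (step_gain - gain) ** 2, 0.0)
    var = np.zeros(step_gain.shape[1])
    var[nz > 1] = dev.sum(axis=0)[nz > 1] / (nz[nz > 1] - 1)
    return gain, var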