Code example #1
File: usansdata.py Project: usnistgov/reductus
 def __init__(self,
              metadata=None,
              countTime=None,
              detCts=None,
              transCts=None,
              monCts=None,
              Q=None):
     self.metadata = metadata
     self.countTime = countTime
     self.detCts = Uncertainty(detCts, detCts + (detCts == 0))
     self.transCts = Uncertainty(transCts, transCts + (transCts == 0))
     self.monCts = Uncertainty(monCts, monCts + (monCts == 0))
     self.Q = Q
     self.Q_offset = 0.0
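The variance argument above encodes Poisson counting statistics: the variance equals the counts, and the (detCts == 0) term bumps zero-count channels to variance 1 so no point carries zero uncertainty. A minimal sketch, assuming the dataflow package that ships with reductus is importable:

import numpy as np
from dataflow.lib.uncertainty import Uncertainty

counts = np.array([0, 4, 100])
# variance = counts, except zero-count channels get variance 1
u = Uncertainty(counts, counts + (counts == 0))
# u.x is [0, 4, 100]; u.variance is [1, 4, 100]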
Code example #2
    def __init__(self,
                 data=None,
                 metadata=None,
                 q=None,
                 qx=None,
                 qy=None,
                 aspect_ratio=1.0,
                 xlabel="X",
                 ylabel="Y",
                 theta=None,
                 Tsam=None,
                 Temp=None,
                 attenuation_corrected=False):
        if isinstance(data, np.ndarray):
            self.data = Uncertainty(data, data)
        else:
            self.data = data
        self.metadata = metadata
        # q was not always set by callers; most cases are fixed,
        # but there may be more, so be wary
        self.q = q
        self.qx = qx
        self.qy = qy
        self.qx_max = None
        self.qy_max = None
        self.qx_min = None
        self.qy_min = None
        self.xlabel = xlabel
        self.ylabel = ylabel
        self.aspect_ratio = aspect_ratio
        self.theta = theta
        self.attenuation_corrected = attenuation_corrected

        # Tsam and Temp store the transmissions for later use
        self.Tsam = Tsam
        self.Temp = Temp
Code example #3
File: steps.py Project: usnistgov/reductus
def _loadDivData(entry):
    from collections import OrderedDict
    from copy import deepcopy
    import numpy as np
    from dataflow.lib.uncertainty import Uncertainty
    from .vsansdata import VSansDataRealSpace, short_detectors

    div_entries = []

    for sn in short_detectors:
        new_detectors = OrderedDict()
        new_metadata = deepcopy(entry.metadata)
        detname = 'detector_{short_name}'.format(short_name=sn)
        if detname not in entry.detectors:
            continue
        det = deepcopy(entry.detectors[detname])

        data = det['data']['value']
        if 'linear_data_error' in det and 'value' in det['linear_data_error']:
            # square the stored one-sigma error to get a variance
            data_variance = det['linear_data_error']['value']**2
        else:
            # fall back to Poisson statistics: variance = counts
            data_variance = data
        udata = Uncertainty(data, data_variance)
        det['data'] = udata
        det['norm'] = 1.0
        xDim, yDim = data.shape[:2]
        det['X'] = np.arange(xDim)
        det['Y'] = np.arange(yDim)
        det['dX'] = det['dY'] = 1

        new_metadata['sample.labl'] = detname
        new_detectors[detname] = det
        div_entries.append(VSansDataRealSpace(metadata=new_metadata, detectors=new_detectors))
    
    return div_entries
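Note that Uncertainty takes a variance, not a one-sigma error, as its second argument (compare the att_variance computation in the next example). A minimal conversion sketch:

import numpy as np
from dataflow.lib.uncertainty import Uncertainty

data = np.array([9.0, 16.0])
err = np.array([3.0, 4.0])     # one-sigma errors
u = Uncertainty(data, err**2)  # square the errors to get variances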
Code example #4
def correct_attenuation(sample, instrument="NG7"):
    """
    Divide by the attenuation factor from the lookup tables for the instrument

    **Inputs**

    sample (sans2d): measurement

    instrument (opt:NG7|NGB|NGB30): instrument name

    **Returns**

    atten_corrected (sans2d): corrected measurement
    """
    attenNo = sample.metadata['run.atten']
    wavelength = sample.metadata['resolution.lmda']
    attenuation = lookup_attenuation(instrument, attenNo, wavelength)
    att = attenuation['att']
    percent_err = attenuation['att_err']
    att_variance = (att * percent_err / 100.0)**2
    denominator = Uncertainty(att, att_variance)
    atten_corrected = sample.copy()
    atten_corrected.attenuation_corrected = True
    atten_corrected.data /= denominator
    return atten_corrected
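Dividing by the Uncertainty-wrapped attenuation factor propagates its error into the data. A scalar sketch of the first-order propagation rule (assuming independent errors):

from dataflow.lib.uncertainty import Uncertainty

a = Uncertainty(100.0, 100.0)  # counts, Poisson variance
b = Uncertainty(0.5, 0.0004)   # attenuation factor, 4% relative one-sigma error
c = a / b
# relative variances add to first order:
# c.variance / c.x**2 ≈ a.variance / a.x**2 + b.variance / b.x**2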
Code example #5
File: steps.py Project: usnistgov/reductus
def monitor_normalize(qdata, mon0=1e8):
    """"
    Given a SansData object, normalize the data to the provided monitor

    **Inputs**

    qdata (qspace): data in

    mon0 (float): provided monitor

    **Returns**

    output (qspace): corrected for monitor counts
    2019-09-19  Brian Maranville
    """
    output = qdata.copy()
    monitor = output.metadata['run.moncnt']
    umon = Uncertainty(monitor, monitor)
    for d in output.detectors:
        output.detectors[d]['data'] *= mon0/umon
    return output
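The monitor is itself a counting measurement, so the factor mon0/umon carries the monitor's relative error into every detector pixel. A scalar sketch:

from dataflow.lib.uncertainty import Uncertainty

mon0 = 1e8
monitor = 2.5e7                       # from metadata['run.moncnt']
umon = Uncertainty(monitor, monitor)  # Poisson variance for the monitor
scale = mon0 / umon                   # 4.0, with the monitor's relative error attached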
Code example #6
def product(data, factor_param, propagate_error=True):
    """
    Algebraic multiplication of dataset

    **Inputs**

    data (sans2d): data in (a)

    factor_param (params?): multiplication factor (b), defaults to 1

    propagate_error {Propagate error} (bool): if factor_variance is present, use it

    **Returns**

    output (sans2d): result (c in a*b = c)

    2010-01-02 unknown
    """
    if factor_param is not None:
        # when not propagating, treat the factor as exact (zero variance)
        variance = factor_param.get('factor_variance', 0.0) if propagate_error else 0.0
        return data * Uncertainty(factor_param.get('factor', 1.0), variance)
    else:
        return data
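A sketch of the multiplication, with a hypothetical factor_param dict standing in for the upstream params object:

import numpy as np
from dataflow.lib.uncertainty import Uncertainty

data = Uncertainty(np.array([10.0, 20.0]), np.array([10.0, 20.0]))
factor_param = {'factor': 0.98, 'factor_variance': 1e-4}  # hypothetical values
scaled = data * Uncertainty(factor_param.get('factor', 1.0),
                            factor_param.get('factor_variance', 0.0))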
Code example #7
File: steps.py Project: usnistgov/reductus
def calculate_XY(raw_data, solid_angle_correction=True):
    """
    From the embedded detector metadata, calculate the x, y, z values for each detector.

    **Inputs**

    raw_data (raw[]): raw datafiles

    solid_angle_correction (bool): Divide by solid angle

    **Returns**

    realspace_data (realspace[]): datafiles with realspace information

    | 2018-04-28 Brian Maranville
    | 2019-09-19 Added monitor normalization
    | 2019-09-22 Separated monitor and dOmega norm
    """
    from .vsansdata import VSansDataRealSpace, short_detectors
    from collections import OrderedDict

    output = []
    for r in raw_data:
        metadata = deepcopy(r.metadata)
        monitor_counts = metadata['run.moncnt']
        new_detectors = OrderedDict()
        for sn in short_detectors:
            detname = 'detector_{short_name}'.format(short_name=sn)
            det = deepcopy(r.detectors[detname])

            dimX = int(det['pixel_num_x']['value'][0])
            dimY = int(det['pixel_num_y']['value'][0])
            z_offset = det.get('setback', {"value": [0.0]})['value'][0]
            z = det['distance']['value'][0] + z_offset

            if sn == "B":
                # special handling for back detector
                total = det['integrated_count']['value'][0]
                if total < 1:
                    # don't load the back detector if it has no counts (turned off)
                    continue
                beam_center_x_pixels = det['beam_center_x']['value'][0] # in pixels
                beam_center_y_pixels = det['beam_center_y']['value'][0]

                cal_x = det['cal_x']['value'] # in cm
                cal_y = det['cal_y']['value']

                x_pixel_size = cal_x[0] # cm
                y_pixel_size = cal_y[0] # cm

                beam_center_x = x_pixel_size * beam_center_x_pixels
                beam_center_y = y_pixel_size * beam_center_y_pixels

                # lateral_offset = det['lateral_offset']['value'][0] # already cm
                realDistX =  0.5 * x_pixel_size
                realDistY =  0.5 * y_pixel_size

                data = det['data']['value']
                if 'linear_data_error' in det and 'value' in det['linear_data_error']:
                    # square the stored one-sigma error to get a variance
                    data_variance = det['linear_data_error']['value']**2
                else:
                    # fall back to Poisson statistics: variance = counts
                    data_variance = data
                udata = Uncertainty(data, data_variance)

            else:
                
                orientation = det['tube_orientation']['value'][0].decode().upper()
                coeffs = det['spatial_calibration']['value']
                lateral_offset = 0
                vertical_offset = 0
                beam_center_x = det['beam_center_x']['value'][0]
                beam_center_y = det['beam_center_y']['value'][0]
                panel_gap = det['panel_gap']['value'][0]/10.0 # mm to cm
                if (orientation == "VERTICAL"):
                    x_pixel_size = det['x_pixel_size']['value'][0] / 10.0 # mm to cm
                    y_pixel_size = coeffs[1][0] / 10.0 # mm to cm 
                    lateral_offset = det['lateral_offset']['value'][0] # already cm

                else:
                    x_pixel_size = coeffs[1][0] / 10.0
                    y_pixel_size = det['y_pixel_size']['value'][0] / 10.0 # mm to cm
                    vertical_offset = det['vertical_offset']['value'][0] # already cm

                #solid_angle_correction = z*z / 1e6
                data = det['data']['value']
                if 'linear_data_error' in det and 'value' in det['linear_data_error']:
                    # square the stored one-sigma error to get a variance
                    data_variance = det['linear_data_error']['value']**2
                else:
                    # fall back to Poisson statistics: variance = counts
                    data_variance = data
                udata = Uncertainty(data, data_variance)
                position_key = sn[-1]
                if position_key == 'T':
                    # FROM IGOR: (q,p = 0 for lower-left pixel) 
                    # if(cmpstr("T",detStr[1]) == 0)
                    #   data_realDistY[][] = tube_width*(q+1/2) + offset + gap/2		
                    #   data_realDistX[][] = coefW[0][q] + coefW[1][q]*p + coefW[2][q]*p*p
                    realDistX =  coeffs[0][0]/10.0 # to cm
                    realDistY =  0.5 * y_pixel_size + vertical_offset + panel_gap/2.0
                
                elif position_key == 'B':
                    # FROM IGOR: (q,p = 0 for lower-left pixel) 
                    # if(cmpstr("B",detStr[1]) == 0)
                    #   data_realDistY[][] = offset - (dimY - q - 1/2)*tube_width - gap/2
                    #   data_realDistX[][] = coefW[0][q] + coefW[1][q]*p + coefW[2][q]*p*p
                    realDistX =  coeffs[0][0]/10.0
                    realDistY =  vertical_offset - (dimY - 0.5)*y_pixel_size - panel_gap/2.0
                    
                elif position_key == 'L':
                    # FROM IGOR: (q,p = 0 for lower-left pixel) 
                    # if(cmpstr("L",detStr[1]) == 0)
                    #   data_realDistY[][] = coefW[0][p] + coefW[1][p]*q + coefW[2][p]*q*q
                    #   data_realDistX[][] = offset - (dimX - p - 1/2)*tube_width - gap/2
                    realDistX =  lateral_offset - (dimX - 0.5)*x_pixel_size - panel_gap/2.0
                    realDistY =  coeffs[0][0]/10.0
                    
                elif position_key == 'R':
                    # FROM IGOR: (q,p = 0 for lower-left pixel) 
                    #   data_realDistY[][] = coefW[0][p] + coefW[1][p]*q + coefW[2][p]*q*q
                    #   data_realDistX[][] = tube_width*(p+1/2) + offset + gap/2
                    realDistX =  x_pixel_size*(0.5) + lateral_offset + panel_gap/2.0
                    realDistY =  coeffs[0][0]/10.0

            #x_pos = size_x/2.0 # place panel with lower-right corner at center of view
            #y_pos = size_y/2.0 # 
            x0_pos = realDistX - beam_center_x # then move it the 'real' distance away from the origin,
            y0_pos = realDistY - beam_center_y # which is the beam center

            #metadata['det_' + short_name + '_x0_pos'] = x0_pos
            #metadata['det_' + short_name + '_y0_pos'] = y0_pos
            X,Y = np.indices((dimX, dimY))
            X = X * x_pixel_size + x0_pos
            Y = Y * y_pixel_size + y0_pos
            det['data'] = udata
            det['X'] = X
            det['dX'] = x_pixel_size
            det['Y'] = Y
            det['dY'] = y_pixel_size
            det['Z'] = z
            det['dOmega'] = x_pixel_size * y_pixel_size / z**2
            if solid_angle_correction:
                det['data'] /= det['dOmega']

            new_detectors[detname] = det
        output.append(VSansDataRealSpace(metadata=metadata, detectors=new_detectors))

    return output
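The per-pixel solid angle used above is just pixel area over distance squared; with representative (hypothetical) numbers:

x_pixel_size = 0.8  # cm (hypothetical)
y_pixel_size = 0.8  # cm
z = 400.0           # cm, sample-to-detector distance
dOmega = x_pixel_size * y_pixel_size / z**2  # = 4e-6 steradian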
Code example #8
File: steps.py Project: ronjones432/reductus
def correctJoinData(sample,
                    empty,
                    q_tol=0.01,
                    bkg_level=0.0,
                    emp_level=0.0,
                    thick=1.0,
                    dOmega=7.1e-7):
    """"
    Do the final data reduction.  Requires sample and empty datasets, normalized.
    Multiple inputs in either will be joined into one dataset before processing.
    
    **Inputs**

    sample (data[]): data in

    empty (data): empty in

    q_tol (float): values closer together than this tolerance in Q will be joined into a single point

    bkg_level (float): background level

    emp_level (float): empty background level

    thick (float): thickness of sample, in cm

    dOmega (float): solid angle of detector (steradians)

    **Returns**

    corrected (cor): corrected output

    corrected_info (params[]): correction info

    2020-01-29 Brian Maranville
    """

    from dataflow.lib.uncertainty import Uncertainty
    from .usansdata import USansCorData
    from sansred.sansdata import Parameters

    data_groups = make_groups([s.Q for s in sample])

    reduced_pairs = [
        correctData(s,
                    empty,
                    bkg_level=bkg_level,
                    emp_level=emp_level,
                    thick=thick,
                    dOmega=dOmega) for s in sample
    ]

    reduced_values = [r[0] for r in reduced_pairs]
    reduced_infos = [r[1].params for r in reduced_pairs]

    Qvals = []
    iqCOR_x = []
    iqCOR_variance = []
    for g in data_groups:
        qmean = np.array([x for x, i, j in g]).mean()
        iq = [reduced_values[i].iqCOR[j] for x, i, j in g]
        iqmean = sum(iq) / len(iq)
        Qvals.append(qmean)
        iqCOR_x.append(iqmean.x)
        iqCOR_variance.append(iqmean.variance)

    iqCOR = Uncertainty(iqCOR_x, iqCOR_variance)
    Qvals = np.array(Qvals)

    corrected = USansCorData(metadata=reduced_infos, iqCOR=iqCOR, Q=Qvals)

    return corrected, [Parameters(info) for info in reduced_infos]
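The join works because Uncertainty supports arithmetic: summing grouped points and dividing by the count averages the values while propagating the variances. A scalar sketch (assuming independent errors):

from dataflow.lib.uncertainty import Uncertainty

p1 = Uncertainty(10.0, 4.0)
p2 = Uncertainty(12.0, 9.0)
mean = (p1 + p2) / 2
# mean.x = 11.0; mean.variance = (4.0 + 9.0) / 4 = 3.25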
Code example #9
File: steps.py Project: ronjones432/reductus
def correctData(sample,
                empty,
                bkg_level=0.0,
                emp_level=0.0,
                thick=1.0,
                dOmega=7.1e-7):
    """"
    Do the final data reduction.  Requires normalized sample and empty data.
    
    **Inputs**

    sample (data): data in

    empty (data): empty in

    bkg_level (float): background level

    emp_level (float): empty background level

    thick (float): thickness of sample, in cm

    dOmega (float): solid angle of detector (steradians)

    **Returns**

    corrected (data): corrected output

    corrected_info (params): correction info

    2020-01-29 Brian Maranville
    """
    from dataflow.lib.uncertainty import Uncertainty
    from .usansdata import USansCorData
    from sansred.sansdata import Parameters
    # find q-range of empty:
    empty_qmax = empty.Q.max()
    empty_qmin = empty.Q.min()
    empty_mask = np.logical_and(sample.Q >= empty_qmin, sample.Q <= empty_qmax)
    interpolated_empty = np.interp(sample.Q,
                                   empty.Q,
                                   empty.detCts.x,
                                   left=emp_level,
                                   right=emp_level)
    interpolated_empty_variance = np.interp(sample.Q,
                                            empty.Q,
                                            empty.detCts.variance,
                                            left=0,
                                            right=0)
    tempI = Uncertainty(interpolated_empty, interpolated_empty_variance)

    Twide_sam, _ = _findTWideCts(sample)
    Twide_emp, _ = _findTWideCts(empty)

    pkHtEMP = empty.detCts.x.max()
    pkHtSAM = sample.detCts.x.max()

    Trock = pkHtSAM / pkHtEMP
    Twide = Twide_sam / Twide_emp

    ratio = Trock / Twide

    iqCOR = sample.detCts - Trock * tempI - (1 - Trock) * bkg_level

    scale = 1 / (Twide * thick * dOmega * pkHtEMP)

    iqCOR *= scale

    info = {
        "Sample file": sample.metadata["run.filename"],
        "Empty file": empty.metadata["run.filename"],
        "Trock/Twide": ratio.x,
        "Thickness": thick,
        "Twide": Twide.x,
        "Trock": Trock,
        "Sample Peak Angle": getattr(sample, 'Q_offset', 0.0),
        "Empty Peak Angle": getattr(empty, 'Q_offset', 0.0),
        "Empty level": emp_level,
        "Bkg level": bkg_level,
        "dQv": sample.metadata["dQv"],
        "Start time": sample.metadata["start_time"],
        "Entry": sample.metadata["entry"],
    }

    corrected = USansCorData(metadata=info, iqCOR=iqCOR, Q=sample.Q)

    return corrected, Parameters(info)
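In scalar form, the correction subtracts the rocking-transmission-weighted empty and background, then scales to absolute units; a sketch with hypothetical numbers:

Trock = 0.8        # pkHtSAM / pkHtEMP (hypothetical)
Twide = 0.9
bkg_level = 0.02
thick = 0.1        # cm
dOmega = 7.1e-7    # steradian
pkHtEMP = 1.0e5    # empty-beam peak counts

sample_cts, empty_cts = 50.0, 30.0  # one Q point (hypothetical)
iqCOR = sample_cts - Trock * empty_cts - (1 - Trock) * bkg_level
iqCOR *= 1 / (Twide * thick * dOmega * pkHtEMP)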
Code example #10
def absolute_scaling(sample,
                     empty,
                     div,
                     Tsam,
                     instrument="NG7",
                     integration_box=[55, 74, 53, 72]):
    """
    Calculate absolute scaling

    Coords are taken with reference to bottom left of the image.

    **Inputs**

    sample (sans2d): measurement with sample in the beam

    empty (sans2d): measurement with no sample in the beam

    div (sans2d): DIV measurement

    Tsam (params): sample transmission

    instrument (opt:NG7|NGB|NGB30): instrument name

    integration_box (range:xy): region over which to integrate

    **Returns**

    abs (sans2d): data on absolute scale

    2017-01-13 Andrew Jackson
    """
    # data (that is going through reduction), empty beam,
    # div, Transmission of the sample, instrument (NG3, NG5, NG7)
    # ALL from metadata
    detCnt = empty.metadata['run.detcnt']
    countTime = empty.metadata['run.rtime']
    monCnt = empty.metadata['run.moncnt']
    sdd = empty.metadata['det.dis']  # already in cm
    pixel = empty.metadata['det.pixelsizex']  # already in cm
    lambd = wavelength = empty.metadata['resolution.lmda']

    if not empty.attenuation_corrected:
        attenNo = empty.metadata['run.atten']
        # Need attenTrans - AttenuationFactor - need to know whether NG3, NG5 or NG7 (acctStr)
        attenuation = lookup_attenuation(instrument, attenNo, wavelength)
        att = attenuation['att']
        percent_err = attenuation['att_err']
        att_variance = (att * percent_err / 100.0)**2
        attenTrans = Uncertainty(att, att_variance)
    else:
        # If empty is already corrected for attenuation, don't do it here:
        attenTrans = Uncertainty(1.0, 0.0)

    #-------------------------------------------------------------------------------------#

    # Correct empty beam by the sensitivity
    data = empty / div.data
    # Then take the sum in XY box, including stat. error
    xmin, xmax, ymin, ymax = map(int, integration_box)
    detCnt = np.sum(data.data[xmin:xmax + 1, ymin:ymax + 1])
    print("DETCNT: ", detCnt)

    #------End Result-------#
    # This assumes that the data has not been normalized at all.
    # Thus either fix this or pass un-normalized data.
    # Compute kappa = incident intensity * solid angle of the pixel
    kappa = detCnt / attenTrans * 1.0e8 / monCnt * (pixel / sdd)**2
    #print("Kappa: ", kappa)

    #utc_datetime = date.datetime.utcnow()
    #print(utc_datetime.strftime("%Y-%m-%d %H:%M:%S"))

    Tsam_factor = Uncertainty(Tsam['factor'], Tsam['factor_variance'])

    #-----Using Kappa to Scale data-----#
    Dsam = sample.metadata['sample.thk']
    ABS = sample * (1 / (kappa * Dsam * Tsam_factor))
    #------------------------------------
    return ABS
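kappa combines the attenuation-corrected empty-beam rate with the pixel solid angle; with hypothetical numbers:

detCnt = 1.0e6           # empty-beam counts in the integration box (hypothetical)
attenTrans = 0.01        # attenuation factor
monCnt = 1.0e8           # monitor counts
pixel, sdd = 1.0, 400.0  # cm
kappa = detCnt / attenTrans * 1.0e8 / monCnt * (pixel / sdd)**2  # = 625.0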
Code example #11
def radialToCylindrical(data,
                        theta_offset=0.0,
                        oversample_th=2.0,
                        oversample_r=2.0):
    """
    Convert radial data to cylindrical coordinates

    **Inputs**

    data (sans2d): data to be transformed

    theta_offset (float): move the bounds of the output from the default (0 to 360 deg)

    oversample_th (float): oversampling in theta (to increase fidelity of output)

    oversample_r (float): oversampling in r

    **Returns**

    cylindrical (sans2d): transformed data

    mask (sans2d): normalization array

    2017-05-26 Brian Maranville
    """

    from .cylindrical import ConvertToCylindrical

    if data.qx is None or data.qy is None:
        xmin = -data.metadata['det.beamx']
        xmax = xmin + 128
        ymin = -data.metadata['det.beamy']
        ymax = ymin + 128
    else:
        xmin = data.qx.min()
        xmax = data.qx.max()
        ymin = data.qy.min()
        ymax = data.qy.max()

    print(xmin, xmax, ymin, ymax)
    _, normalization, normalized, extent = ConvertToCylindrical(
        data.data.x.T,
        xmin,
        xmax,
        ymin,
        ymax,
        theta_offset=theta_offset,
        oversample_th=oversample_th,
        oversample_r=oversample_r)

    output = data.copy()
    output.aspect_ratio = None
    output.data = Uncertainty(normalized.T, normalized.T)

    mask = data.copy()
    mask.aspect_ratio = None
    mask.data = Uncertainty(normalization.T, normalization.T)

    if data.qx is not None:
        output.qx = np.linspace(extent[0], extent[1], normalized.shape[1])
        mask.qx = output.qx.copy()
        output.xlabel = mask.xlabel = "theta (degrees)"

    if data.qy is not None:
        output.qy = np.linspace(extent[2], extent[3], normalized.shape[0])
        mask.qy = output.qy.copy()
        output.ylabel = mask.ylabel = "Q (inv. Angstrom)"

    return output, mask