Example #1
File: Analysis.py Project: robios/PyTES
def ka(pha, sigma=1):
    """
    Return a k-alpha data set.
    
    Parameters (and their default values):
        pha:    pha (and optional companion) data (array-like)
        sigma:  sigmas allowed for median filter (Default: 1)

    Return (data)
        data:   k-alpha data set
    """
    
    pha = np.asarray(pha)

    # Pre-selection for more robustness (dirty hack)
    if pha.ndim > 1:
        mask = median_filter(pha[:,0], sigma=sigma+1)
    else:
        mask = median_filter(pha, sigma=sigma+1)

    pha = pha[mask]
    
    if pha.ndim > 1:
        mask = median_filter(pha[:,0], sigma=sigma)
    else:
        mask = median_filter(pha, sigma=sigma)
    
    return pha[mask]
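
A minimal usage sketch (not part of the original file): it assumes Analysis.py and its median_filter dependency are importable, and it feeds in synthetic pulse heights where a dense cluster near 5.90 plays the role of the K-alpha line.

import numpy as np
from Analysis import ka   # adjust the import to the actual package layout

rng = np.random.default_rng(0)
# Synthetic pulse heights: a dense K-alpha-like cluster plus a smaller,
# higher cluster standing in for K-beta
pha = np.concatenate([rng.normal(5.90, 0.005, 1000),
                      rng.normal(6.49, 0.005, 400)])

ka_pha = ka(pha, sigma=1)
print(ka_pha.size, ka_pha.mean())   # the mean should land near 5.90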
Example #2
def kb(pha, sigma=1):
    """
    Find a k-beta data set.
    
    Parameters (and their default values):
        pha:    pha (and optional companion) data (array-like)
        sigma:  sigmas allowed for median filter (Default: 1)

    Return (data)
        data:   k-beta data set
    """
    
    pha = np.asarray(pha)
    
    if pha.ndim > 1:
        _pha = pha[:,0]
    else:
        _pha = pha
    
    ka_mean = ka(_pha).mean()
    ka_mask = median_filter(_pha, sigma=sigma)
    
    kb_mask = median_filter(_pha[~ka_mask & (_pha > ka_mean)], sigma=sigma)
    kb_pha  = pha[~ka_mask & (_pha > ka_mean)]
    
    return kb_pha[kb_mask]
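
A companion sketch for kb under the same assumptions as above (importable Analysis.py, synthetic two-cluster data); exactly which points survive depends on how median_filter draws its cut, so the numbers in the comments are only indicative.

import numpy as np
from Analysis import ka, kb   # adjust the import to the actual package layout

rng = np.random.default_rng(1)
pha = np.concatenate([rng.normal(5.90, 0.005, 1000),   # K-alpha-like cluster
                      rng.normal(6.49, 0.005, 400)])   # K-beta-like cluster

print(ka(pha).mean())   # roughly 5.90
print(kb(pha).mean())   # roughly 6.49 in this synthetic setup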
Example #3
File: Analysis.py Project: robios/PyTES
def _offset_correction(pha, offset, sigma=1, thre=0.4, full=False):
    """
    Perform an offset (DC level) correction for PHA
    
    Parameters (and their default values):
        pha:    pha data (array-like)
        offset: offset data (array-like)
        sigma:  sigmas allowed for median filter (Default: 1)
        thre:   correlation coefficient threshold (Default: 0.4)
        full:   return full output if True (Default: False)
    
    Return (pha, (a, b), coef) if full is True otherwise (pha):
        pha:    corrected pha data
        a, b:   fitting results
        coef:   correlation coefficient
    
    Note:
        - Correction will not be done if absolute correlation coefficient
          is lower than threshold.
    """
    
    # Sanity check
    if len(pha) != len(offset):
        raise ValueError("data length of pha and offset does not match")
    
    # Zip pha and offset
    data = np.vstack((pha, offset)).T
    
    # Correction using K-alpha
    ka_pha, ka_offset = ka(data, sigma=sigma).T
    ka_pha_mask = median_filter(ka_pha, sigma=sigma)
    ka_offset_mask = median_filter(ka_offset, sigma=sigma)
    ka_pha = ka_pha[ka_pha_mask & ka_offset_mask]
    ka_offset = ka_offset[ka_pha_mask & ka_offset_mask]
    
    # Check correlation coefficient
    coef = np.corrcoef(((ka_pha), (ka_offset)))[0,1]
    
    # Perform correction when corrcoef exceeds threshold
    if abs(coef) > thre:
        popt, covt = curve_fit(lambda x, a, b: a*(1+b*x), ka_offset, ka_pha)
        ka_a, ka_b = popt
        corrected_pha = pha/(1+ka_b*offset)
    else:
        corrected_pha = pha
        ka_a, ka_b = None, None
    
    if full:
        return corrected_pha, (ka_a, ka_b), coef
    else:
        return corrected_pha
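
A hedged usage sketch for the offset correction: the data below are synthetic, with PHA generated as a*(1 + b*offset) plus noise so the correlation clearly exceeds the 0.4 threshold, and _offset_correction is assumed importable from Analysis.py.

import numpy as np
from Analysis import _offset_correction   # adjust the import as needed

rng = np.random.default_rng(2)
offset = rng.normal(0.0, 0.01, 2000)
# PHA drifts linearly with the DC level: pha = a*(1 + b*offset) + noise
pha = 5.90 * (1.0 + 2.0 * offset) + rng.normal(0.0, 0.002, 2000)

corrected, (a, b), coef = _offset_correction(pha, offset, full=True)
print(coef)                          # close to 1 for this synthetic data
print(pha.std(), corrected.std())    # the corrected spread should shrink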
Example #4
def ka(pha, sigma=1):
    """
    Return a k-alpha data set.
    
    Parameters (and their default values):
        pha:    pha (and optional companion) data (array-like)
        sigma:  sigmas allowed for median filter (Default: 1)

    Return (data)
        data:   k-alpha data set
    """
    
    pha = np.asarray(pha)
    
    if pha.ndim > 1:
        mask = median_filter(pha[:,0], sigma=sigma)
    else:
        mask = median_filter(pha, sigma=sigma)
    
    return pha[mask]
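
A sketch of the "companion" form mentioned in the docstring (hypothetical data, assumed import): column 0 carries the pha values that drive the selection, and the resulting mask is applied to every column, which is how _offset_correction in Example #3 feeds pha/offset pairs into ka.

import numpy as np
from Analysis import ka   # adjust the import to the actual package layout

rng = np.random.default_rng(3)
pha = rng.normal(5.90, 0.005, 1000)      # pulse heights (column 0)
offset = rng.normal(0.0, 0.01, 1000)     # companion quantity (column 1)

# The mask is built from column 0 but both columns are reduced together
ka_pha, ka_offset = ka(np.vstack((pha, offset)).T).T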
Example #5
def yopen(filenumber, summary=False, nf=None, tmin=None, tmax=None):
    """
    Read Yokogawa WVF file
    
    Parameters
    ==========
        filenumber: file number to read
        summary:    summarize waves if True (default: False)
        nf:         sigmas for valid data using median noise filter, None to disable noise filter (default: None)
        tmin:       lower boundary of time for partial extraction, scalar or list (Default: None)
        tmax:       upper boundary of time for partial extraction, scalar or list (Default: None)
    
    Returns
    =======
        if summary is False:
            [ [t1, d1], [t2, d2], [t3, d3], ... ]
        
        if summary is True:
            [ [t1, d1, err1], [t2, d2, err2], ... ]
        
        where t1 is timing for 1st ch, d1 is data for 1st ch, err1 is error (1sigma) for 1st ch, and so on.
    """

    # Read header (HDR)
    h = open(str(filenumber) + ".HDR")
    lines = h.readlines()
    h.close()

    # Parse $PublicInfo
    for line in lines:
        token = line.split()

        if len(token) > 0:
            # Check endian
            if token[0] == "Endian":
                endian = '>' if token[1] == "Big" else '<'

            # Check data format
            if token[0] == "DataFormat":
                format = token[1]
                assert format == "Block"

            # Check # of groups
            if token[0] == "GroupNumber":
                groups = int(token[1])

            # Check # of total traces
            if token[0] == "TraceTotalNumber":
                ttraces = int(token[1])

            # Check data offset
            if token[0] == "DataOffset":
                offset = int(token[1])

    # Initialize containers
    traces = [None] * groups  # Number of traces for each group
    blocks = [None] * ttraces  # Number of blocks for each trace
    bsizes = [None] * ttraces  # Block size for each trace
    vres = [None] * ttraces  # VResolution for each trace
    voffset = [None] * ttraces  # VOffset for each trace
    hres = [None] * ttraces  # HResolution for each trace
    hoffset = [None] * ttraces  # HOffset for each trace

    # Parse $Group
    for line in lines:
        token = line.split()

        if len(token) > 0:
            # Read current group number
            if token[0][:6] == "$Group":
                cgn = int(token[0][6:]) - 1  # Current group number (minus 1)

            # Check # of traces in this group
            if token[0] == "TraceNumber":
                traces[cgn] = int(token[1])
                traceofs = np.sum(traces[:cgn], dtype=int)

            # Check # of Blocks
            if token[0] == "BlockNumber":
                blocks[traceofs:traceofs +
                       traces[cgn]] = [int(token[1])] * traces[cgn]

            # Check Block Size
            if token[0] == "BlockSize":
                bsizes[traceofs:traceofs +
                       traces[cgn]] = [int(s) for s in token[1:]]

            # Check VResolution
            if token[0] == "VResolution":
                vres[traceofs:traceofs +
                     traces[cgn]] = [float(res) for res in token[1:]]

            # Check VOffset
            if token[0] == "VOffset":
                voffset[traceofs:traceofs +
                        traces[cgn]] = [float(ofs) for ofs in token[1:]]

            # Check VDataType
            if token[0] == "VDataType":
                assert token[1] == "IS2"

            # Check HResolution
            if token[0] == "HResolution":
                hres[traceofs:traceofs +
                     traces[cgn]] = [float(res) for res in token[1:]]

            # Check HOffset
            if token[0] == "HOffset":
                hoffset[traceofs:traceofs +
                        traces[cgn]] = [float(ofs) for ofs in token[1:]]

    # Data Initialization
    time = [
        np.array(range(bsizes[t])) * hres[t] + hoffset[t]
        for t in range(ttraces)
    ]
    data = [[None] * blocks[t] for t in range(ttraces)]

    # Open WVF
    f = open(str(filenumber) + ".WVF", 'rb')
    f.seek(offset)

    # Read WVF
    if format == "Block":
        # Block format (assuming block size is the same for all the traces in Block format)
        for b in range(blocks[0]):
            for t in range(ttraces):
                data[t][b] = np.array(
                    unpack(endian + 'h' * bsizes[t], f.read(
                        bsizes[t] * 2))) * vres[t] + voffset[t]
    else:
        # Trace format
        for t in range(ttraces):
            for b in range(blocks[t]):
                data[t][b] = np.array(
                    unpack(endian + 'h' * bsizes[t], f.read(
                        bsizes[t] * 2))) * vres[t] + voffset[t]

    # Array conversion
    for t in range(ttraces):
        data[t] = np.array(data[t])

    # Tmin/Tmax filtering
    for t in range(ttraces):
        if type(tmin) == list or type(tmax) == list:
            if not (type(tmin) == list and type(tmax) == list
                    and len(tmin) == len(tmax)):
                raise ValueError(
                    "tmin and tmax both have to be list and have to have the same length."
                )
            mask = np.add.reduce([(time[t] >= _tmin) & (time[t] < _tmax)
                                  for (_tmax, _tmin) in zip(tmax, tmin)],
                                 dtype=bool)
        else:
            _tmin = min(time[t]) if tmin is None else tmin
            _tmax = max(time[t]) + 1 if tmax is None else tmax
            mask = (time[t] >= _tmin) & (time[t] < _tmax)

        data[t] = data[t][:, mask]
        time[t] = time[t][mask]

    f.close()

    if summary is False:
        # Return wave data as is
        return [[time[t], data[t]] for t in range(ttraces)]
    else:
        if nf is None:
            # Noise filter is off
            return [[
                time[t],
                np.mean(data[t], axis=0),
                np.std(data[t], axis=0, ddof=1)
            ] for t in range(ttraces)]
        else:
            # Noise filter is on
            return [[
                time[t],
                np.apply_along_axis(lambda a: np.mean(a[median_filter(a, nf)]),
                                    0, data[t]),
                np.apply_along_axis(
                    lambda a: np.std(a[median_filter(a, nf)], ddof=1), 0,
                    data[t])
            ] for t in range(ttraces)]
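
A usage sketch for yopen, assuming a capture saved as 100.HDR / 100.WVF exists in the working directory (the file number is purely illustrative).

from Analysis import yopen   # adjust the import to the actual package layout

# Raw waves: one [time, data] pair per trace; data has shape (blocks, samples)
waves = yopen(100)
t0, d0 = waves[0]

# Per-sample mean and 1-sigma error, with a 3-sigma median noise filter applied
summary = yopen(100, summary=True, nf=3)
t0, mean0, err0 = summary[0]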
Example #6
File: Analysis.py Project: robios/PyTES
def fit_offset(pha, offset, sigma=1, prange=None, orange=None, method='ols', flip=False):
    """
    Fit a pha and an offset (DC level)
    
    Parameters (and their default values):
        pha:    pha data (array-like)
        offset: offset data (array-like)
        sigma:  sigmas allowed for median filter (Default: 1)
        prange: a tuple of range for pha to fit if not None (Default: None)
        orange: a tuple of range for offset to fit if not None (Default: None)
        method: fitting method from ols (ordinary least squares)
                or odr (orthogonal distance regression) (Default: ols)
        flip:   flip pha and offset when fitting if True (Default: False)
    
    Return ((a, b), coef):
        a, b:   fitting results to pha = a*(1+b*offset)
        coef:   correlation coefficient
    """

    # Sanity check
    if len(pha) != len(offset):
        raise ValueError("data length of pha and offset does not match")
    
    pha = np.asarray(pha)
    offset = np.asarray(offset)
    
    # Reduction
    if prange is not None:
        pmask = (pha >= prange[0]) & (pha <= prange[1])
    else:
        pmask = median_filter(pha, sigma=sigma)
    
    if orange is not None:
        omask = (offset >= orange[0]) & (offset <= orange[1])
    else:
        omask = median_filter(offset, sigma=sigma)
    
    mask = pmask & omask
    
    # Correlation coefficient
    coef = np.corrcoef(pha[mask], offset[mask])[0,1]
    
    # Fitting to a*x+b
    if method.lower() == 'ols':
        if flip:
            _a, _b = np.polyfit(pha[mask], offset[mask], 1)
        else:
            _a, _b = np.polyfit(offset[mask], pha[mask], 1)
    elif method.lower() == 'odr':
        f = models.polynomial(1)
        if flip:
            data = odrpack.Data(pha[mask], offset[mask])
        else:
            data = odrpack.Data(offset[mask], pha[mask])
        odr = odrpack.ODR(data, f, maxit=100)
        fit = odr.run()
        _a, _b = fit.beta[::-1]
    else:
        raise ValueError('Unknown method: %s' % method)
    
    if flip:
        a = -_b/_a
        b = -1/_b
    else:
        a = _b
        b = _a/_b
    
    return (a, b), coef
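
A sketch that exercises the slope/intercept conversion at the end of fit_offset on synthetic data: np.polyfit returns (_a, _b) for pha = _a*offset + _b, and matching coefficients against pha = a*(1 + b*offset) gives a = _b and b = _a/_b (with the analogous algebra for flip=True). The data and the import path are assumptions.

import numpy as np
from Analysis import fit_offset   # adjust the import to the actual package layout

rng = np.random.default_rng(4)
offset = rng.normal(0.0, 0.01, 2000)
true_a, true_b = 5.90, 2.0
pha = true_a * (1.0 + true_b * offset) + rng.normal(0.0, 0.002, 2000)

(a, b), coef = fit_offset(pha, offset, method='ols')
print(a, b, coef)   # a and b should land near 5.90 and 2.0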
Example #7
def yopen(filenumber, summary=False, nf=None, tmin=None, tmax=None):
    """
    Read Yokogawa WVF file
    
    Parameters
    ==========
        filenumber: file number to read
        summary:    summarize waves if True (default: False)
        nf:         sigmas for valid data using median noise filter, None to disable noise filter (default: None)
        tmin:       lower boundary of time for partial extraction, scalar or list (Default: None)
        tmax:       upper boundary of time for partial extraction, scalar or list (Default: None)
    
    Returns
    =======
        if summary is False:
            [ [t1, d1], [t2, d2], [t3, d3], ... ]
        
        if summary is True:
            [ [t1, d1, err1], [t2, d2, err2], ... ]
        
        where t1 is timing for 1st ch, d1 is data for 1st ch, err1 is error (1sigma) for 1st ch, and so on.
    """
    
    # Read header (HDR)
    h = open(str(filenumber) + ".HDR")
    lines = h.readlines()
    h.close()
    
    # Parse $PublicInfo
    for line in lines:
        token = line.split()
        
        if len(token) > 0:
            # Check endian
            if token[0] == "Endian":
                endian = '>' if token[1] == "Big" else '<'
            
            # Check data format
            if token[0] == "DataFormat":
                format = token[1]
                assert format == "Block"
                
            # Check # of groups
            if token[0] == "GroupNumber":
                groups = int(token[1])
            
            # Check # of total traces
            if token[0] == "TraceTotalNumber":
                ttraces = int(token[1])
            
            # Check data offset
            if token[0] == "DataOffset":
                offset = int(token[1])
    
    # Initialize containers
    traces = [None] * groups        # Number of traces for each group
    blocks = [None] * ttraces       # Number of blocks for each trace
    bsizes = [None] * ttraces       # Block size for each trace
    vres = [None] * ttraces         # VResolution for each trace
    voffset = [None] * ttraces      # VOffset for each trace
    hres = [None] * ttraces         # HResolution for each trace
    hoffset = [None] * ttraces      # HOffset for each trace
    
    # Parse $Group
    for line in lines:
        token = line.split()

        if len(token) > 0:
            # Read current group number
            if token[0][:6] == "$Group":
                cgn = int(token[0][6:]) - 1  # Current group number (minus 1)
            
            # Check # of traces in this group
            if token[0] == "TraceNumber":
                traces[cgn] = int(token[1])
                traceofs = np.sum(traces[:cgn], dtype=int)
                        
            # Check # of Blocks
            if token[0] == "BlockNumber":
                blocks[traceofs:traceofs+traces[cgn]] = [ int(token[1]) ] * traces[cgn]
            
            # Check Block Size
            if token[0] == "BlockSize":
                bsizes[traceofs:traceofs+traces[cgn]] = [ int(s) for s in token[1:] ]
            
            # Check VResolution
            if token[0] == "VResolution":
                vres[traceofs:traceofs+traces[cgn]] = [ float(res) for res in token[1:] ]
            
            # Check VOffset
            if token[0] == "VOffset":
                voffset[traceofs:traceofs+traces[cgn]] = [ float(ofs) for ofs in token[1:] ]
            
            # Check VDataType
            if token[0] == "VDataType":
                assert token[1] == "IS2"
            
            # Check HResolution
            if token[0] == "HResolution":
                hres[traceofs:traceofs+traces[cgn]] = [ float(res) for res in token[1:] ]
            
            # Check HOffset
            if token[0] == "HOffset":
                hoffset[traceofs:traceofs+traces[cgn]] = [ float(ofs) for ofs in token[1:] ]
        
    # Data Initialization
    time = [ np.array(range(bsizes[t])) * hres[t] + hoffset[t] for t in range(ttraces) ]
    data = [ [None] * blocks[t] for t in range(ttraces) ]
    
    # Open WVF
    f = open(str(filenumber) + ".WVF", 'rb')
    f.seek(offset)
    
    # Read WVF
    if format == "Block":
        # Block format (assuming block size is the same for all the traces in Block format)
        for b in range(blocks[0]):
            for t in range(ttraces):
                data[t][b] = np.array(unpack(endian + 'h'*bsizes[t], f.read(bsizes[t]*2))) * vres[t] + voffset[t]
    else:
        # Trace format
        for t in range(ttraces):
            for b in range(blocks[t]):
                data[t][b] = np.array(unpack(endian + 'h'*bsizes[t], f.read(bsizes[t]*2))) * vres[t] + voffset[t]

    # Array conversion
    for t in range(ttraces):
        data[t] = np.array(data[t])
    
    # Tmin/Tmax filtering
    for t in range(ttraces):
        if type(tmin) == list or type(tmax) == list:
            if not (type(tmin) == list and type(tmax) == list and len(tmin) == len(tmax)):
                raise ValueError("tmin and tmax both have to be list and have to have the same length.")
            mask = np.add.reduce([ (time[t] >= _tmin) & (time[t] < _tmax) for (_tmax, _tmin) in zip(tmax, tmin)], dtype=bool)
        else:
            _tmin = min(time[t]) if tmin is None else tmin
            _tmax = max(time[t]) + 1 if tmax is None else tmax
            mask = (time[t] >= _tmin) & (time[t] < _tmax)
        
        data[t] = data[t][:, mask]
        time[t] = time[t][mask]
        
    f.close()
    
    if summary is False:
        # Return wave data as is
        return [ [ time[t], data[t] ] for t in range(ttraces)  ]
    else:
        if nf is None:
            # Noise filter is off
            return [ [ time[t], np.mean(data[t], axis=0), np.std(data[t], axis=0, ddof=1) ]
                        for t in range(ttraces) ]
        else:
            # Noise filter is on
            return [ [ time[t],
                        np.apply_along_axis(lambda a: np.mean(a[median_filter(a, nf)]), 0,  data[t]),
                        np.apply_along_axis(lambda a: np.std(a[median_filter(a, nf)], ddof=1), 0, data[t]) ]
                            for t in range(ttraces) ]
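
One more hedged sketch, this time for the partial-extraction path: tmin and tmax must both be lists of equal length, and samples falling inside any of the [tmin, tmax) windows are kept (the file number is again illustrative).

from Analysis import yopen   # adjust the import to the actual package layout

# Keep only samples inside [0, 1) ms or [2, 3) ms for every trace
waves = yopen(100, tmin=[0.0, 2.0e-3], tmax=[1.0e-3, 3.0e-3])
t0, d0 = waves[0]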