Example #1
 def test_ufunc_two_outputs(self):
     mantissa, exponent = np.frexp(2 ** -3)
     expected = (ArrayLike(mantissa), ArrayLike(exponent))
     _assert_equal_type_and_value(
         np.frexp(ArrayLike(2 ** -3)), expected)
     _assert_equal_type_and_value(
         np.frexp(ArrayLike(np.array(2 ** -3))), expected)
Example #2
    def computeActivity(self, inputActivity):
        logger.debug('computing activity.')
        self.ensureLength(inputActivity.max())

        # numpy array magic
        idx = numpy.mgrid[0:self.dims[0], 0:self.dims[1], 0:self.dims[2]]
        tInputActivity = numpy.tile(inputActivity, self.dims[:-1] + (1,))
        factors = 2 * self.counts[idx[0],idx[1],idx[2],tInputActivity] / numpy.sum(self.counts, axis=3)
        mans,exps = numpy.frexp(factors)
        mantissas, exponents = numpy.frexp(numpy.prod(mans, axis=2))
        exponents += exps.sum(axis=2)

        if self.maxexp is not None:
            maxexp = self.maxexp
        else:
            maxexp = exponents.max()

        exponents -= maxexp
        logger.debug("Maximum exponent: %d", maxexp)
        activity = mantissas * numpy.exp2(exponents)

        if self.p != 0:
            conscience = (self.coff / self.con)**self.p
            activity *= conscience

        activity *= numpy.prod(activity.shape) / activity.sum()
        return activity
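A minimal numpy-only sketch (not from the original codebase) of the mantissa/exponent trick used above to keep long products from overflowing:

import numpy as np

# Multiplying the raw factors overflows; multiplying the frexp mantissas
# (each in [0.5, 1)) and summing the exponents does not.
factors = np.array([1e200, 1e200, 1e-150])
with np.errstate(over='ignore'):
    print(np.prod(factors))        # inf: the running product overflows
mans, exps = np.frexp(factors)
m, e = np.frexp(np.prod(mans))
e += exps.sum()
print(m * np.exp2(e))              # ~1e+250, recovered safely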
Example #3
  def _detectEndian (self):
    if (self.endian != 'Auto'):
      self._maybePrint('%s endian specified... Not autodetecting.'%(self.endian,))
      if (self.endian != self.mendian):
        self._maybePrint('%s endian != %s endian, therefore Foreign.'%(self.endian,self.mendian))
        self.endian = 'Foreign'
    else:
      self._maybePrint('Auto endian specified... Trying to autodetect data endianness.')
      for i in range(1, self.ntr+1):
        locar = self.readTraces(i)
        if ((not abs(locar).sum() == 0.) and (not _np.isnan(locar.mean()))):
          nexp = abs(_np.frexp(locar.mean())[1])
          locar = locar.newbyteorder()
          fexp = abs(_np.frexp(locar.mean())[1])
          if (fexp > nexp):
            self.endian = 'Native'
          else:
            self.endian = 'Foreign'
          self._maybePrint('Scanned %d trace(s). Endian appears to be %s.'%(i, self.endian))
          break

      if (self.endian == 'Foreign'):
        self._maybePrint('Will attempt to convert to %s endian when traces are read.\n'%(self.mendian,))
      elif (self.endian == 'Auto'):
        self._maybePrint('Couldn\'t find any non-zero traces to test!\nAssuming Native endian.\n')
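A hedged illustration (not part of the original reader) of the heuristic above: bytes read with the wrong endianness produce means with implausibly large frexp exponents.

import numpy as np

data = np.ones(8, dtype='<f8')
swapped = data.view(data.dtype.newbyteorder())   # same bytes, wrong byte order
print(abs(np.frexp(data.mean())[1]))             # 1: plausible magnitude
print(abs(np.frexp(swapped.mean())[1]))          # ~1057: flags foreign data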
Example #4
    def _detectFileEndian (self):
        if (self.endian != 'Auto'):
            self._maybePrint('%s endian specified... Not autodetecting.'%(self.endian,))
            if (self.endian != self.mendian):
                self._maybePrint('%s endian != %s endian, therefore Foreign.'%(self.endian,self.mendian))
                self.endian = 'Foreign'
        else:
            self._maybePrint('Auto endian specified... Trying to autodetect data endianness.')
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                for i in range(self.ntr):
                    locar = self[i]
                    if ((not abs(locar).sum() == 0.) and (not np.isnan(locar.mean()))):
                        nexp = abs(np.frexp(locar.astype(np.float64)**2)[1]).mean()
                        locar = locar.newbyteorder()
                        fexp = abs(np.frexp(locar.astype(np.float64)**2)[1]).mean()
                        if (fexp > nexp):
                            self.endian = 'Native'
                        else:
                            self.endian = 'Foreign'
                        self._maybePrint('Scanned %d trace(s). Endian appears to be %s.'%(i, self.endian))
                        break

            if (self.endian == 'Foreign'):
                self._maybePrint('Will attempt to convert to %s endian when traces are read.\n'%(self.mendian,))
            elif (self.endian == 'Auto'):
                self._maybePrint('Couldn\'t find any non-zero traces to test!\nAssuming Native endian.\n')
Example #5
def nextpow2(n):
    """Return the next power of 2 such as 2^p >= n.

    Notes
    -----

    Infinite and nan are left untouched, negative values are not allowed."""
    if np.any(n < 0):
        raise ValueError("n should be > 0")

    if np.isscalar(n):
        f, p = np.frexp(n)
        if f == 0.5:
            return p-1
        elif np.isfinite(f):
            return p
        else:
            return f
    else:
        f, p = np.frexp(n)
        res = f
        bet = np.isfinite(f)
        exa = (f == 0.5)
        res[bet] = p[bet]
        res[exa] = p[exa] - 1
        return res
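A quick usage sketch of the frexp identity behind nextpow2:

import numpy as np

f, p = np.frexp(48.0)
print(f, p)           # 0.75 6 -> nextpow2(48) == 6, since 2**6 = 64 >= 48
f, p = np.frexp(64.0)
print(f, p)           # 0.5 7 -> exact power of two, so nextpow2 returns 7 - 1 = 6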
Example #6
    def test_frexp_invalid_units(self):
        # Can't use prod() with non-dimensionless quantities
        with pytest.raises(TypeError) as exc:
            np.frexp(3.0 * u.m / u.s)
        assert exc.value.args[0] == ("Can only apply 'frexp' function to " "unscaled dimensionless quantities")

        # also does not work on quantities that can be made dimensionless
        with pytest.raises(TypeError) as exc:
            np.frexp(np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.cm))
        assert exc.value.args[0] == ("Can only apply 'frexp' function to " "unscaled dimensionless quantities")
Example #7
def saveArray(filename, data):
    # https://gist.github.com/edouardp/3089602
    f = open(filename, "wb")
    f.write("#?RADIANCE\n# Made with Python & Numpy\nFORMAT=32-bit_rle_rgbe\n\n")
    f.write("-Y {0} +X {1}\n".format(data.shape[0], data.shape[1]))
    brightest = np.maximum(np.maximum(data[...,0], data[...,1]), data[...,2])
    exp = np.zeros_like(brightest)
    man = np.zeros_like(brightest)
    np.frexp(brightest, man, exp)
    scman = np.nan_to_num(man * 256.0 / brightest)
    rgbe = np.zeros((data.shape[0], data.shape[1], 4), dtype=np.uint8)
    rgbe[...,0:3] = np.minimum(np.maximum(np.around(data[...,0:3] * scman[...,None]), 0), 255)
    rgbe[...,3] = np.minimum(np.maximum(np.around(exp + 128), 0), 255)
    rgbe.flatten().tofile(f)
    f.close()
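A hedged round-trip check of the RGBE encoding above (single pixel, illustrative names): each channel decodes as value/256 * 2**(e-128), the inverse of the frexp split.

import numpy as np

pixel = np.array([0.9, 0.4, 0.2])
m, e = np.frexp(pixel.max())                     # 0.9 == 0.9 * 2**0
scman = m * 256.0 / pixel.max()
rgbe = np.append(np.around(pixel * scman), e + 128)
decoded = rgbe[:3] / 256.0 * 2.0 ** (rgbe[3] - 128)
print(np.allclose(decoded, pixel, atol=1/256))   # True: ~8-bit precision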
Example #8
def expm(A):
# EXPM   Matrix exponential.
#   EXPM(X) is the matrix exponential of X.  EXPM is computed using
#   a scaling and squaring algorithm with a Pade approximation.
#
# Julia implementation closely based on MATLAB code by Nicholas Higham
#

# Initialization
    m_vals, theta = expmchk()

    normA = 0
    if issparse(A): normA = np.amax((A.multiply(A.sign())).sum(0)) 
    else:           normA = nlin.norm(A,1) 
    
    if normA <= theta[-1]:
        # no scaling and squaring is required.
        for i in range(len(m_vals)):
            if normA <= theta[i]:
                F = PadeApproximantOfDegree(A, m_vals[i])
                break
    else:
        t,s = frexp(normA/float(theta[-1]))
        s = s - (t == 0.5) # adjust s if normA/theta(end) is a power of 2.
        A = A/(2.0**s)     # Scaling
        F = PadeApproximantOfDegree(A, m_vals[-1])
        
        for i in range(s):
            if issparse(A): F = F*F
            else:           F = np.dot(F,F)   

    return F
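The scaling step above in isolation (a sketch, not Higham's full algorithm): frexp picks s so that the scaled norm falls below the largest Pade threshold.

import numpy as np

normA, theta_max = 100.0, 5.371920351148152
t, s = np.frexp(normA / theta_max)
s = s - (t == 0.5)                      # don't over-scale exact powers of two
print(s, normA / 2.0**s <= theta_max)   # 5 True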
Example #9
def _sp_expm(qo):
    """
    Sparse matrix exponential of a quantum operator.
    Called by the Qobj expm method.
    """
    A = qo.data.tocsc()  # extract Qobj data (sparse matrix)
    m_vals = np.array([3, 5, 7, 9, 13])
    theta = np.array([0.01495585217958292, 0.2539398330063230,
                      0.9504178996162932, 2.097847961257068,
                      5.371920351148152], dtype=float)
    normA = _sp_one_norm(qo)
    if normA <= theta[-1]:
        for ii in range(len(m_vals)):
            if normA <= theta[ii]:
                F = _pade(A, m_vals[ii])
                break
    else:
        t, s = np.frexp(normA / theta[-1])
        s = s - (t == 0.5)
        A = A / 2.0 ** s
        F = _pade(A, m_vals[-1])
        for i in range(s):
            F = F * F

    return F
Example #10
def frexp(x):
    tmp = elemwise(np.frexp, x)
    left = next(names)
    right = next(names)
    ldsk = dict(((left,) + key[1:], (getitem, key, 0))
                for key in core.flatten(tmp._keys()))
    rdsk = dict(((right,) + key[1:], (getitem, key, 1))
                for key in core.flatten(tmp._keys()))

    if x._dtype is not None:
        a = np.empty((1,), dtype=x._dtype)
        l, r = np.frexp(a)
        ldt = l.dtype
        rdt = r.dtype
    else:
        ldt = None
        rdt = None

    L = Array(merge(tmp.dask, ldsk), left, blockdims=tmp.blockdims,
                dtype=ldt)

    R = Array(merge(tmp.dask, rdsk), right, blockdims=tmp.blockdims,
                dtype=rdt)

    return L, R
Example #11
def pSpectrum(data=None, samplefreq=44100):
    npts = len(data)
# we should window the data here
    if npts == 0:
        print("? no data in pSpectrum")
        return
# pad to the nearest higher power of 2
    (a, b) = numpy.frexp(npts)
    if a <= 0.5:
        b = b - 1  # npts is already an exact power of 2; no padding needed
    npad = 2**b - npts
    if debugFlag:
        print("npts: %d   npad: %d   npad+npts: %d" % (npts, npad, npad+npts))
    padw = numpy.append(data, numpy.zeros(npad))
    npts = len(padw)
    sigfft = spFFT.fft(padw)
    nUniquePts = int(numpy.ceil((npts+1)/2.0))
    sigfft = sigfft[0:nUniquePts]
    spectrum = abs(sigfft)
    spectrum = spectrum / float(npts) # scale by the number of points so that
                       # the magnitude does not depend on the length
                       # of the signal or on its sampling frequency
    spectrum = spectrum**2  # square it to get the power
    spmax = numpy.amax(spectrum)
    spectrum = spectrum + 1e-12*spmax
    # multiply by two (see technical document for details)
    # odd nfft excludes Nyquist point
    if npts % 2 > 0: # we've got odd number of points fft
        spectrum[1:len(spectrum)] = spectrum[1:len(spectrum)] * 2
    else:
        spectrum[1:len(spectrum) -1] = spectrum[1:len(spectrum) - 1] * 2 # we've got even number of points fft
    freqAzero = numpy.arange(0, nUniquePts, 1.0) * (samplefreq / npts)
    return(spectrum, freqAzero)
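The padding computation above, in isolation (with the power-of-two fix applied):

import numpy as np

for npts in (5, 8):
    a, b = np.frexp(npts)
    if a <= 0.5:
        b = b - 1              # already an exact power of two
    print(npts, 2**b - npts)   # 5 -> pad 3, 8 -> pad 0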
Example #12
    def test_half_ufuncs(self):
        """Test the various ufuncs"""

        a = np.array([0, 1, 2, 4, 2], dtype=float16)
        b = np.array([-2, 5, 1, 4, 3], dtype=float16)
        c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16)

        assert_equal(np.add(a, b), [-2, 6, 3, 8, 5])
        assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1])
        assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6])
        assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625])

        assert_equal(np.equal(a, b), [False, False, False, True, False])
        assert_equal(np.not_equal(a, b), [True, True, True, False, True])
        assert_equal(np.less(a, b), [False, True, False, False, True])
        assert_equal(np.less_equal(a, b), [False, True, False, True, True])
        assert_equal(np.greater(a, b), [True, False, True, False, False])
        assert_equal(np.greater_equal(a, b), [True, False, True, True, False])
        assert_equal(np.logical_and(a, b), [False, True, True, True, True])
        assert_equal(np.logical_or(a, b), [True, True, True, True, True])
        assert_equal(np.logical_xor(a, b), [True, False, False, False, False])
        assert_equal(np.logical_not(a), [True, False, False, False, False])

        assert_equal(np.isnan(c), [False, False, False, True, False])
        assert_equal(np.isinf(c), [False, False, True, False, False])
        assert_equal(np.isfinite(c), [True, True, False, False, True])
        assert_equal(np.signbit(b), [True, False, False, False, False])

        assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3])

        assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3])
        x = np.maximum(b, c)
        assert_(np.isnan(x[3]))
        x[3] = 0
        assert_equal(x, [0, 5, 1, 0, 6])
        assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2])
        x = np.minimum(b, c)
        assert_(np.isnan(x[3]))
        x[3] = 0
        assert_equal(x, [-2, -1, -np.inf, 0, 3])
        assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3])
        assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6])
        assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2])
        assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3])

        assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0])
        assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2])
        assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2]))
        assert_equal(np.square(b), [4, 25, 1, 16, 9])
        assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125])
        assert_equal(np.ones_like(b), [1, 1, 1, 1, 1])
        assert_equal(np.conjugate(b), b)
        assert_equal(np.absolute(b), [2, 5, 1, 4, 3])
        assert_equal(np.negative(b), [2, -5, -1, -4, -3])
        assert_equal(np.positive(b), b)
        assert_equal(np.sign(b), [-1, 1, 1, 1, 1])
        assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b))
        assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2]))
        assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12])
Example #13
def write_hdr(filename, image):
    '''Writes a HDR image into disk. Assumes you have a np.array((height,width,3), dtype=float)
        as your HDR image'''
    f = open(filename, "wb")
    f.write("#?RADIANCE\n# Made with Python & Numpy\nFORMAT=32-bit_rle_rgbe\n\n")
    f.write("-Y {0} +X {1}\n".format(image.shape[0], image.shape[1]))

    brightest = np.maximum(np.maximum(image[...,0], image[...,1]), image[...,2])
    mantissa = np.zeros_like(brightest)
    exponent = np.zeros_like(brightest)
    np.frexp(brightest, mantissa, exponent)
    scaled_mantissa = mantissa * 256.0 / brightest
    rgbe = np.zeros((image.shape[0], image.shape[1], 4), dtype=np.uint8)
    rgbe[..., 0:3] = np.around(image[..., 0:3] * scaled_mantissa[..., None])
    rgbe[..., 3] = np.around(exponent + 128)

    rgbe.flatten().tofile(f)
    f.close()
Example #14
    def test_frexp(self, dtype):
        numpy_a = numpy.array([-300, -20, -10, -1, 0, 1, 10, 20, 300], dtype=dtype)
        numpy_b, numpy_c = numpy.frexp(numpy_a)

        cupy_a = cupy.array(numpy_a)
        cupy_b, cupy_c = cupy.frexp(cupy_a)

        testing.assert_allclose(cupy_b, numpy_b)
        testing.assert_array_equal(cupy_c, numpy_c)
Example #15
def nextfloat(fin):
    """Return (approximately) next float value (for f>0)."""
    d = 2**-52
    split = N.frexp(fin)
    while True:
        fout = N.ldexp(split[0] + d, split[1])
        if fin != fout:
            return fout
        d *= 2
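A hedged usage note: because the loop doubles d until the value changes, nextfloat can overshoot np.nextafter by an ulp; it is only "approximately next", as the docstring says.

import numpy as np

print(nextfloat(1.0) > 1.0)         # True: strictly above the input
print(np.nextafter(1.0, np.inf))    # numpy's exact next float, 1 + 2**-52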
Example #16
def compare_float(var, ref, cut_digit=1, verbose=False):
    # x = mantissa * 2**exponent

    if type(var) != np.ndarray: var = np.array(var)
    if type(ref) != np.ndarray: ref = np.array(ref)

    m_var, e_var = np.frexp(var)
    m_ref, e_ref = np.frexp(ref)

    num_exp_diff = np.count_nonzero(e_var != e_ref)
    if num_exp_diff > 0:
        try:
            # check 0 and 1e-16
            idxs = np.where(e_var != e_ref)
            aa_equal(var[idxs], ref[idxs], 15)
            return True, 15
        except:
            if verbose:
                print("idxs   : ", idxs)
                print("Actual : ", var[idxs])
                print("Desired: ", ref[idxs])
            return False, "The exponents are not same at {} points".format(num_exp_diff)

    num_man_diff = np.count_nonzero(m_var != m_ref)
    if num_man_diff == 0:
        return True, "exact"

    digit = 17
    percents = []
    while(True):
        try:
            aa_equal(m_var, m_ref, digit-1)
            return True, "{}, ({})".format(digit, ', '.join(percents))

        except Exception as e:
            percent = float(re.findall('mismatch (\d+.\S+)%',str(e))[0])
            percents.insert(0, "{}:{:.2f}%".format(digit,percent))
            #print('>>>>', digit, str(e), percent, percents)

            if digit == cut_digit:
                return False, "{}, ({})".format(cut_digit, ', '.join(percents))
            else:
                digit -= 1
Example #17
def test_binary_out():
    args = [1,
            np.ones(2),
            xr.Variable(['x'], [1, 1]),
            xr.DataArray([1, 1], dims='x'),
            xr.Dataset({'y': ('x', [1, 1])})]
    for arg in args:
        actual_mantissa, actual_exponent = np.frexp(arg)
        assert_identical(actual_mantissa, 0.5 * arg)
        assert_identical(actual_exponent, arg)
Example #18
def hadamard(n):
    """
    HADAMARD  Hadamard matrix.
          HADAMARD(N) is a Hadamard matrix of order N, that is,
          a matrix H with elements 1 or -1 such that H*H' = N*EYE(N).
          An N-by-N Hadamard matrix with N>2 exists only if REM(N,4) = 0.
          This function handles only the cases where N, N/12 or N/20
          is a power of 2.

          Reference:
          S.W. Golomb and L.D. Baumert, The search for Hadamard matrices,
             Amer. Math. Monthly, 70 (1963) pp. 12-17.
          http://en.wikipedia.org/wiki/Hadamard_matrix
          Weisstein, Eric W. "Hadamard Matrix." From MathWorld--
             A Wolfram Web Resource:
             http://mathworld.wolfram.com/HadamardMatrix.html
    """

    f, e = np.frexp(np.array([n, n / 12., n / 20.]))

    try:
        # If more than one condition is satisfied, this will always
        # pick the first one.
        k = [i for i in range(3) if (f == 0.5)[i] and (e > 0)[i]].pop()
    except IndexError:
        raise ValueError('N, N/12 or N/20 must be a power of 2.')

    e = e[k] - 1

    if k == 0:        # N = 1 * 2^e;
        h = np.array([1])

    elif k == 1:      # N = 12 * 2^e;
        tp = rogues.toeplitz(np.array([-1, -1, 1, -1, -1, -1, 1, 1, 1, -1, 1]),
                             np.array([-1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1]))
        h = np.vstack((np.ones((1, 12)), np.hstack((np.ones((11, 1)), tp))))

    elif k == 2:     # N = 20 * 2^e;
        hk = rogues.hankel(
                np.array([-1, -1, 1, 1, -1, -1, -1, -1, 1,
                          -1, 1, -1, 1, 1, 1, 1, -1, -1, 1]),
                np.array([1, -1, -1, 1, 1, -1, -1, -1, -1,
                          1, -1, 1, -1, 1, 1, 1, 1, -1, -1]))
        h = np.vstack((np.ones((1, 20)), np.hstack((np.ones((19, 1)), hk))))

    #  Kronecker product construction.

    mh = -1 * h
    for i in range(e):
        ht = np.hstack((h, h))
        hb = np.hstack((h, mh))
        h = np.vstack((ht, hb))
        mh = -1 * h

    return h
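A quick check of the defining property H*H' = N*EYE(N) for the power-of-two branch, using only the Kronecker doubling above:

import numpy as np

H = np.array([[1]])
for _ in range(3):   # Sylvester doubling: order 1 -> 2 -> 4 -> 8
    H = np.vstack((np.hstack((H, H)), np.hstack((H, -H))))
print(np.array_equal(H @ H.T, 8 * np.eye(8, dtype=int)))   # True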
Example #19
    def exercise_frexp_t(self, tp):

        a = np.arange(5, dtype=tp)

        b = np.frexp(a)
        coeff = b[0]
        exp = b[1]

        for i in range(len(coeff)):
            x = coeff[i] * pow(2, exp[i])
            assert x == a[i]
Example #20
def pSpectrum(data=None, samplefreq=44100):
    """Power spectrum computation.

    Compute the power spectrum of a data set using standard ffts, after padding
    the data set to the next higher power of 2. Assumes data is regularly
    sampled in the time domain.

    Parameters
    data : list or numpy array
            the signal for which the power spectrume will be computed
    arg2 : float
        The sample frequency for the data set, in Hz

    Returns
    -------
    (powerspectrum, frequency)
        Tuple of numpy arrays with the computed power spectrum, and
        the associated frequency arrays (in Hz)

    """
    npts = len(data)
# we should window the data here
    if npts == 0:
        print( "? no data in pSpectrum")
        return
# pad to the nearest higher power of 2
    (a,b) = np.frexp(npts)
    if a <= 0.5:
        b = b - 1  # npts is already an exact power of 2; no padding needed
    npad = 2**b - npts
    if debugFlag:
        print("npts: %d   npad: %d   npad+npts: %d" % (npts, npad, npad+npts))
    padw =  np.append(data, np.zeros(npad))
    npts = len(padw)
    sigfft = spFFT.fft(padw)
    nUniquePts = int(np.ceil((npts+1)/2.0))
#    print nUniquePts
    sigfft = sigfft[0:nUniquePts]
    spectrum = abs(sigfft)
    spectrum = spectrum / float(npts) # scale by the number of points so that
                       # the magnitude does not depend on the length
                       # of the signal or on its sampling frequency
    spectrum = spectrum**2  # square it to get the power
    spmax = np.amax(spectrum)
    spectrum = spectrum + 1e-12*spmax
    # multiply by two (see technical document for details)
    # odd nfft excludes Nyquist point
    if npts % 2 > 0: # we've got odd number of points fft
        spectrum[1:len(spectrum)] = spectrum[1:len(spectrum)] * 2
    else:
        spectrum[1:len(spectrum) -1] = spectrum[1:len(spectrum) - 1] * 2 # we've got even number of points fft
    freqAzero = np.arange(0, nUniquePts, 1.0) * (samplefreq / npts)
    return(spectrum, freqAzero)
Example #21
def print_signum_latex(segments):
    print("=============\nLaTeX/Desmos signum (not simplified)\n-------------")
    sys.stdout.write("\\frac{{(1-\\operatorname{{signum}}(x-{}))}}{{2}}*{}".format(
        segments[0][XCOORD], segments[0][YCOORD]))
    for i in range(1, len(segments)):
        m, e = np.frexp(segments[i][SLOPE])
        sys.stdout.write("+\\frac{(\\operatorname{signum}" +
                         "(x-{})+1)".format(segments[i-1][XCOORD]) +
                         "(1-\\operatorname{{signum}}(x-{}))}}{{4}}".format(segments[i][XCOORD]) +
                         "*({}+(x-{})*{}\\cdot 2^{{{}}})".format(segments[i-1][YCOORD],
                                                                 segments[i-1][XCOORD], m, e))
    print("+\\frac{{(\\operatorname{{signum}}(x-{})+1)}}{{2}}*{}".format(segments[-1][XCOORD],
                                                                         segments[-1][YCOORD]))
Example #22
def from_npfloat(x, prec=113, rnd=round_fast):
    """Create a raw mpf from a numpy float, rounding if necessary.
    If prec >= 113, the result is guaranteed to represent exactly the
    same number as the input. If prec is not specified, use prec=113."""
    y = float(x)
    if x == y: # ldexp overflows for float16
        return from_float(y, prec, rnd)
    import numpy as np
    if np.isfinite(x):
        m, e = np.frexp(x)
        return from_man_exp(int(np.ldexp(m, 113)), int(e-113), prec, rnd)
    if np.isposinf(x): return finf
    if np.isneginf(x): return fninf
    return fnan
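The invariant this conversion relies on, shown in isolation: frexp splits x into m * 2**e with 0.5 <= |m| < 1, and ldexp inverts it exactly.

import numpy as np

x = np.float64(0.15625)
m, e = np.frexp(x)
print(m, e)                   # 0.625 -2
print(np.ldexp(m, e) == x)    # True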
Example #23
    def encode(self, data):
        data = np.array(data)
        s = MuLaw.scale * data
        s = np.minimum(abs(s), MuLaw.clip)
        [f,e] = np.frexp(s + MuLaw.bias)

        step  = np.floor(32 * f) - 16   # 4 bits
        chord = e - 8                   # 3 bits
        sgn   = (MuLaw.sign(data) == 1) # 1 bit

        mu = 16 * chord + step # 7-bit coding
        mu = 127 - mu          # bits inversion
        mu = 128 * sgn + mu    # final 8-bit coding
        return np.array(mu, dtype=np.uint8)
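A hedged single-sample illustration of the chord/step split above (MuLaw.scale, clip and bias are class constants not shown; 132 here is an assumed illustrative bias):

import numpy as np

s = 1000.0 + 132.0                  # sample plus assumed bias
f, e = np.frexp(s)
chord = e - 8                       # 3-bit segment number
step = int(np.floor(32 * f) - 16)   # 4-bit position within the segment
print(chord, step)                  # 3 1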
Example #24
def frexp(x):
    # Not actually object dtype, just need to specify something
    tmp = elemwise(np.frexp, x, dtype=object)
    left = 'mantissa-' + tmp.name
    right = 'exponent-' + tmp.name
    ldsk = {(left,) + key[1:]: (getitem, key, 0)
            for key in core.flatten(tmp.__dask_keys__())}
    rdsk = {(right,) + key[1:]: (getitem, key, 1)
            for key in core.flatten(tmp.__dask_keys__())}

    a = np.empty((1, ), dtype=x.dtype)
    l, r = np.frexp(a)
    ldt = l.dtype
    rdt = r.dtype

    L = Array(sharedict.merge(tmp.dask, (left, ldsk)), left, chunks=tmp.chunks, dtype=ldt)
    R = Array(sharedict.merge(tmp.dask, (right, rdsk)), right, chunks=tmp.chunks, dtype=rdt)
    return L, R
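The dtype-probing trick above, in plain numpy: run frexp on a one-element dummy array to learn the output dtypes without touching real data.

import numpy as np

l, r = np.frexp(np.empty((1,), dtype=np.float32))
print(l.dtype, r.dtype)   # float32 int32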
Example #25
def round_to_n(x, n):
    if not (type(n) is int or np.issubdtype(n, np.integer)):
        raise TypeError("RoundToSigFigs: sigfigs must be an integer.")

    if not np.all(np.isreal(x)):
        raise TypeError("RoundToSigFigs: all x must be real.")

    if n <= 0:
        raise ValueError("RoundtoSigFigs: sigfigs must be positive.")

    mantissas, binary_exps = np.frexp(x)

    decimal_exps = __logBase10of2 * binary_exps
    intParts = np.floor(decimal_exps)

    mantissas *= 10.0 ** (decimal_exps - intParts)

    return np.around(mantissas, decimals=n - 1) * 10.0 ** intParts
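A hedged usage sketch for round_to_n, assuming __logBase10of2 = np.log10(2) is defined at module level:

import numpy as np

__logBase10of2 = np.log10(2)
print(round_to_n(np.array([1234.567, 0.0012345]), 3))
# -> roughly [1.23e+03 1.23e-03]: three significant figures each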
Example #26
    def round_to_sigfigs(x, sigfigs):
        """
        N.B Stolen from stack overflow:
        http://stackoverflow.com/questions/18915378/rounding-to-significant-figures-in-numpy

        Rounds the value(s) in x to the number of significant figures in sigfigs.
        Restrictions:
        sigfigs must be an integer type and store a positive value.
        x must be a real value or an array like object containing only real values.
        """
        #The following constant was computed in maxima 5.35.1 using 64 bigfloat digits of precision
        __logBase10of2 = 3.010299956639811952137388947244930267681898814621085413104274611e-1
        if not ( type(sigfigs) is int or np.issubdtype(sigfigs, np.integer)):
            raise TypeError( "RoundToSigFigs: sigfigs must be an integer." )
        if not np.all(np.isreal( x )):
            raise TypeError( "RoundToSigFigs: all x must be real." )
        if sigfigs <= 0:
            raise ValueError( "RoundtoSigFigs: sigfigs must be positive." )
        mantissas, binaryExponents = np.frexp(x)
        decimalExponents = __logBase10of2 * binaryExponents
        intParts = np.floor(decimalExponents)
        mantissas *= 10.0**(decimalExponents - intParts)
        return np.around(mantissas, decimals=sigfigs - 1 ) * 10.0**intParts
Example #27
def sp_expm(data, sparse=True):
    """
    Sparse matrix exponential.
    """
    A = data.tocsc()  # extract Qobj data (sparse matrix)
    m_vals = np.array([3, 5, 7, 9, 13])
    theta = np.array([0.01495585217958292, 0.2539398330063230,
                      0.9504178996162932, 2.097847961257068,
                      5.371920351148152], dtype=float)
    normA = sp_one_norm(data)
    if normA <= theta[-1]:
        for ii in range(len(m_vals)):
            if normA <= theta[ii]:
                F = _pade(A, m_vals[ii], sparse)
                break
    else:
        t, s = np.frexp(normA / theta[-1])
        s = s - (t == 0.5)
        A = A / 2.0 ** s
        F = _pade(A, m_vals[-1], sparse)
        for i in range(s):
            F = F * F

    return F
Example #28
def pbdesign(n, keep=None):
    """
    Generate a Plackett-Burman design
    
    Parameter
    ---------
    n : int
        The multiple of 4 higher than the number of factors.
    
    Optional
    --------
    keep : int
        The actual number of factors that the matrix will be used for 
        (default: n).
    
    Returns
    -------
    H : 2d-array
        An orthogonal design matrix with n rows, and (n-1) columns.
    
    Example
    -------
    Create a 5-factor design::
    
        >>> pbdesign(8)  # since 8 is the next higher multiple of 4
        array([[ 1.,  1.,  1.,  1.,  1.,  1.,  1.],
               [-1.,  1., -1.,  1., -1.,  1., -1.],
               [ 1., -1., -1.,  1.,  1., -1., -1.],
               [-1., -1.,  1.,  1., -1., -1.,  1.],
               [ 1.,  1.,  1., -1., -1., -1., -1.],
               [-1.,  1., -1., -1.,  1., -1.,  1.],
               [ 1., -1., -1., -1., -1.,  1.,  1.],
               [-1., -1.,  1., -1.,  1.,  1., -1.]])

    And if we only want to keep the needed five columns::
    
        >>> pbdesign(8, keep=5)
        array([[ 1.,  1.,  1.,  1.,  1.],
               [-1.,  1., -1.,  1., -1.],
               [ 1., -1., -1.,  1.,  1.],
               [-1., -1.,  1.,  1., -1.],
               [ 1.,  1.,  1., -1., -1.],
               [-1.,  1., -1., -1.,  1.],
               [ 1., -1., -1., -1., -1.],
               [-1., -1.,  1., -1.,  1.]])
        
    
    """
    f, e = np.frexp([n, n/12., n/20.])
    k = [idx for idx, val in enumerate(np.logical_and(f==0.5, e>0)) if val]
    
    assert isinstance(n, int) and k!=[], 'Invalid inputs. n must be a multiple of 4.'
    
    k = k[0]
    e = e[k] - 1
    
    if k==0:  # N = 1*2**e
        H = np.ones((1, 1))
    elif k==1:  # N = 12*2**e
        H = np.vstack((np.ones((1, 12)), np.hstack((np.ones((11, 1)), 
            toeplitz([-1, -1, 1, -1, -1, -1, 1, 1, 1, -1, 1],
                     [-1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1])))))
    elif k==2:  # N = 20*2**e
        H = np.vstack((np.ones((1, 20)), np.hstack((np.ones((19, 1)),
            hankel(
            [-1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1],
            [1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1])
            ))))
    
    # Kronecker product construction
    for i in range(e):
        H = np.vstack((np.hstack((H, H)), np.hstack((H, -H))))
    
    if keep is not None:
        assert keep<=(H.shape[1]-1), 'Too many variables specified in "keep" for matrix'
        return H[:, 1:(keep + 1)]
    else:
        return H[:, 1:]
Example #29
 def test_ufunc_two_outputs(self):
     mantissa, exponent = np.frexp(2**-3)
     expected = (ArrayLike(mantissa), ArrayLike(exponent))
     _assert_equal_type_and_value(np.frexp(ArrayLike(2**-3)), expected)
     _assert_equal_type_and_value(np.frexp(ArrayLike(np.array(2**-3))),
                                  expected)
Example #30
def ValueWithUncsRounding( x, uncs, uncsigfigs=1 ):
    """
    Rounds all of the values in uncs (the uncertainties) to the number of
    significant figures in uncsigfigs. Then
    rounds the values in x to the same decimal place as the values in uncs.
    Return value is a two element tuple each element of which has the same
    type as x and uncs, respectively.

    Restrictions:
    - uncsigfigs must be a positive integer. 
    
    - x must be a real value or an array like object containing only real
      values.
    - uncs must be a real value or an array like object containing only real
      values.
    """
    if not ( type(uncsigfigs) is int or
             isinstance(uncsigfigs, np.integer) ):
        raise TypeError(
            "ValueWithUncsRounding: uncsigfigs must be an integer." )

    if uncsigfigs <= 0:
        raise ValueError(
            "ValueWithUncsRounding: uncsigfigs must be positive." )

    if not np.all(np.isreal( x )):
        raise TypeError(
            "ValueWithUncsRounding: all x must be real." )

    if not np.all(np.isreal( uncs )):
        raise TypeError(
            "ValueWithUncsRounding: all uncs must be real." )

    if np.any( uncs <= 0 ):
        raise ValueError(
            "ValueWithUncsRounding: uncs must all be positive." )

    #temporarily suppress floating point errors
    errhanddict = np.geterr()
    np.seterr(all="ignore")

    matrixflag = False
    if isinstance(x, np.matrix): #Convert matrices to arrays
        matrixflag = True
        x = np.asarray(x)

    #Pre-round unc to correctly handle cases where rounding alters the
    # most significant digit of unc.
    uncs = RoundToSigFigs_fp( uncs, uncsigfigs )
    
    mantissas, binaryExponents = np.frexp( uncs )
    
    decimalExponents = __logBase10of2 * binaryExponents
    omags = np.floor(decimalExponents)

    mantissas *= 10.0**(decimalExponents - omags)
    if type(mantissas) is float or np.issctype(np.dtype(mantissas)):
        if mantissas < 1.0:
            mantissas *= 10.0
            omags -= 1.0
            
    else: #elif np.all(np.isreal( mantissas )):
        fixmsk = mantissas < 1.0
        mantissas[fixmsk] *= 10.0
        omags[fixmsk] -= 1.0

    scales = 10.0**omags

    prec = uncsigfigs - 1
    result =  ( np.around( x / scales, decimals=prec ) * scales,
                np.around( mantissas, decimals=prec ) * scales )
    if matrixflag:
        result = np.matrix(result, copy=False)

    np.seterr(**errhanddict)
    return result
Example #31
    # construct the response curve and plot the figure
    exp = [
        32, 16, 8, 4, 2, 1, 0.5, 0.25, 0.125, 0.0625, 0.03125, 0.015625,
        0.007125, 0.00390625, 0.001953125, 0.00097656525
    ]
    cur = Constructing.ResponseCurve(num, img, exp)
    hdr = Constructing.RadianceMap(num, img, exp, cur)
    print(np.array(hdr)[:, :, 0])

    # np.save("hdr", np.array(hdr))
    # hdr = np.load("hdr.npy")

    # create hdr resultant image (brighter = mantissa * 2 ^ exponent)
    brighter = np.max(hdr, axis=2)
    mantissa = np.zeros_like(brighter)
    exponent = np.zeros_like(brighter)
    np.frexp(brighter, mantissa, exponent)
    rgbvalue = mantissa * 256.0 / brighter

    res = np.zeros((np.shape(hdr)[0], np.shape(hdr)[1], 4), dtype=np.uint8)
    res[:, :, 0] = np.around(hdr[:, :, 2] * rgbvalue)
    res[:, :, 1] = np.around(hdr[:, :, 1] * rgbvalue)
    res[:, :, 2] = np.around(hdr[:, :, 0] * rgbvalue)
    res[:, :, 3] = np.around(exponent + 128)

    fin = np.zeros((np.shape(hdr)[0], np.shape(hdr)[1], 3))
    fin[:, :, 0] = res[:, :, 2] * np.power(2, exponent)
    fin[:, :, 1] = res[:, :, 1] * np.power(2, exponent)
    fin[:, :, 2] = res[:, :, 0] * np.power(2, exponent)

    ImageFileIO.Write(1, ".", fin)
Example #32
def compress_float_array(arr: np.ndarray) -> np.ndarray:
    """
    Takes a numpy array with dtype.kind = 'f',
    and checks to see which bits are used in the float,
    then returns the compressed array as a float16, float32, or float64
    in a loss-less format
    :param arr: numpy array of floats
    :return: compressed array
    """
    if arr.dtype.kind != 'f':
        raise ArrayNotFloatException
    # filter out NaN values
    not_nan = arr[~np.isnan(arr)]
    if not_nan.size == 0:
        return arr.astype(np.float16)

    # put the array into a byte array
    bytes_dtype = np.dtype([('byte_{}'.format(i), np.uint8)
                            for i in range(0, arr.itemsize)])
    byte_array = np.frombuffer(not_nan.tobytes(), dtype=bytes_dtype)
    byte_arr_len = byte_array['byte_0'].shape[0]
    # mantissa is X bits long, with left-right having value of
    # 0.5 * 2^(i)
    big_endian_nth_byte = dict([('byte_{}'.format(i),
                                 'byte_{}'.format(arr.itemsize - i - 1))
                                for i in range(0, arr.itemsize)])
    if arr.itemsize == 8:
        # numpy 64-bit float has 52 bits of mantissa
        # in order to compress to 32-bit float, we need to pack mantissa into
        # 23 bits. This means we need 52-23=29 bits of zeros on the end.
        # that means that the 3 right-most bytes must be zero
        # and that the 4th right-most byte must have 5 right-most bits = 0
        # we'll check this by seeing if '0001 1111' & byte == 0. (0001 1111 is 31)
        # finally, we'll need to check the exponent compression

        # first check the 4th right-most byte. it's most likely to have the
        # significant bits we can't drop
        bitwise_reduction = np.bitwise_and(
            byte_array[big_endian_nth_byte['byte_4']],
            np.full(byte_arr_len, 31, dtype=np.uint8))
        if np.count_nonzero(bitwise_reduction) > 0:
            return arr
        # next check the remaining bits
        if np.count_nonzero(byte_array[big_endian_nth_byte['byte_5']]) > 0 \
            or np.count_nonzero(byte_array[big_endian_nth_byte['byte_6']]) > 0 \
                or np.count_nonzero(byte_array[big_endian_nth_byte['byte_7']]) > 0:
            return arr
        # finally check the exponent. in 64-bit float, exponent can be +/- 1024.
        # in 32-bit float, exponent can be +/-128
        # do this the easy way and get this from numpy
        bitwise_exponent = np.frexp(not_nan)[1]
        if np.amax(bitwise_exponent) > 128 or np.amin(bitwise_exponent) < -128:
            return arr

        # else, we're good to go onto 32-bit reduction
        return compress_float_array(arr.astype(np.float32))

    if arr.itemsize == 4:
        # numpy 32-bit float has 23 bits of mantissa.
        # numpy 16-bit float has 10 bits mantissa.
        # we require the right-most 13-bits to be zero. byte_3 (4th byte)
        # must be all zero, and byte_2 (3rd byte) must have
        # the 5 right-most bits be zero. We will bitwise-and against 31
        # (0001 1111) to see if it complies
        bitwise_reduction = np.bitwise_and(
            byte_array[big_endian_nth_byte['byte_2']],
            np.full(byte_arr_len, 31, dtype=np.uint8))
        if np.count_nonzero(bitwise_reduction) > 0:
            return arr
        # next check the 4th byte for zeros
        if np.count_nonzero(byte_array[big_endian_nth_byte['byte_3']]) > 0:
            return arr
        # finally, check exponent. In 32-bit float, exponent can be +/- 128
        # in 16-bit float, exponent can be +/-16
        bitwise_exponent = np.frexp(not_nan)[1]
        if np.amax(bitwise_exponent) > 16 or np.amin(bitwise_exponent) < -16:
            return arr
        # else, return compressed
        return arr.astype(np.float16)

    return arr
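The exponent screen used above, in isolation: frexp exponents outside the float32 range rule out a lossless downcast.

import numpy as np

a = np.array([1e200, 1.0])
exps = np.frexp(a)[1]
print(exps)                  # [665   1]
print(np.amax(exps) > 128)   # True -> must stay float64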
Example #33
    def test_half_ufuncs(self):
        """Test the various ufuncs"""

        a = np.array([0, 1, 2, 4, 2], dtype=float16)
        b = np.array([-2, 5, 1, 4, 3], dtype=float16)
        c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16)

        assert_equal(np.add(a, b), [-2, 6, 3, 8, 5])
        assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1])
        assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6])
        assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625])

        assert_equal(np.equal(a, b), [False, False, False, True, False])
        assert_equal(np.not_equal(a, b), [True, True, True, False, True])
        assert_equal(np.less(a, b), [False, True, False, False, True])
        assert_equal(np.less_equal(a, b), [False, True, False, True, True])
        assert_equal(np.greater(a, b), [True, False, True, False, False])
        assert_equal(np.greater_equal(a, b), [True, False, True, True, False])
        assert_equal(np.logical_and(a, b), [False, True, True, True, True])
        assert_equal(np.logical_or(a, b), [True, True, True, True, True])
        assert_equal(np.logical_xor(a, b), [True, False, False, False, False])
        assert_equal(np.logical_not(a), [True, False, False, False, False])

        assert_equal(np.isnan(c), [False, False, False, True, False])
        assert_equal(np.isinf(c), [False, False, True, False, False])
        assert_equal(np.isfinite(c), [True, True, False, False, True])
        assert_equal(np.signbit(b), [True, False, False, False, False])

        assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3])

        assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3])

        x = np.maximum(b, c)
        assert_(np.isnan(x[3]))
        x[3] = 0
        assert_equal(x, [0, 5, 1, 0, 6])

        assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2])

        x = np.minimum(b, c)
        assert_(np.isnan(x[3]))
        x[3] = 0
        assert_equal(x, [-2, -1, -np.inf, 0, 3])

        assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3])
        assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6])
        assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2])
        assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3])

        assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0])
        assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2])
        assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2]))
        assert_equal(np.square(b), [4, 25, 1, 16, 9])
        assert_equal(np.reciprocal(b),
                     [-0.5, 0.199951171875, 1, 0.25, 0.333251953125])
        assert_equal(np.ones_like(b), [1, 1, 1, 1, 1])
        assert_equal(np.conjugate(b), b)
        assert_equal(np.absolute(b), [2, 5, 1, 4, 3])
        assert_equal(np.negative(b), [2, -5, -1, -4, -3])
        assert_equal(np.positive(b), b)
        assert_equal(np.sign(b), [-1, 1, 1, 1, 1])
        assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b))
        assert_equal(np.frexp(b),
                     ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2]))
        assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12])
Example #34
def main():
    parser = create_parser()
    args = parser.parse_args()

    if args.params_json:
        with open(args.params_json, "r") as f:
            models_params = json.load(f)
    else:
        models_params = {}
    print(models_params)

    fft_lut_file = args.fft_lut_file if not "fft_lut_file" in models_params else models_params[
        "fft_lut_file"]
    mfcc_bf_lut_file = args.mfcc_bf_lut_file if not "mfcc_bf_lut_file" in models_params else models_params[
        "mfcc_bf_lut_file"]
    use_tf_mfcc = args.use_tf_mfcc if not "use_tf_mfcc" in models_params else models_params[
        "use_tf_mfcc"]
    use_librosa = args.use_librosa if not "use_librosa" in models_params else models_params[
        "use_librosa"]
    sample_rate = args.sample_rate if not "sample_rate" in models_params else models_params[
        "sample_rate"]
    frame_size = args.frame_size if not "frame_size" in models_params else models_params[
        "frame_size"]
    frame_step = args.frame_step if not "frame_step" in models_params else models_params[
        "frame_step"]
    window_fn = args.win_func if not "win_func" in models_params else models_params[
        "win_func"]
    gen_inv = args.gen_inv if not "gen_inv" in models_params else models_params[
        "gen_inv"]
    name_suffix = args.name_suffix if not "name_suffix" in models_params else models_params[
        "name_suffix"]

    n_fft = args.n_fft if not "n_fft" in models_params else models_params[
        "n_fft"]

    fmax = args.fmax if not "fmax" in models_params else models_params["fmax"]
    fmin = args.fmin if not "fmin" in models_params else models_params["fmin"]
    librosa_mel_norm = args.librosa_mel_norm if not "librosa_mel_norm" in models_params else models_params[
        "librosa_mel_norm"]
    mfcc_bank_cnt = args.mfcc_bank_cnt if not "mfcc_bank_cnt" in models_params else models_params[
        "mfcc_bank_cnt"]

    n_dct = args.n_dct if not "n_dct" in models_params else models_params[
        "n_dct"]
    dct_type = args.dct_type if not "dct_type" in models_params else models_params[
        "dct_type"]
    dct_norm = args.dct_norm if not "dct_norm" in models_params else models_params[
        "dct_norm"]
    lifter_coeff = args.lifter_coeff if not "lifter_coeff" in models_params else models_params[
        "lifter_coeff"]

    dtype = args.dtype if not "dtype" in models_params else models_params[
        "dtype"]

    mel_filterbanks_file = args.mel_filterbanks_file if not "mel_filterbanks_file" in models_params else models_params[
        "mel_filterbanks_file"]
    dct_matrix_file = args.dct_matrix_file if not "dct_matrix_file" in models_params else models_params[
        "dct_matrix_file"]

    lut_dtype = "int" if dtype == "fix16" or dtype == "fix32_scal" else dtype
    n_fft_int = n_fft if dtype == "fix32_scal" else n_fft // 2

    if lut_dtype == "int":
        data_type = "short int"
    elif lut_dtype == "float16":
        data_type = "F16_DSP"
    elif lut_dtype == "float32":
        data_type = "float"
    else:
        raise NotImplementedError(
            f"lut_dtype = {lut_dtype} not implemented, available ['fix16' 'fix32_scal' 'float32' 'float16']"
        )

    print(lut_dtype, name_suffix, n_fft_int)
    win_func = getattr(np, window_fn)
    win_lut = win_func(frame_size)
    if lut_dtype == "int":
        Window = (win_lut * 2**(15)).astype(np.int16)
    else:
        Window = win_lut.astype(np.float32)

    if gen_inv:
        inv_win_lut = win_lut / (win_lut**2 + 1e-8)
        if lut_dtype == "int":
            base, exp = np.frexp(inv_win_lut)
            InvWindow = np.empty(base.size + exp.size)
            InvWindow[0::2] = np.round(base * 2**8).astype(np.uint8)
            InvWindow[1::2] = exp.astype(np.uint8)
            InvWindow = InvWindow.astype(np.int32)
        else:
            InvWindow = inv_win_lut.astype(np.float32)

    Twiddles_cos, Twiddles_sin = SetupTwiddlesLUT(n_fft_int, dtype=lut_dtype)
    if round(math.log(n_fft_int, 4)) == math.log(n_fft_int, 4):
        SwapTableR4 = SetupSwapTableR4(n_fft_int)
        Twiddles_cosR4, Twiddles_sinR4 = Twiddles_cos[:int(
            3 / 4 * n_fft_int)], Twiddles_sin[:int(3 / 4 * n_fft_int)]
        print("Setting up twiddles for radix 4 ", len(Twiddles_cosR4))

    SwapTableR2 = SetupSwapTable(n_fft_int)
    Twiddles_cosR2, Twiddles_sinR2 = Twiddles_cos[:int(
        n_fft_int // 2)], Twiddles_sin[:int(n_fft_int // 2)]
    print("Setting up twiddles for radix 2 ", len(Twiddles_cosR2))

    RFFTTwiddles_real, RFFTTwiddles_imag = SetupTwiddlesRFFT(n_fft,
                                                             dtype=lut_dtype)

    if n_dct > 0:
        if dct_matrix_file:
            DCT_Coeff = convert_dtype(np.load(dct_matrix_file), lut_dtype,
                                      DCT_TWIDDLE_DYN)
        else:
            DCT_Coeff = SetupDCTTable(n_dct, dct_type, lut_dtype, dct_norm)
        if lifter_coeff > 0:
            Lift_Coeff = SetupLiftCoeff(lifter_coeff, n_dct, lut_dtype)

    ################################ WRITE TO FILE #######################################
    Out_str = array_to_def_c_file(Window,
                                  f"WindowLUT{name_suffix}",
                                  data_type,
                                  frame_size,
                                  elem_in_rows=12)
    if gen_inv:
        if dtype == "int":
            Out_str += array_to_def_c_file(InvWindow,
                                           f"InvWindowLUT{name_suffix}",
                                           "unsigned char",
                                           2 * frame_size,
                                           elem_in_rows=12)
        else:
            Out_str += array_to_def_c_file(InvWindow,
                                           f"InvWindowLUT{name_suffix}",
                                           data_type,
                                           frame_size,
                                           elem_in_rows=12)

    if round(math.log(n_fft_int, 4)) == math.log(
            n_fft_int, 4) and not dtype == "fix32_scal":
        Out_str += array_to_def_c_file(SwapTableR4.astype(np.int16),
                                       f"SwapTable{name_suffix}",
                                       "short int",
                                       n_fft_int,
                                       elem_in_rows=2)
    else:
        Out_str += array_to_def_c_file(SwapTableR2.astype(np.int16),
                                       f"SwapTable{name_suffix}",
                                       "short int",
                                       n_fft_int,
                                       elem_in_rows=2)

    # FFT
    if dtype == "fix32_scal":
        # only rad2 for fix32_scale
        Out_str += "PI_L2 {} TwiddlesLUT{}[{}] = {{\n".format(
            data_type, name_suffix, 2 * len(Twiddles_cosR2))
        for i in range(len(Twiddles_cosR2)):
            Out_str += "\t {:>6}, {:>6}, \n".format(Twiddles_cosR2[i],
                                                    Twiddles_sinR2[i])
        Out_str += "\n};\n\n"
    else:
        if round(math.log(n_fft_int, 4)) == math.log(n_fft_int, 4):
            Out_str += "PI_L2 {} TwiddlesLUT{}[{}] = {{\n".format(
                data_type, name_suffix, 2 * len(Twiddles_cosR4))
            for i in range(len(Twiddles_cosR4)):
                Out_str += "\t {:>6}, {:>6}, \n".format(
                    Twiddles_cosR4[i], Twiddles_sinR4[i])
            Out_str += "\n};\n\n"
        else:
            Out_str += "PI_L2 {} TwiddlesLUT{}[{}] = {{\n".format(
                data_type, name_suffix, 2 * len(Twiddles_cosR2))
            for i in range(len(Twiddles_cosR2)):
                Out_str += "\t {:>6}, {:>6}, \n".format(
                    Twiddles_cosR2[i], Twiddles_sinR2[i])
            Out_str += "\n};\n\n"
        Out_str += "PI_L2 {} RFFTTwiddlesLUT{}[{}] = {{\n".format(
            data_type, name_suffix, 2 * len(RFFTTwiddles_real))
        for i in range(len(RFFTTwiddles_real)):
            Out_str += "\t {:>6}, {:>6}, \n".format(RFFTTwiddles_real[i],
                                                    RFFTTwiddles_imag[i])
        Out_str += "\n};\n\n"

    # DCT
    if n_dct > 0:
        Out_str += array_to_def_c_file(DCT_Coeff.flatten(),
                                       f"DCT_Coeff{name_suffix}",
                                       data_type,
                                       n_dct * n_dct,
                                       elem_in_rows=n_dct)
        if lifter_coeff > 0:
            Out_str += array_to_def_c_file(Lift_Coeff.flatten(),
                                           f"Lift_Coeff{name_suffix}",
                                           data_type,
                                           n_dct,
                                           elem_in_rows=n_dct)

    with open(fft_lut_file, 'w') as f:
        f.write(Out_str)

    if mfcc_bf_lut_file:
        #####################################################################################
        # MFCC
        if mel_filterbanks_file:
            filters = np.load(mel_filterbanks_file)
        elif use_tf_mfcc:
            from SetupLUT import GenMFCC_FB_tf
            filters = GenMFCC_FB_tf(n_fft,
                                    mfcc_bank_cnt,
                                    Fmin=fmin,
                                    Fmax=fmax,
                                    sample_rate=sample_rate,
                                    dtype=lut_dtype)
        elif use_librosa:
            from SetupLUT import GenMFCC_FB_librosa
            filters = GenMFCC_FB_librosa(n_fft,
                                         mfcc_bank_cnt,
                                         Fmin=fmin,
                                         Fmax=fmax,
                                         sample_rate=sample_rate,
                                         norm=librosa_mel_norm,
                                         dtype=lut_dtype)
        else:
            from SetupLUT import GenMFCC_FB
            filters = GenMFCC_FB(n_fft,
                                 mfcc_bank_cnt,
                                 Fmin=fmin,
                                 Fmax=fmax,
                                 sample_rate=sample_rate,
                                 dtype=lut_dtype)

        MfccLUT, HeadCoeff = GenMelFilterBanksCode(filters, mfcc_bank_cnt,
                                                   fmin, fmax, lut_dtype,
                                                   data_type, name_suffix)

        with open(mfcc_bf_lut_file, "w") as f:
            f.write(MfccLUT)

    if args.save_params_header:
        with open(args.save_params_header, "w") as f:
            f.write("#define\t{:21}{:>10}\n".format("SAMPLERATE", sample_rate))
            f.write("#define\t{:21}{:>10}\n".format("FRAME_SIZE", frame_size))
            f.write("#define\t{:21}{:>10}\n".format("FRAME_STEP", frame_step))
            f.write("#define\t{:21}{:>10}\n".format("N_FFT", n_fft))
            f.write("#define\t{:21}{:>10}\n".format(
                "DATA_TYPE", 2 if dtype == "float16" else
                (3 if dtype == "float32" else
                 (1 if dtype == "fix32_scal" else 0))))
            if mfcc_bf_lut_file:
                f.write("#define\t{:21}{:>10}\n".format(
                    "MFCC_BANK_CNT", mfcc_bank_cnt))
                f.write("#define\t{:21}{:>10}\n".format("FMIN", fmin))
                f.write("#define\t{:21}{:>10}\n".format("FMAX", fmax))
                f.write("#define\t{:21}{:>10}\n".format(
                    "MFCC_COEFF_CNT", HeadCoeff + 1))
                f.write("#define\t{:21}{:>10}\n".format("N_DCT", n_dct))
Example #35
 def test_frexp_array(self):
     q = np.frexp(np.array([2., 3., 6.]) * u.m / (6. * u.m))
     assert all((_q0, _q1) == np.frexp(_d)
                for _q0, _q1, _d in zip(q[0], q[1], [1. / 3., 1. / 2., 1.]))
Example #36
def test_ufunc_frexp_u(A: dace.uint32[10]):
    Q, R = np.frexp(A)
    return Q, R
Example #37
def FormatValWithUncRounding( x, unc, uncsigfigs=1, sciformat=True ):
    """
    Rounds unc (the uncertainty) to the number of significant figures in
    uncsigfigs. Then rounds the value in x to the same decimal place as the
    value in unc. Uses the decimal package for maximal accuracy.
    Return value is a tuple containing two strings. The keyword argument
    sciformat will force scientific notation if true.

    Restrictions:
    - uncsigfigs must be a positive integer.
    - x must be a real value or floating point.
    - unc must be a real value or floating point
    """
    if not ( type(uncsigfigs) is int or
             isinstance(uncsigfigs, np.integer) ):
        raise TypeError(
            "FormatValWithUncRounding: uncsigfigs must be an integer." )

    if uncsigfigs <= 0:
        raise ValueError(
            "FormatValWithUncRounding: uncsigfigs must be positive." )

    if not np.isreal(x):
        raise TypeError(
            "FormatValWithUncRounding: x must be real." )

    if not np.isreal(unc) or unc <= 0.0:
        raise TypeError(
            "FormatValWithUncRounding: unc must be a positive real." )

    if isinstance(x, np.matrix): #Convert matrices to arrays
        x = np.asarray(x)

    #sys.stderr.write("Warning: FormatValWithUncRounding is untested.\n")

    #Pre-round unc to correctly handle cases where rounding alters the
    # most significant digit of unc.
    unc = RoundToSigFigs_fp( unc, uncsigfigs ) 

    xsgn = np.sign(x)
    absx = xsgn * x
    mantissa, binaryExponent = np.frexp( absx )
    uncmant, uncbinExponent = np.frexp( unc )
    
    decimalExponent = __logBase10of2 * binaryExponent
    uncdecExponent = __logBase10of2 * uncbinExponent
    
    omag = np.floor(decimalExponent)
    uncomag = np.floor(uncdecExponent)

    uncmant *= 10**(uncdecExponent - uncomag)
    if not np.isnan(uncmant) and uncmant < 1.0:
        uncmant *= 10.0
        uncomag -= 1.0

    mantissa *= 10**(decimalExponent - omag)
    if not np.isnan(mantissa) and mantissa < 1.0:
        mantissa *= 10.0
        omag -= 1.0

    omagdiff = omag - uncomag
    prec = uncsigfigs - 1 + int(omagdiff)


    mantissa, uncOut = ( np.around( mantissa, decimals=prec ),
                         np.around( unc * 10**(-omag), decimals=prec ) )

    if sciformat:
        def formatter( m, u, e, prec ):
            s = "{:." + str(max(prec, 0)) + "f}e{:+d}"
            return (s.format(m, int(e)), s.format(u, int(e)))
    else:
        def formatter( m, u, e, prec ):
            s = "{:." + str(max(prec, 0)) + "g}"
            return (s.format(m*10.0**e), s.format(u*10**e))
    
    return formatter( mantissa, uncOut, omag, prec )
Example #38
for i in range(0, npoly - 1):
    A[:, i + 1] = A[:, i] * x

A = np.matrix(A)  # Convert A to numpy matrix
d = np.matrix(y).transpose()

# SVD decomposition
u, s, vt = np.linalg.svd(A, False)
sinv = np.matrix(np.diag(
    1.0 / s))  #s comes back as a 1-d array, turn it into a 2-d matrix

fit_params = vt.transpose() * sinv * (u.transpose() * d
                                      )  # Best fit Chebyshev coeffs.
poly_fit = A * fit_params  # Best fit truncated Chebyshev poly.

##############################
#######   PLOTTING     #######
##############################

####################################
#######   Return log2(t)     #######
####################################

# Break down the input number into x = y1*2**y2, where y1 is in [0,1].
y1, y2 = np.frexp(t)

# Take the log2(x)
answer = np.polynomial.chebyshev.chebval(y1, cheb_fit_params)[0] + y2

# Print the result to the user
print("log2(t) = ", answer)
Example #39
def depart(in_min, in_max):
    tmp_l = []
    a = np.frexp(in_min)
    b = np.frexp(in_max)
    tmp_j = 0
    if (in_min < 0) & (in_max > 0):
        if in_min >= -1.0:
            tmp_l.append([in_min, 0])
        else:
            for i in range(1, a[1] + 1):
                tmp_i = np.ldexp(-0.5, i)
                tmp_l.append([tmp_i, tmp_j])
                tmp_j = tmp_i
            if in_min != tmp_j:
                tmp_l.append([in_min, tmp_j])
        tmp_j = 0
        if in_max <= 1.0:
            tmp_l.append([0, in_max])
        else:
            for i in range(1, b[1] + 1):
                tmp_i = np.ldexp(0.5, i)
                tmp_l.append([tmp_j, tmp_i])
                tmp_j = tmp_i
            if in_max != tmp_j:
                tmp_l.append([tmp_j, in_max])
    tmp_j = 0
    if (in_min < 0) & (0 >= in_max):
        if in_min >= -1:
            tmp_l.append([in_min, in_max])
            return tmp_l
        else:
            if in_max > -1:
                tmp_l.append([-1, in_max])
                tmp_j = -1.0
                for i in range(2, a[1] + 1):
                    tmp_i = np.ldexp(-0.5, i)
                    tmp_l.append([tmp_i, tmp_j])
                    tmp_j = tmp_i
                if in_min != tmp_j:
                    tmp_l.append([in_min, tmp_j])
            else:
                if a[1] == b[1]:
                    tmp_l.append([in_min, in_max])
                    return tmp_l
                else:
                    tmp_j = np.ldexp(-0.5, b[1] + 1)
                    tmp_l.append([tmp_j, in_max])
                    if tmp_j != in_min:
                        for i in range(b[1] + 2, a[1] + 1):
                            tmp_i = np.ldexp(-0.5, i)
                            tmp_l.append([tmp_i, tmp_j])
                            tmp_j = tmp_i
                        if in_min != tmp_j:
                            tmp_l.append([in_min, tmp_j])
    tmp_j = 0
    if (in_min >= 0) & (in_max > 0):
        if in_max <= 1:
            tmp_l.append([in_min, in_max])
            return tmp_l
        else:
            if in_min < 1:
                tmp_l.append([in_min, 1])
                tmp_j = 1.0
                for i in range(2, b[1] + 1):
                    tmp_i = np.ldexp(0.5, i)
                    tmp_l.append([tmp_j, tmp_i])
                    tmp_j = tmp_i
                if in_max != tmp_j:
                    tmp_l.append([tmp_j, in_max])
            else:
                if a[1] == b[1]:
                    tmp_l.append([in_min, in_max])
                    return tmp_l
                else:
                    tmp_j = np.ldexp(0.5, a[1] + 1)
                    tmp_l.append([in_min, tmp_j])
                    if tmp_j != in_max:
                        for i in range(a[1] + 2, b[1] + 1):
                            tmp_i = np.ldexp(0.5, i)
                            tmp_l.append([tmp_j, tmp_i])
                            tmp_j = tmp_i
                        if in_max != tmp_j:
                            tmp_l.append([tmp_j, in_max])
    return tmp_l
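A hypothetical call for illustration; every interior endpoint is +/-2**k, and only the outermost bounds keep their original values:

print(depart(-3.0, 5.0))
# [[-1.0, 0], [-2.0, -1.0], [-3.0, -2.0], [0, 1.0], [1.0, 2.0], [2.0, 4.0], [4.0, 5.0]]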
Example #40
import numpy as np

x = np.arange(9)
y1, y2 = np.frexp(x)
print("frexp mantissas: ", y1)
print("frexp exponents: ", y2)
print("result: ", y1 * 2**y2)
Example #41
import numpy as np
from scipy.linalg import toeplitz, hankel

def pbdesign(n):
    """
    Generate a Plackett-Burman design
    
    Parameter
    ---------
    n : int
        The number of factors to create a matrix for.
    
    Returns
    -------
    H : 2d-array
        An orthogonal design matrix with n columns, one for each factor, and
        the number of rows being the next multiple of 4 higher than n (e.g.,
        for 1-3 factors there are 4 rows, for 4-7 factors there are 8 rows,
        etc.)
    
    Example
    -------
    
    A 3-factor design::
    
        >>> pbdesign(3)
        array([[-1., -1.,  1.],
               [ 1., -1., -1.],
               [-1.,  1., -1.],
               [ 1.,  1.,  1.]])
       
    A 5-factor design::
    
        >>> pbdesign(5)
        array([[-1., -1.,  1., -1.,  1.],
               [ 1., -1., -1., -1., -1.],
               [-1.,  1., -1., -1.,  1.],
               [ 1.,  1.,  1., -1., -1.],
               [-1., -1.,  1.,  1., -1.],
               [ 1., -1., -1.,  1.,  1.],
               [-1.,  1., -1.,  1., -1.],
               [ 1.,  1.,  1.,  1.,  1.]])
       
    """
    assert n>0, 'Number of factors must be a positive integer'
    keep = int(n)
    n = 4*(int(n/4) + 1)  # calculate the correct number of rows (multiple of 4)
    f, e = np.frexp([n, n/12., n/20.])
    k = [idx for idx, val in enumerate(np.logical_and(f==0.5, e>0)) if val]
    
    assert isinstance(n, int) and k != [], 'Invalid inputs. n must be of the form 2**k, 12*2**k, or 20*2**k.'
    
    k = k[0]
    e = e[k] - 1
    
    if k==0:  # N = 1*2**e
        H = np.ones((1, 1))
    elif k==1:  # N = 12*2**e
        H = np.vstack((np.ones((1, 12)), np.hstack((np.ones((11, 1)), 
            toeplitz([-1, -1, 1, -1, -1, -1, 1, 1, 1, -1, 1],
                     [-1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1])))))
    elif k==2:  # N = 20*2**e
        H = np.vstack((np.ones((1, 20)), np.hstack((np.ones((19, 1)),
            hankel(
            [-1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1],
            [1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1])
            ))))
    
    # Kronecker product construction
    for i in range(e):
        H = np.vstack((np.hstack((H, H)), np.hstack((H, -H))))
    
    # Reduce the size of the matrix as needed
    H = H[:, 1:(keep + 1)]
    
    return np.flipud(H)
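A quick property check, for illustration: the columns of a Plackett-Burman design are mutually orthogonal, so H.T @ H comes out as a scaled identity (8 * I for the 5-factor, 8-run design):

H = pbdesign(5)
print(H.T @ H)  # 8 on the diagonal, 0 elsewhere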
    
    
Example #42
    def func(x_err, chop=1):
        x, err = x_err
        frac = x * m
        k = (frac < 0.5) + 1  # top=1, bot=2
        frac *= k  # normalized
        if term:
            chop += k * term
        return frac, chop / frac + err

    if not const:
        return func  # factor <= 96 bits
    return lambda x_err: func((x_err[0], x_err[1] + const))


s73 = scale_func(1e-73 / 2**-242, 0.2208, 0.2987)
s60 = scale_func(1e+60 / 2**+200, 0.0994, 0.2798)
s25 = scale_func(1e+25 / 2**84, 0.2503)
s12 = scale_func(1e+12 / 2**40)
si = np.frexp(10.**np.arange(1, 13))[0]


def abs_err(x_err, scales, x0_dec_err=None):
    x, err = x_err
    frac = x
    for mi in scales:
        xf = x * mi
        xf *= (xf < 0.5) + 1
        frac = xf if frac is x else np.maximum(frac, xf)
    err = err * frac  # rel err -> abs err
    chop = bool(len(scales))
    if x0_dec_err is not None:  # Add dec_err generated ULP
        err = err + np.floor(x0_dec_err * frac)
    if SHOW_ABS_ERR:
        for i in err:
            print(i + chop)
Example #43
 ('tanh', 'double(double)', np.tanh),
 ('arcsinh', 'double(double)', np.arcsinh),
 ('arccosh', 'double(double)', np.arccosh),
 ('arctanh', 'double(double)', np.arctanh),
 # Exp-log functions:
 ('exp', 'double(double)', np.exp),
 ('expm1', 'double(double)', np.expm1),
 ('exp2', 'double(double)', np.exp2),
 ('log', 'double(double)', np.log),
 ('log10', 'double(double)', np.log10),
 ('log2', 'double(double)', np.log2),
 ('log1p', 'double(double)', np.log1p),
 ('logaddexp', 'double(double, double)', np.logaddexp),
 ('logaddexp2', 'double(double, double)', np.logaddexp2),
 ('ldexp', 'double(double, int)', np.ldexp),
 ('frexp0', 'double(double)', lambda x: np.frexp(x)[0]),
 # Rounding functions:
 ('around', 'double(double)', lambda x: np.around(x)),
 (
     'round2',  # round and round_ are not good names
     'double(double)',
     lambda x: np.round_(x)),  # force arity to 1
 ('floor', 'double(double)', np.floor),
 ('ceil', 'double(double)', np.ceil),
 ('trunc', 'double(double)', np.trunc),
 ('rint', 'double(double)', np.rint),
 ('spacing', 'double(double)', np.spacing),
 ('nextafter', 'double(double, double)', np.nextafter),
 # Rational functions:
 ('gcd', 'int(int, int)', np.gcd),
 ('lcm', 'int(int, int)', np.lcm),
Example #44
 def g(x):
     # Reduce x to its frexp mantissa, evaluate the fitted polynomial
     # there, then rescale by a precomputed per-exponent factor.
     x, exp = numpy.frexp(x)
     return numpy.array(utils.poly.eval_multi(p, mpmath.matrix(x)),
                        numpy.double) * post_factor[exp - exp0]
Example #45
    def iterate(self):
        """Perform one step in the algorithm.

        Implements Algorithm 4.1(k=1) or 4.2(k=2) in [APS1995]
        """
        self.iterations += 1
        eps = np.finfo(float).eps
        d, fd, e, fe = self.d, self.fd, self.e, self.fe
        ab_width = self.ab[1] - self.ab[0]  # Need the start width below
        c = None

        for nsteps in range(2, self.k+2):
            # If the f-values are sufficiently separated, perform an inverse
            # polynomial interpolation step.  Otherwise nsteps repeats of
            # an approximate Newton-Raphson step.
            if _notclose(self.fab + [fd, fe], rtol=0, atol=32*eps):
                c0 = _inverse_poly_zero(self.ab[0], self.ab[1], d, e,
                                        self.fab[0], self.fab[1], fd, fe)
                if self.ab[0] < c0 < self.ab[1]:
                    c = c0
            if c is None:
                c = _newton_quadratic(self.ab, self.fab, d, fd, nsteps)

            fc = self._callf(c)
            if fc == 0:
                return _ECONVERGED, c

            # re-bracket
            e, fe = d, fd
            d, fd = self._update_bracket(c, fc)

        # u is the endpoint with the smallest f-value
        uix = (0 if np.abs(self.fab[0]) < np.abs(self.fab[1]) else 1)
        u, fu = self.ab[uix], self.fab[uix]

        _, A = _compute_divided_differences(self.ab, self.fab,
                                            forward=(uix == 0), full=False)
        c = u - 2 * fu / A
        if np.abs(c - u) > 0.5 * (self.ab[1] - self.ab[0]):
            c = sum(self.ab) / 2.0
        else:
            if np.isclose(c, u, rtol=eps, atol=0):
                # c didn't change (much).
                # Either because the f-values at the endpoints have vastly
                # differing magnitudes, or because the root is very close to
                # that endpoint
                frs = np.frexp(self.fab)[1]
                if frs[uix] < frs[1 - uix] - 50:  # Differ by more than 2**50
                    c = (31 * self.ab[uix] + self.ab[1 - uix]) / 32
                else:
                    # Make a bigger adjustment, about the
                    # size of the requested tolerance.
                    mm = (1 if uix == 0 else -1)
                    adj = mm * np.abs(c) * self.rtol + mm * self.xtol
                    c = u + adj
                if not self.ab[0] < c < self.ab[1]:
                    c = sum(self.ab) / 2.0

        fc = self._callf(c)
        if fc == 0:
            return _ECONVERGED, c

        e, fe = d, fd
        d, fd = self._update_bracket(c, fc)

        # If the width of the new interval did not decrease enough, bisect
        if self.ab[1] - self.ab[0] > self._MU * ab_width:
            e, fe = d, fd
            z = sum(self.ab) / 2.0
            fz = self._callf(z)
            if fz == 0:
                return _ECONVERGED, z
            d, fd = self._update_bracket(z, fz)

        # Record d and e for next iteration
        self.d, self.fd = d, fd
        self.e, self.fe = e, fe

        status, xn = self.get_status()
        return status, xn
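The np.frexp(self.fab)[1] line above compares the two f-values' orders of magnitude using only their binary exponents, with no division. A standalone sketch:

import numpy as np

fa, fb = 1e-20, 3.0
exps = np.frexp([fa, fb])[1]
print(exps, exps[0] < exps[1] - 50)  # [-66   2] True -> magnitudes differ by more than 2**50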
Example #46
 def test_frexp_scalar(self):
     q = np.frexp(3. * u.m / (6. * u.m))
     assert q == (np.array(0.5), np.array(0.0))
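Note that astropy only lets frexp through for dimensionless quantities, and the result comes back as plain arrays rather than Quantities. A sketch, assuming astropy.units is available:

import numpy as np
import astropy.units as u

m, e = np.frexp((3. * u.m) / (6. * u.m))  # dimensionless 0.5
print(m, e)  # 0.5 0
# np.frexp(3. * u.m) would raise -- frexp is only defined for dimensionless input.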
Example #47
        if _uint16:
            ext_dat += _bzero
        # Check to see whether there are any NaN's or infs which might indicate
        # a byte-swapping problem, such as being written out on little-endian
        #   and being read in on big-endian or vice-versa.
        if _code.find("float") >= 0 and (numpy.any(numpy.isnan(ext_dat)) or numpy.any(numpy.isinf(ext_dat))):
            errormsg += "===================================\n"
            errormsg += "= WARNING:                        =\n"
            errormsg += "=  Input image:                   =\n"
            errormsg += input + "[%d]\n" % (k + 1)
            errormsg += "=  had floating point data values =\n"
            errormsg += "=  of NaN and/or Inf.             =\n"
            errormsg += "===================================\n"
        elif _code.find("int") >= 0:
            # Check INT data for max values
            ext_dat_frac, ext_dat_exp = numpy.frexp(ext_dat)
            if ext_dat_exp.max() == int(_bitpix) - 1:
                # Potential problems with byteswapping
                errormsg += "===================================\n"
                errormsg += "= WARNING:                        =\n"
                errormsg += "=  Input image:                   =\n"
                errormsg += input + "[%d]\n" % (k + 1)
                errormsg += "=  had integer data values        =\n"
                errormsg += "=  with maximum bitvalues.        =\n"
                errormsg += "===================================\n"

        ext_hdu = fits.ImageHDU(data=ext_dat)

        rec = numpy.fromstring(dat[loc + data_size : loc + group_size], dtype=formats)

        loc += group_size
Example #48
 def _compare_losses(self, loss_1, loss_2, delta=1.e-2):
   (digits_1, exponent_1) = np.frexp(loss_1)
   (digits_2, exponent_2) = np.frexp(loss_2)
   self.assertEqual(exponent_1, exponent_2)
   self.assertAlmostEqual(digits_1, digits_2, delta=delta)
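The comparison above is scale-free: the binary exponents must match exactly, and the tolerance applies only to the mantissas. A standalone sketch of the same idea:

import numpy as np

def losses_close(a, b, delta=1e-2):
    ma, ea = np.frexp(a)
    mb, eb = np.frexp(b)
    return ea == eb and abs(ma - mb) <= delta

print(losses_close(1000.0, 1004.0))  # True: same exponent, mantissas differ by ~0.004
print(losses_close(1000.0, 1100.0))  # False: 1100 crosses the power-of-two boundary at 1024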
Example #49
def readgeis(input):
    """Input GEIS files "input" will be read and a HDUList object will
       be returned.

       The user can use the writeto method to write the HDUList object to
       a FITS file.
    """

    global dat
    cardLen = fits.Card.length

    # input file(s) must be of the form *.??h and *.??d
    if input[-1] != 'h' or input[-4] != '.':
        raise "Illegal input GEIS file name %s" % input

    data_file = input[:-1] + 'd'

    _os = sys.platform
    if (_os[:5] in ('linux', 'win32', 'sunos') or
            _os[:3] == 'osf' or _os[:6] == 'darwin'):
        bytes_per_line = cardLen + 1
    else:
        raise ValueError("Platform %s is not supported (yet)." % _os)

    geis_fmt = {'REAL': 'f', 'INTEGER': 'i', 'LOGICAL': 'i', 'CHARACTER': 'S'}
    end_card = 'END' + ' ' * (cardLen - 3)

    # open input file
    im = open(input)

    # Generate the primary HDU
    cards = []
    while 1:
        line = im.read(bytes_per_line)[:cardLen]
        line = line[:8].upper() + line[8:]
        if line == end_card:
            break
        cards.append(fits.Card.fromstring(line))

    phdr = fits.Header(cards)
    im.close()

    _naxis0 = phdr.get('NAXIS', 0)
    _naxis = [phdr['NAXIS' + str(j)] for j in range(1, _naxis0 + 1)]
    _naxis.insert(0, _naxis0)
    _bitpix = phdr['BITPIX']
    _psize = phdr['PSIZE']
    if phdr['DATATYPE'][:4] == 'REAL':
        _bitpix = -_bitpix
    if _naxis0 > 0:
        size = reduce(lambda x, y: x * y, _naxis[1:])
        data_size = abs(_bitpix) * size // 8
    else:
        data_size = 0
    group_size = data_size + _psize // 8

    # decode the group parameter definitions,
    # group parameters will become extension header
    groups = phdr['GROUPS']
    gcount = phdr['GCOUNT']
    pcount = phdr['PCOUNT']

    formats = []
    bools = []
    floats = []
    _range = range(1, pcount + 1)
    key = [phdr['PTYPE' + str(j)] for j in _range]
    comm = [phdr.cards['PTYPE' + str(j)].comment for j in _range]

    # delete group parameter definition header keywords
    _list = ['PTYPE'+str(j) for j in _range] + \
            ['PDTYPE'+str(j) for j in _range] + \
            ['PSIZE'+str(j) for j in _range] + \
            ['DATATYPE', 'PSIZE', 'GCOUNT', 'PCOUNT', 'BSCALE', 'BZERO']

    # Construct record array formats for the group parameters
    # as interpreted from the Primary header file
    for i in range(1, pcount + 1):
        ptype = key[i - 1]
        pdtype = phdr['PDTYPE' + str(i)]
        star = pdtype.find('*')
        _type = pdtype[:star]
        _bytes = pdtype[star + 1:]

        # collect boolean keywords since they need special attention later

        if _type == 'LOGICAL':
            bools.append(i)
        if pdtype == 'REAL*4':
            floats.append(i)

        fmt = geis_fmt[_type] + _bytes
        formats.append((ptype, fmt))

    _shape = _naxis[1:]
    _shape.reverse()
    _code = fits.hdu.ImageHDU.NumCode[_bitpix]
    _bscale = phdr.get('BSCALE', 1)
    _bzero = phdr.get('BZERO', 0)
    if phdr['DATATYPE'][:10] == 'UNSIGNED*2':
        _uint16 = 1
        _bzero = 32768
    else:
        _uint16 = 0

    # delete from the end, so it will not conflict with previous delete
    for i in range(len(phdr) - 1, -1, -1):
        if phdr.cards[i].keyword in _list:
            del phdr[i]

    # clean up other primary header keywords
    phdr['SIMPLE'] = True
    phdr['BITPIX'] = 16
    phdr['GROUPS'] = False
    _after = 'NAXIS'
    if _naxis0 > 0:
        _after += str(_naxis0)
    phdr.set('EXTEND',
             value=True,
             comment="FITS dataset may contain extensions",
             after=_after)
    phdr.set('NEXTEND', value=gcount, comment="Number of standard extensions")

    hdulist = fits.HDUList([fits.PrimaryHDU(header=phdr, data=None)])

    # Use copy-on-write for all data types since byteswap may be needed
    # in some platforms.
    f1 = open(data_file, mode='rb')
    dat = f1.read()
    #    dat = memmap(data_file, mode='c')
    hdulist.mmobject = dat

    errormsg = ""

    loc = 0
    for k in range(gcount):
        ext_dat = numpy.fromstring(dat[loc:loc + data_size], dtype=_code)
        ext_dat = ext_dat.reshape(_shape)
        if _uint16:
            ext_dat += _bzero
        # Check to see whether there are any NaN's or infs which might indicate
        # a byte-swapping problem, such as being written out on little-endian
        #   and being read in on big-endian or vice-versa.
        if _code.find('float') >= 0 and \
            (numpy.any(numpy.isnan(ext_dat)) or numpy.any(numpy.isinf(ext_dat))):
            errormsg += "===================================\n"
            errormsg += "= WARNING:                        =\n"
            errormsg += "=  Input image:                   =\n"
            errormsg += input + "[%d]\n" % (k + 1)
            errormsg += "=  had floating point data values =\n"
            errormsg += "=  of NaN and/or Inf.             =\n"
            errormsg += "===================================\n"
        elif _code.find('int') >= 0:
            # Check INT data for max values
            ext_dat_frac, ext_dat_exp = numpy.frexp(ext_dat)
            if ext_dat_exp.max() == int(_bitpix) - 1:
                # Potential problems with byteswapping
                errormsg += "===================================\n"
                errormsg += "= WARNING:                        =\n"
                errormsg += "=  Input image:                   =\n"
                errormsg += input + "[%d]\n" % (k + 1)
                errormsg += "=  had integer data values        =\n"
                errormsg += "=  with maximum bitvalues.        =\n"
                errormsg += "===================================\n"

        ext_hdu = fits.ImageHDU(data=ext_dat)

        rec = numpy.fromstring(dat[loc + data_size:loc + group_size],
                               dtype=formats)

        loc += group_size

        # Create separate PyFITS Card objects for each entry in 'rec'
        for i in range(1, pcount + 1):
            #val = rec.field(i-1)[0]
            val = rec[0][i - 1]
            if val.dtype.kind == 'S':
                val = val.decode('ascii')

            if i in bools:
                if val:
                    val = True
                else:
                    val = False

            if i in floats:
                # use fromstring, format in Card is deprecated in pyfits 0.9
                _str = '%-8s= %20.7G / %s' % (key[i - 1], val, comm[i - 1])
                _card = fits.Card.fromstring(_str)
            else:
                _card = fits.Card(keyword=key[i - 1],
                                  value=val,
                                  comment=comm[i - 1])

            ext_hdu.header.append(_card)

        # deal with bscale/bzero
        if (_bscale != 1 or _bzero != 0):
            ext_hdu.header['BSCALE'] = _bscale
            ext_hdu.header['BZERO'] = _bzero

        hdulist.append(ext_hdu)

    if errormsg != "":
        errormsg += "===================================\n"
        errormsg += "=  This file may have been        =\n"
        errormsg += "=  written out on a platform      =\n"
        errormsg += "=  with a different byte-order.   =\n"
        errormsg += "=                                 =\n"
        errormsg += "=  Please verify that the values  =\n"
        errormsg += "=  are correct or apply the       =\n"
        errormsg += "=  '.byteswap()' method.          =\n"
        errormsg += "===================================\n"
        print(errormsg)

    f1.close()
    stsci(hdulist)
    return hdulist
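The frexp check above flags likely byte-swapping in integer data: misread bytes tend to set the high-order bits, which frexp exposes as a binary exponent equal to bitpix - 1. A minimal sketch with hypothetical 16-bit values:

import numpy as np

good = np.array([1, 2, 300, 1200], dtype='>i2')  # big-endian on disk
swapped = good.view('<i2')                       # same bytes, misread as little-endian

_, exp_good = np.frexp(good.astype(np.float64))
_, exp_swapped = np.frexp(swapped.astype(np.float64))
print(exp_good.max(), exp_swapped.max())  # 11 vs 15 (== bitpix - 1)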
Example #50
 def check(result):
     assert_(type(result) is tuple)
     assert_equal(len(result), 2)
     mantissa, exponent = np.frexp(2**-3)
     _assert_equal_type_and_value(result[0], ArrayLike(mantissa))
     _assert_equal_type_and_value(result[1], ArrayLike(exponent))
Example #51
 def test_frexp_array(self):
     q = np.frexp(np.array([2., 3., 6.]) * u.m / (6. * u.m))
     assert all((_q0, _q1) == np.frexp(_d) for _q0, _q1, _d
                in zip(q[0], q[1], [1. / 3., 1. / 2., 1.]))
Example #52
def test_ufunc_frexp_c(A: dace.complex64[10]):
    Q, R = np.frexp(A)
    return Q, R
Example #53
def convert(input):

    """Input GEIS files "input" will be read and a HDUList object will
       be returned that matches the waiver-FITS format written out by 'stwfits' in IRAF.

       The user can use the writeto method to write the HDUList object to
       a FITS file.
    """

    global dat
    cardLen = fits.Card.length

    # input file(s) must be of the form *.??h and *.??d
    if input[-1] != 'h' or input[-4] != '.':
        raise "Illegal input GEIS file name %s" % input

    data_file = input[:-1]+'d'

    _os = sys.platform
    if (_os[:5] in ('linux', 'win32', 'sunos') or
            _os[:3] == 'osf' or _os[:6] == 'darwin'):
        bytes_per_line = cardLen + 1
    else:
        raise ValueError("Platform %s is not supported (yet)." % _os)

    end_card = 'END'+' '* (cardLen-3)

    # open input file
    im = open(input)

    # Generate the primary HDU
    cards = []
    while 1:
        line = im.read(bytes_per_line)[:cardLen]
        line = line[:8].upper() + line[8:]
        if line == end_card:
            break
        cards.append(fits.Card.fromstring(line))

    phdr = fits.Header(cards)
    im.close()

    # Determine starting point for adding Group Parameter Block keywords to Primary header
    phdr_indx = phdr.index('PSIZE')

    _naxis0 = phdr.get('NAXIS', 0)
    _naxis = [phdr['NAXIS'+str(j)] for j in range(1, _naxis0+1)]
    _naxis.insert(0, _naxis0)
    _bitpix = phdr['BITPIX']
    _psize = phdr['PSIZE']
    if phdr['DATATYPE'][:4] == 'REAL':
        _bitpix = -_bitpix
    if _naxis0 > 0:
        size = reduce(lambda x,y:x*y, _naxis[1:])
        data_size = abs(_bitpix) * size // 8
    else:
        data_size = 0
    group_size = data_size + _psize // 8

    # decode the group parameter definitions,
    # group parameters will become extension table
    groups = phdr['GROUPS']
    gcount = phdr['GCOUNT']
    pcount = phdr['PCOUNT']

    formats = []
    bools = []
    floats = []
    cols = [] # column definitions used for extension table
    cols_dict = {} # provides name access to Column defs
    _range = range(1, pcount+1)
    key = [phdr['PTYPE'+str(j)] for j in _range]
    comm = [phdr.cards['PTYPE'+str(j)].comment for j in _range]

    # delete group parameter definition header keywords
    _list = ['PTYPE'+str(j) for j in _range] + \
            ['PDTYPE'+str(j) for j in _range] + \
            ['PSIZE'+str(j) for j in _range] + \
            ['DATATYPE', 'PSIZE', 'GCOUNT', 'PCOUNT', 'BSCALE', 'BZERO']

    # Construct record array formats for the group parameters
    # as interpreted from the Primary header file
    for i in range(1, pcount+1):
        ptype = key[i-1]
        pdtype = phdr['PDTYPE'+str(i)]
        star = pdtype.find('*')
        _type = pdtype[:star]
        _bytes = pdtype[star+1:]

        # collect boolean keywords since they need special attention later

        if _type == 'LOGICAL':
            bools.append(i)
        if pdtype == 'REAL*4':
            floats.append(i)

        # identify keywords which require conversion to special units
        if ptype in kw_DOUBLE:
            _type = 'DOUBLE'

        fmt = geis_fmt[_type] + _bytes
        formats.append((ptype,fmt))

        # Set up definitions for use in creating the group-parameter block table
        nrpt = ''
        nbits = str(int(_bytes)*8)
        if 'CHAR' in _type:
            nrpt = _bytes
            nbits = _bytes

        afmt = cols_fmt[_type]+ nbits
        if 'LOGICAL' in _type:
            afmt = cols_fmt[_type]
        cfmt = cols_pfmt[_type]+nrpt

        #print 'Column format for ',ptype,': ',cfmt,' with dtype of ',afmt
        cols_dict[ptype] = fits.Column(name=ptype,format=cfmt,array=numpy.zeros(gcount,dtype=afmt))
        cols.append(cols_dict[ptype]) # This keeps the columns in order

    _shape = _naxis[1:]
    _shape.reverse()
    _code = fits.hdu.ImageHDU.NumCode[_bitpix]
    _bscale = phdr.get('BSCALE', 1)
    _bzero = phdr.get('BZERO', 0)

    if phdr['DATATYPE'][:10] == 'UNSIGNED*2':
        _uint16 = 1
        _bzero = 32768
    else:
        _uint16 = 0

    # delete from the end, so it will not conflict with previous delete
    for i in range(len(phdr)-1, -1, -1):
        if phdr.cards[i].keyword in _list:
            del phdr[i]

    # clean up other primary header keywords
    phdr['SIMPLE'] = True
    phdr['GROUPS'] = False
    _after = 'NAXIS'
    if _naxis0 > 0:
        _after += str(_naxis0)
    phdr.set('EXTEND', value=True, comment="FITS dataset may contain extensions", after=_after)

    # Use copy-on-write for all data types since byteswap may be needed
    # in some platforms.
    f1 = open(data_file, mode='rb')
    dat = f1.read()
    errormsg = ""

    # Define data array for all groups
    arr_shape = _naxis[:]
    arr_shape[0] = gcount
    arr_stack = numpy.zeros(arr_shape,dtype=_code)

    loc = 0
    for k in range(gcount):
        ext_dat = numpy.fromstring(dat[loc:loc+data_size], dtype=_code)
        ext_dat = ext_dat.reshape(_shape)
        if _uint16:
            ext_dat += _bzero
        # Check to see whether there are any NaN's or infs which might indicate
        # a byte-swapping problem, such as being written out on little-endian
        #   and being read in on big-endian or vice-versa.
        if _code.find('float') >= 0 and \
            (numpy.any(numpy.isnan(ext_dat)) or numpy.any(numpy.isinf(ext_dat))):
            errormsg += "===================================\n"
            errormsg += "= WARNING:                        =\n"
            errormsg += "=  Input image:                   =\n"
            errormsg += input+"[%d]\n"%(k+1)
            errormsg += "=  had floating point data values =\n"
            errormsg += "=  of NaN and/or Inf.             =\n"
            errormsg += "===================================\n"
        elif _code.find('int') >= 0:
            # Check INT data for max values
            ext_dat_frac,ext_dat_exp = numpy.frexp(ext_dat)
            if ext_dat_exp.max() == int(_bitpix) - 1:
                # Potential problems with byteswapping
                errormsg += "===================================\n"
                errormsg += "= WARNING:                        =\n"
                errormsg += "=  Input image:                   =\n"
                errormsg += input+"[%d]\n"%(k+1)
                errormsg += "=  had integer data values        =\n"
                errormsg += "=  with maximum bitvalues.        =\n"
                errormsg += "===================================\n"

        arr_stack[k] = ext_dat
        #ext_hdu = fits.hdu.ImageHDU(data=ext_dat)

        rec = numpy.fromstring(dat[loc+data_size:loc+group_size], dtype=formats)

        loc += group_size

        # Add data from this GPB to table
        for i in range(1, pcount+1):
            val = rec[0][i-1]
            if i in bools:
                if val:
                    val = 'T'
                else:
                    val = 'F'
            cols[i-1].array[k] = val

        # Based on the first group, add GPB keywords to PRIMARY header
        if k == 0:
            # Create separate PyFITS Card objects for each entry in 'rec'
            # and update Primary HDU with these keywords after PSIZE
            for i in range(1, pcount+1):
                #val = rec.field(i-1)[0]
                val = rec[0][i-1]
                if val.dtype.kind == 'S':
                    val = val.decode('ascii')

                if i in bools:
                    if val:
                        val = True
                    else:
                        val = False
                    
                elif i in floats:
                    # use fromstring, format in Card is deprecated in pyfits 0.9
                    _str = '%-8s= %20.13G / %s' % (key[i-1], val, comm[i-1])
                    _card = fits.Card.fromstring(_str)
                    
                else:
                    _card = fits.Card(key=key[i-1], value=val, comment=comm[i-1])

                phdr.insert(phdr_indx+i, _card)

            # deal with bscale/bzero
            if (_bscale != 1 or _bzero != 0):
                phdr['BSCALE'] = _bscale
                phdr['BZERO'] = _bzero

        #hdulist.append(ext_hdu)
    # Define new table based on Column definitions
    ext_table = fits.new_table(cols,tbtype='TableHDU')
    ext_table.header.set('EXTNAME', value=input+'.tab', after='TFIELDS')
    # Add column descriptions to header of table extension to match stwfits output
    for i in range(len(key)):
        ext_table.header.append(fits.Card(keyword=key[i], value=comm[i]))

    if errormsg != "":
        errormsg += "===================================\n"
        errormsg += "=  This file may have been        =\n"
        errormsg += "=  written out on a platform      =\n"
        errormsg += "=  with a different byte-order.   =\n"
        errormsg += "=                                 =\n"
        errormsg += "=  Please verify that the values  =\n"
        errormsg += "=  are correct or apply the       =\n"
        errormsg += "=  '.byteswap()' method.          =\n"
        errormsg += "===================================\n"
        print(errormsg)

    f1.close()

    hdulist = fits.HDUList([fits.PrimaryHDU(header=phdr, data=arr_stack)])
    hdulist.append(ext_table)

    stsci2(hdulist,input)
    return hdulist
Example #54
def test_ufunc_frexp_f(A: dace.float32[10]):
    Q, R = np.frexp(A)
    return Q, R
Example #56
def normQuant(obj, sigfigs=None, full_norm=True):
    """Normalize quantities such that two things that *should* be equal are
    returned as identical objects.

    Handles floating point numbers, pint quantities, uncertainties, and
    combinations thereof as standalone objects or in sequences, dicts, or numpy
    ndarrays. Numerical precision issues and equal quantities represented in
    differently-scaled or different systems of units come out identically.

    Outputs from this function (**not** the inputs) deemed to be equal by the
    above logic will compare to be equal (via the `==` operator and via
    `pisa.utils.comparisons.recursiveEquality`) and will also hash to equal
    values (via `pisa.utils.hash.hash_obj`).

    Parameters
    ----------
    obj
        Object to be normalized.

    sigfigs : None or int > 0
        Number of digits to which to round numbers' significands; if None, do
        not round numbers.

    full_norm : bool
        If True, does full translation and normalization which is good across
        independent invocations and is careful about normalizing units, etc.
        If false, certain assumptions are made that modify the behavior
        described below in the Notes section which help speed things up in the
        case of e.g. a minimizer run, where we know certain things won't
        change:
        * Units are not normalized. They are assumed to stay the same from
          run to run.
        * sigfigs are not respected; full significant figures are returned
          (since it takes time to round all values appropriately).

    Returns
    -------
    normed_obj : object roughly of same type as input `obj`
        Simple types are returned as the same type as at the input, Numpy
        ndarrays are returned in the same shape and representation as the
        input, Mappings (dicts) are returned as OrderedDict, and all other
        sequences or iterables are returned as (possibly nested) lists.

    Notes
    -----
    Conversion logic by `obj` type or types found within `obj`:

    * **Sequences and OrderedDicts** (but not numpy arrays) are iterated
      through recursively.
    * **Mappings without ordering** (e.g. dicts) are iterated through
      recursively after sorting their keys, and are returned as
      OrderedDicts (such that the output is always consistent when
      serialized).
    * **Sequences** (not numpy arrays) are iterated through recursively.
    * **Numpy ndarrays** are treated as the below data types (according to the
      array's dtype).
    * **Simple objects** (non-floating point / non-sequence / non-numpy / etc.)
      are returned unaltered (e.g. strings).
    * **Pint quantities** (numbers with units): Convert to their base units.
    * **Floating-point numbers** (including the converted pint quantities):
      Round values to `sigfigs` significant figures.
    * **Numbers with uncertainties** (via the `uncertainties` module) have
      their nominal values rounded as above but their standard deviations are
      rounded to the same order of magnitude (*not* number of significant
      figures) as the nominal.
      Therefore passing obj=10.23+/-0.25 and sigfigs=2 returns 10+/-0.0.
      Note that **correlations are lost** in the outputs of this function, so
      equality of the output requires merely having equal nominal values and
      equal standard deviations.
      The calculations leading to these equal numbers might have used
      independent random variables to arrive at them, however, and so the
      `uncertainties` module would have evaluated them to be unequal. [1]

    To achieve rounding that masks floating point precision issues, set
    `sigfigs` to a value *less than* the number of decimal digits used for the
    significand of the calculation floating point precision.

    For reference, the IEEE 754 floating point standard [2] uses the following:

    * FP16 (half precision): **3.31** significand decimal digits (11 bits)
    * FP32 (single precision): **7.22** significand decimal digits (24 bits)
    * FP64 (double precision): **15.95** significand decimal digits (53 bits)
    * FP128 (quad precision): **34.02** significand decimal digits (113 bits)

    Logic for rounding the significand for numpy arrays was derived from [3].

    References
    ----------
    [1] https://github.com/lebigot/uncertainties/blob/master/uncertainties/test_uncertainties.py#L436

    [2] https://en.wikipedia.org/wiki/IEEE_floating_point

    [3] http://stackoverflow.com/questions/18915378, answer by user BlackGriffin.

    Examples
    --------
    Pint quantities hash to unequal values if specified in different scales or
    different systems of units (even if the underlying physical quantity is
    identical).

    >>> from pisa import ureg
    >>> from pisa.utils.hash import hash_obj
    >>> q0 = 1 * ureg.m
    >>> q1 = 100 * ureg.cm
    >>> q0 == q1
    True
    >>> hash_obj(q0) == hash_obj(q1)
    False

    Even the `to_base_units()` method fails for hashing to equal values, as
    `q0` is a float and `q1` is an integer.

    >>> hash_obj(q0.to_base_units()) == hash_obj(q1.to_base_units())
    False

    Even if both quantities are floating point numbers, finite precision
    effects in the `to_base_units` conversion can still cause two things which
    we "know" are equal to evaluate to be unequal.

    >>> q2 = 0.1 * ureg.m
    >>> q3 = 1e5 * ureg.um
    >>> q2 == q3
    True
    >>> q2.to_base_units() == q3.to_base_units()
    False

    `normQuant` handles all of these issues given an appropriate `sigfigs`
    argument.

    >>> q2_normed = normQuant(q2, sigfigs=12)
    >>> q3_normed = normQuant(q3, sigfigs=12)
    >>> q2_normed == q3_normed
    True
    >>> hash_obj(q2_normed) == hash_obj(q3_normed)
    True

    """
    #logging.trace('-'*80)
    #logging.trace('obj: %s', obj)
    #logging.trace('type(obj): %s', type(obj))
    if not full_norm:
        return obj

    # Nothing to convert for strings, None, ...
    if isinstance(obj, str) or obj is None:
        return obj

    round_result = False
    if sigfigs is not None:
        if not (int(sigfigs) == float(sigfigs) and sigfigs > 0):
            raise ValueError('`sigfigs` must be an integer > 0.')
        round_result = True
        sigfigs = int(sigfigs)

    # Store kwargs for easily passing to recursive calls of this function
    kwargs = dict(sigfigs=sigfigs, full_norm=full_norm)

    if hasattr(obj, 'normalized_state'):
        return obj.normalized_state

    # Recurse into dict by its (sorted) keys (or into OrderedDict using keys in
    # their defined order) and return an OrderedDict in either case.
    if isinstance(obj, Mapping):
        #logging.trace('Mapping')
        if isinstance(obj, OrderedDict):
            keys = obj.keys()
        else:
            keys = sorted(obj.keys())
        normed_obj = OrderedDict()
        for key in keys:
            normed_obj[key] = normQuant(obj[key], **kwargs)
        return normed_obj

    # Sequences, etc. but NOT numpy arrays (or pint quantities, which are
    # iterable) get their elements normalized and populated to a new list for
    # returning.
    # NOTE/TODO: allowing access across unit registries for pragmatic (if
    # incorrect) reasons... may want to revisit this decision.
    # pylint: disable=protected-access
    misbehaving_sequences = (np.ndarray, pint.quantity._Quantity)
    if (isinstance(obj, (Iterable, Iterator, Sequence))
            and not isinstance(obj, misbehaving_sequences)):
        #logging.trace('Iterable, Iterator, or Sequence but not ndarray or'
        #              ' _Quantity')
        return [normQuant(x, **kwargs) for x in obj]

    # Must be a numpy array or scalar if we got here...

    # NOTE: the order in which units (Pint module) and uncertainties
    # (uncertainties module) are handled is crucial! Essentially, it appears
    # that Pint is aware of uncertainties, but not vice versa. Hence the
    # ordering and descriptions used below.

    # The outermost "wrapper" of a number or numpy array is its Pint units. If
    # units are present, convert to base units, record the base units, and
    # strip the units off of the quantity by replacing it with its magnitude
    # (in the base units).

    has_units = False
    if isinstance(obj, pint.quantity._Quantity):
        #logging.trace('is a Quantity, converting to base units')
        has_units = True
        if full_norm:
            obj = obj.to_base_units()
        units = obj.units
        obj = obj.magnitude

    # The next layer possible for a number or numpy array to have is
    # uncertainties. If uncertainties are attached to `obj`, record a
    # "snapshot" (losing correlations) of the standard deviations. Then replace
    # the number or array solely with its nominal value(s).

    # NOTE: uncertainties.core.AffineScalarFunc includes such functions *and*
    # uncertainties.core.Variable objects

    has_uncertainties = False
    if isinstance(obj, AffineScalarFunc):
        #logging.trace('type is AffineScalarFunc')
        has_uncertainties = True
        std_devs = obj.std_dev
        obj = obj.nominal_value
    elif isinstance(obj, np.ndarray) and np.issubsctype(obj, AffineScalarFunc):
        #logging.trace('ndarray with subsctype is AffineScalarFunc')
        has_uncertainties = True
        std_devs = unp.std_devs(obj)
        obj = unp.nominal_values(obj)

    # What is done below will convert scalars into arrays, so get this info
    # before it is lost.
    is_scalar = isscalar(obj)

    if round_result:
        #logging.trace('rounding result')
        # frexp returns *binary* fraction (significand) and *binary* exponent
        bin_significand, bin_exponent = np.frexp(obj)
        exponent = LOG10_2 * bin_exponent
        exponent_integ = np.floor(exponent)
        exponent_fract = exponent - exponent_integ
        significand = bin_significand * 10**(exponent_fract)
        obj = np.around(significand, sigfigs - 1) * 10**exponent_integ

    # Now work our way *up* through the hierarchy: First, reintroduce
    # uncertainties

    if has_uncertainties and round_result:
        #logging.trace('uncertainties and rounding')
        std_bin_significand, std_bin_exponent = np.frexp(std_devs)
        std_exponent = LOG10_2 * std_bin_exponent
        std_exponent_integ = np.floor(std_exponent)
        std_exponent_fract = std_exponent - std_exponent_integ
        # Don't just scale magnitude by the stddev's fractional exponent; also
        # shift to be on the same scale (power-of-10) as the nominal value
        delta_order_of_mag = std_exponent_integ - exponent_integ
        std_significand = (std_bin_significand *
                           10**(std_exponent_fract + delta_order_of_mag))
        # Now rounding on the stddev's significand occurs at the same order of
        # magnitude as rounding on the nominal value (and so scaling is done
        # with `exponent_integ`, NOT `std_exponent_integ`)
        std_devs = (np.around(std_significand, sigfigs - 1) *
                    10**exponent_integ)

    if has_uncertainties:
        #logging.trace('recreate uncertainties array')
        obj = unp.uarray(obj, std_devs)
        # If it was a scalar, it has become a len-1 array; extract the scalar
        if is_scalar:
            #logging.trace('converting to scalar')
            obj = obj[0]

    # Finally, attach units if they were present
    if has_units:
        #logging.trace('reattaching units')
        obj = obj * units

    return obj
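The rounding core above is self-contained enough to extract. A minimal sketch of the same frexp-based significant-figures rounding (LOG10_2 = np.log10(2), as in the module this function comes from):

import numpy as np

LOG10_2 = np.log10(2)

def round_sigfigs(x, sigfigs):
    # Split into binary significand/exponent, convert the exponent to
    # base 10, then round the decimal significand to sigfigs digits.
    m, e = np.frexp(np.asarray(x, dtype=float))
    d = LOG10_2 * e
    d_int = np.floor(d)
    return np.around(m * 10**(d - d_int), sigfigs - 1) * 10**d_int

print(round_sigfigs([123456.0, 0.00123456], 3))  # [1.23e+05 1.23e-03]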