Example #1
def test_ldexp():
  rng = np.random.RandomState(seed=42)
  x = rng.randn(3, 7).astype(bfloat16)
  y = rng.randint(-50, 50, (1, 7))
  numpy_assert_allclose(
      np.ldexp(x, y).astype(np.float32), np.ldexp(x.astype(np.float32), y),
      rtol=1e-2, atol=1e-6)
Example #2
 def test_ldexp_invalid(self):
     with pytest.raises(TypeError) as exc:
         np.ldexp(3. * u.m, 4.)
     # built-in TypeError, so can't check content of exception
     with pytest.raises(TypeError) as exc:
         np.ldexp(3., 4 * u.m)
     assert "Cannot use ldexp" in exc.value.args[0]
Example #3
    def test_ldexp_1(self):

        a = np.ldexp(5, np.arange(4))
        print(a)

        b = np.ldexp(np.arange(4), 5)
        print(b)
Example #4
 def test_ldexp_invalid(self):
     with pytest.raises(TypeError) as exc:
         np.ldexp(3. * u.m, 4.)
     # built-in TypeError, so can't check content of exception
     with pytest.raises(TypeError) as exc:
         np.ldexp(3., 4 * u.m)
     assert "Cannot use ldexp" in exc.value.args[0]
Example #5
def _hdr_read(filename, use_imageio=False, **kwargs):
    """Read an HDR (Radiance) file.

    .. TODO:

        * Support axis other than -Y +X
    """
    if use_imageio:
        return imageio.imread(filename, **kwargs)

    with open(filename, "rb") as f:
        MAGIC = f.readline().strip()
        assert MAGIC == b'#?RADIANCE', "Wrong header found in {}".format(
            filename)
        comments = b""
        while comments[:6] != b"FORMAT":
            comments = f.readline().strip()
            assert comments[:3] != b"-Y ", "Could not find data format"
        assert comments == b'FORMAT=32-bit_rle_rgbe', "Format not supported"
        while comments[:3] != b"-Y ":
            comments = f.readline().strip()
        _, height, _, width = comments.decode("ascii").split(" ")
        height, width = int(height), int(width)
        rgbe = np.fromfile(f, dtype=np.uint8).reshape((height, width, 4))
        rgb = np.empty((height, width, 3), dtype=float)
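        # RGBE decode: each colour byte is scaled by 2**(E - 128), where E is the
        # shared exponent stored in the fourth byte.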
        rgb[..., 0] = np.ldexp(rgbe[..., 0], rgbe[..., 3].astype('int') - 128)
        rgb[..., 1] = np.ldexp(rgbe[..., 1], rgbe[..., 3].astype('int') - 128)
        rgb[..., 2] = np.ldexp(rgbe[..., 2], rgbe[..., 3].astype('int') - 128)
        # TODO: This will rescale all the values to be in [0, 1]. Find a way to retrieve the original values.
        rgb /= rgb.max()
    return rgb
Example #6
    def _make_stateful_computation_error_model(self):
        b = self.parameters.b.astype(float)
        a = self.parameters.a.astype(float)
        if hasattr(self.parameters, 'k'):
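            # np.ldexp(c, k) == c * 2**k: apply the exact power-of-two gain k to the coefficients.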
            b = np.ldexp(b, self.parameters.k)
            a[1:] = np.ldexp(a[1:], self.parameters.k)

        n_s = self.states[0][0]
        b = np.pad(b, (0, n_s + 1 - len(b)))
        a = np.pad(a, (0, n_s + 1 - len(a)))

        if n_s > 1:
            A = np.zeros((n_s, n_s))
            A[0, :] = -a[1:n_s + 1]
            A[1:, :-1] = np.eye(n_s - 1)
            C = np.array([
                -b[0] * a[i] + b[i]
                for i in range(1, n_s + 1)
            ])
        else:
            A = -a[1]
            C = -b[0] * a[1] + b[1]

        B = np.zeros((n_s, 2))
        B[0, 0] = 1
        D = np.array([b[0], 1])

        return signal.dlti(A, B, C, D)
Example #7
def test(n, bits):
    print(np.log2(n))
    s1 = computeScale(n, bits)
    s2 = getScale(n, bits)
    v1 = int(np.ldexp(n, -s1))
    v2 = int(np.ldexp(n, -s2))
    print("%f scale = %d int = %d" % (n, s1, v1))
    print("%f scale = %d int = %d" % (n, s2, v2))
Example #8
 def test_ldexp(self):
     dtype = np.float64
     nvec = 5000
     xr, std = generate_random_xr(dtype, nvec=nvec, max_bin_exp=200)
     exp = np.asarray(xr["exp"])
     out = np.empty(std.shape, dtype)
     numba_test_ldexp(std, exp, out)
     np.testing.assert_array_equal(out, np.ldexp(std, exp))
     numba_test_ldexp(std, -exp, out)
     np.testing.assert_array_equal(out, np.ldexp(std, -exp))
Example #9
 def testLdexp(self, float_type):
     rng = np.random.RandomState(seed=42)
     x = rng.randn(3, 7).astype(float_type)
     y = rng.randint(-50, 50, (1, 7)).astype(np.int32)
     self.assertEqual(np.ldexp(x, y).dtype, x.dtype)
     numpy_assert_allclose(np.ldexp(x, y).astype(np.float32),
                           truncate(np.ldexp(x.astype(np.float32), y),
                                    float_type=float_type),
                           rtol=1e-2,
                           atol=1e-6,
                           float_type=float_type)
Example #10
def generate_bound4less1(inp):
    a = np.frexp(inp)
    if inp == 0:
        return [-pow(2, -1022), pow(2, -1022)]
    if inp < 0:
        tmp_i = np.ldexp(-0.5, a[1])
        tmp_j = np.ldexp(-0.5, a[1] + 1)
        return [tmp_j, tmp_i]
    else:
        tmp_i = np.ldexp(0.5, a[1])
        tmp_j = np.ldexp(0.5, a[1] + 1)
        return [tmp_i, tmp_j]
Example #11
File: x86.py Project: shas19/EdgeML
    def printModelParamsWithBitwidth(self):
        if config.vbwEnabled and forFixed():
            for var in self.globalVars:
                if var + "idx" in self.globalVars and var + "val" in self.globalVars:
                    continue
                bw = self.varsForBitwidth[var]
                typ_str = "int%d_t" % bw
                size = self.decls[var].shape
                sizestr = ''.join(["[%d]" % (i) for i in size])

                Xindexstr = ''
                Xintstar = ''.join(["*" for i in size])

                if var != 'X':
                    self.out.printf(typ_str + " " + var + sizestr + ";\n", indent = True)
                else:
                    self.out.printf(typ_str + Xintstar + " " + var + ";\n", indent = True)

                for i in range(len(size)):
                    Xindexstr += ("[i" + str(i-1) + "]" if i > 0 else "")
                    if var == 'X':
                        Xintstar = Xintstar[:-1]
                        self.out.printf("X%s = new %s%s[%d];\n" % (Xindexstr, typ_str, Xintstar, size[i]), indent=True)
                    self.out.printf("for (int i%d = 0; i%d < %d; i%d ++) {\n" % (i,i,size[i], i), indent = True)
                    self.out.increaseIndent()

                indexstr = ''.join("[i" + str(i) + "]" for i in range(len(size)))
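                # np.ldexp(1, s) == 2**s: the divisor below is the power-of-two scale implied by the demoted bitwidth.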
                divide = int(round(np.ldexp(1, config.wordLength - self.varsForBitwidth[var] + (self.demotedVarsOffsets.get(var, 0) if self.varsForBitwidth[var] != config.wordLength else 0) ))) if var[-3:] != "idx" and var != "X" else 1
                self.out.printf(var + indexstr + " = " + var + "_temp" + indexstr + "/" + str(divide) + ";\n", indent = True)

                for i in range(len(size)):
                    self.out.decreaseIndent()
                    self.out.printf("}\n", indent = True)
Example #12
    def test_half_ufuncs(self):
        """Test the various ufuncs"""

        a = np.array([0, 1, 2, 4, 2], dtype=float16)
        b = np.array([-2, 5, 1, 4, 3], dtype=float16)
        c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16)

        assert_equal(np.add(a, b), [-2, 6, 3, 8, 5])
        assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1])
        assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6])
        assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625])

        assert_equal(np.equal(a, b), [False, False, False, True, False])
        assert_equal(np.not_equal(a, b), [True, True, True, False, True])
        assert_equal(np.less(a, b), [False, True, False, False, True])
        assert_equal(np.less_equal(a, b), [False, True, False, True, True])
        assert_equal(np.greater(a, b), [True, False, True, False, False])
        assert_equal(np.greater_equal(a, b), [True, False, True, True, False])
        assert_equal(np.logical_and(a, b), [False, True, True, True, True])
        assert_equal(np.logical_or(a, b), [True, True, True, True, True])
        assert_equal(np.logical_xor(a, b), [True, False, False, False, False])
        assert_equal(np.logical_not(a), [True, False, False, False, False])

        assert_equal(np.isnan(c), [False, False, False, True, False])
        assert_equal(np.isinf(c), [False, False, True, False, False])
        assert_equal(np.isfinite(c), [True, True, False, False, True])
        assert_equal(np.signbit(b), [True, False, False, False, False])

        assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3])

        assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3])
        x = np.maximum(b, c)
        assert_(np.isnan(x[3]))
        x[3] = 0
        assert_equal(x, [0, 5, 1, 0, 6])
        assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2])
        x = np.minimum(b, c)
        assert_(np.isnan(x[3]))
        x[3] = 0
        assert_equal(x, [-2, -1, -np.inf, 0, 3])
        assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3])
        assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6])
        assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2])
        assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3])

        assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0])
        assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2])
        assert_equal(np.square(b), [4, 25, 1, 16, 9])
        assert_equal(np.reciprocal(b),
                     [-0.5, 0.199951171875, 1, 0.25, 0.333251953125])
        assert_equal(np.ones_like(b), [1, 1, 1, 1, 1])
        assert_equal(np.conjugate(b), b)
        assert_equal(np.absolute(b), [2, 5, 1, 4, 3])
        assert_equal(np.negative(b), [2, -5, -1, -4, -3])
        assert_equal(np.positive(b), b)
        assert_equal(np.sign(b), [-1, 1, 1, 1, 1])
        assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b))
        assert_equal(np.frexp(b),
                     ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2]))
        assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12])
Example #13
def _expm_product_helper(A, mu, iteration_stash, t, B):
    # Estimate expm(t*M).dot(B).
    # A = M - mu*I
    # mu = mean(trace(M))
    # The iteration stash helps to compute numbers of iterations to use.
    # t is a scaling factor.
    # B is the input matrix for the linear operator.

    # Compute some input-dependent constants.
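    # np.ldexp(1, -53) == 2**-53, the double-precision unit roundoff, used as the convergence tolerance below.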
    tol = np.ldexp(1, -53)
    n0 = B.shape[1]
    m, s = iteration_stash.fragment_3_1(n0, t)

    #print('1-norm:', A.one_norm(), 't:', t, 'mu:', mu, 'n0:', n0, 'm:', m, 's:', s)

    # Get the lapack function for computing matrix norms.
    lange, = get_lapack_funcs(('lange', ), (B, ))

    F = B
    eta = np.exp(t * mu / float(s))
    for i in range(s):
        c1 = lange('i', B)
        for j in range(m):
            coeff = t / float(s * (j + 1))
            B = coeff * A.dot(B)
            c2 = lange('i', B)
            F = F + B
            if c1 + c2 <= tol * lange('i', F):
                break
            c1 = c2
        F = eta * F
        B = F
    return F
Example #14
def convertIntToFloat_array( x, n_bytes ):
    int_na = getIntNA( n_bytes )
    y = np.zeros( len( x ) )
    y[ x == int_na ] = np.nan
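    # np.ldexp(v, -(8 * n_bytes - 2)) divides by 2**(8 * n_bytes - 2), shrinking the stored integers back to floats.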
    y[ x != int_na ] = np.ldexp( x[ x != int_na ], -1 * ( 8 * n_bytes - 2 ) )
    
    return y
Example #15
    def _roots(p):
        """Modified version of NumPy's roots function.

        NumPy's roots uses the companion matrix method, which divides by
        p[0]. This can causes overflows/underflows. Instead form a
        modified companion matrix that is scaled by 2^c * p[0], where the
        exponent c is chosen to balance the magnitudes of the
        coefficients. Since scaling the matrix just scales the
        eigenvalues, we can remove the scaling at the end.

        Scaling by a power of 2 is chosen to avoid rounding errors.

        """
        _, e = np.frexp(p)
        # Balance the most extreme exponents e_max and e_min by solving
        # the equation
        #
        # |c + e_max| = |c + e_min|.
        #
        # Round the exponent to an integer to avoid rounding errors.
        c = int(-0.5 * (np.max(e) + np.min(e)))
        p = np.ldexp(p, c)

        A = np.diag(np.full(p.size - 2, p[0]), k=-1)
        A[0,:] = -p[1:]
        eigenvalues = np.linalg.eigvals(A)
        return eigenvalues / p[0]
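The docstring above relies on np.ldexp(p, c) == p * 2**c being exact, so rescaling every coefficient by the same power of two cannot perturb the roots. A minimal standalone sketch (the tiny polynomial below is made up for illustration, not taken from the project above):

import numpy as np

p = np.array([1e-150, 3e-150, 2e-150])   # hypothetical badly scaled coefficients
_, e = np.frexp(p)
c = int(-0.5 * (np.max(e) + np.min(e)))  # balance the extreme exponents
scaled = np.ldexp(p, c)                  # exact power-of-two rescale

print(np.roots(p))        # [-2. -1.]
print(np.roots(scaled))   # same roots, from better-conditioned coefficients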
Example #16
def _expm_product_helper(A, mu, iteration_stash, t, B):
    # Estimate expm(t*M).dot(B).
    # A = M - mu*I
    # mu = mean(trace(M))
    # The iteration stash helps to compute numbers of iterations to use.
    # t is a scaling factor.
    # B is the input matrix for the linear operator.

    # Compute some input-dependent constants.
    tol = np.ldexp(1, -53)
    n0 = B.shape[1]
    m, s = iteration_stash.fragment_3_1(n0, t)

    #print('1-norm:', A.one_norm(), 't:', t, 'mu:', mu, 'n0:', n0, 'm:', m, 's:', s)

    # Get the lapack function for computing matrix norms.
    lange, = get_lapack_funcs(('lange',), (B,))

    F = B
    eta = np.exp(t*mu / float(s))
    for i in range(s):
        c1 = lange('i', B)
        for j in range(m):
            coeff = t / float(s*(j+1))
            B = coeff * A.dot(B)
            c2 = lange('i', B)
            F = F + B
            if c1 + c2 <= tol * lange('i', F):
                break
            c1 = c2
        F = eta * F
        B = F
    return F
Example #17
    def _make_stateful_computation_error_model(self):
        a = self.parameters.a.astype(float)
        if hasattr(self.parameters, 'k'):
            a[1:] = np.ldexp(a[1:], self.parameters.k)
        n_s = self.states[0][0]
        a = np.pad(a, (0, n_s + 1 - len(a)))

        if n_s > 1:
            A = np.zeros((n_s, n_s))
            A[:, 0] = -a[1:n_s + 1]
            A[:-1, 1:] = np.eye(n_s - 1)

            C = np.zeros((1, n_s))
            C[0, 0] = 1
        else:
            A = -a[1]
            C = 1

        B = np.zeros((n_s, n_s + 1))
        B[:, 0] = -a[1:n_s + 1]
        B[:, 1:] = np.eye(n_s)

        D = np.zeros((1, n_s + 1))
        D[0, 0] = 1

        return signal.dlti(A, B, C, D)
Example #18
    def __init__(self,
            Q_primary, primary_distn, primary_to_part,
            rate_on, rate_off):
        """

        """
        # Store the inputs.
        self.Q_primary = Q_primary
        self.primary_distn = primary_distn
        self.primary_to_part = primary_to_part
        self.rate_on = rate_on
        self.rate_off = rate_off

        # Precompute some summaries which can be computed quickly
        # and do not use much memory.
        # Summaries that are slow or complicated to compute or which may use
        # too much memory are computed on demand
        # through an explicit function call.
        self.nprimary = len(primary_to_part)
        self.nparts = len(set(primary_to_part.values()))
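        # np.ldexp(n, k) == n * 2**k, so ncompound = nprimary * 2**nparts:
        # one on/off tolerance state per part for each primary state.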
        self.ncompound = int(np.ldexp(self.nprimary, self.nparts))
        self.tolerance_distn = get_tolerance_distn(rate_off, rate_on)

        # Mark some attributes as un-initialized.
        # These attributes are related to the compound distribution,
        # and may be initialized later using init_compound().
        self.Q_compound = None
        self.compound_distn = None
        self.compound_to_primary = None
        self.compound_to_tolerances = None
Example #19
 def _pretty(self, printer):
     z = sympy.UnevaluatedExpr(sympy.Symbol('z'))
     k = getattr(self.parameters, 'k', 0)
     num = None
     for i, c in enumerate(self.parameters.b):
         if c:
             term = printer._print(
                 np.ldexp(float(c), k) * z**-i)
             num = num + term if num else term
     if num is None:
         num = 0
     den = printer._print(float(self.parameters.a[0]))
     for i, c in enumerate(self.parameters.a[1:], start=1):
         if c:
             den += printer._print(np.ldexp(float(c), k) * z**-i)
     return printer._print(num) / printer._print(den)
Example #20
    def _uniform_sampler(self):
        """
        Uniformly sample the full domain of floating-point numbers between (0, 1), rather than only multiples of 2^-53.
        A uniform distribution over D ∩ (0, 1) can be generated by independently sampling an exponent
        from the geometric distribution with parameter .5 and a significand by drawing a uniform string from
        {0, 1}^52 [Mir12]_

        Based on code recipe in Python standard library documentation [Py21]_.

        Returns
        -------
        float
            A value sampled from float in (0, 1) with probability proportional to the size of the infinite-precision
            real interval each float represents

        References
        ----------
        .. [Py21]  The Python Standard Library. "random — Generate pseudo-random numbers", 2021
        https://docs.python.org/3/library/random.html#recipes
        """
        mantissa_size = np.finfo(float).nmant
        mantissa = 1 << mantissa_size | self._rng.getrandbits(mantissa_size)
        exponent = -(mantissa_size + 1)
        x = 0
        while not x:
            x = self._rng.getrandbits(32)
            exponent += x.bit_length() - 32
        return np.ldexp(mantissa, exponent)
Example #21
    def test_half_ufuncs(self):
        """Test the various ufuncs"""

        a = np.array([0, 1, 2, 4, 2], dtype=float16)
        b = np.array([-2, 5, 1, 4, 3], dtype=float16)
        c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16)

        assert_equal(np.add(a, b), [-2, 6, 3, 8, 5])
        assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1])
        assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6])
        assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625])

        assert_equal(np.equal(a, b), [False, False, False, True, False])
        assert_equal(np.not_equal(a, b), [True, True, True, False, True])
        assert_equal(np.less(a, b), [False, True, False, False, True])
        assert_equal(np.less_equal(a, b), [False, True, False, True, True])
        assert_equal(np.greater(a, b), [True, False, True, False, False])
        assert_equal(np.greater_equal(a, b), [True, False, True, True, False])
        assert_equal(np.logical_and(a, b), [False, True, True, True, True])
        assert_equal(np.logical_or(a, b), [True, True, True, True, True])
        assert_equal(np.logical_xor(a, b), [True, False, False, False, False])
        assert_equal(np.logical_not(a), [True, False, False, False, False])

        assert_equal(np.isnan(c), [False, False, False, True, False])
        assert_equal(np.isinf(c), [False, False, True, False, False])
        assert_equal(np.isfinite(c), [True, True, False, False, True])
        assert_equal(np.signbit(b), [True, False, False, False, False])

        assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3])

        assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3])
        x = np.maximum(b, c)
        assert_(np.isnan(x[3]))
        x[3] = 0
        assert_equal(x, [0, 5, 1, 0, 6])
        assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2])
        x = np.minimum(b, c)
        assert_(np.isnan(x[3]))
        x[3] = 0
        assert_equal(x, [-2, -1, -np.inf, 0, 3])
        assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3])
        assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6])
        assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2])
        assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3])

        assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0])
        assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2])
        assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2]))
        assert_equal(np.square(b), [4, 25, 1, 16, 9])
        assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125])
        assert_equal(np.ones_like(b), [1, 1, 1, 1, 1])
        assert_equal(np.conjugate(b), b)
        assert_equal(np.absolute(b), [2, 5, 1, 4, 3])
        assert_equal(np.negative(b), [2, -5, -1, -4, -3])
        assert_equal(np.positive(b), b)
        assert_equal(np.sign(b), [-1, 1, 1, 1, 1])
        assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b))
        assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2]))
        assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12])
Example #22
def unpack_bmp_bgra_to_float(bmp):
    b = bmp[:, :, 0].astype(np.int32)
    g = bmp[:, :, 1].astype(np.int32) << 16
    r = bmp[:, :, 2].astype(np.int32) << 8
    a = bmp[:, :, 3].astype(np.int32)
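    # np.ldexp(1.0, b - (128 + 24)) == 2**(b - 128) / 2**24: b acts as a biased
    # exponent while g, r and a assemble a 24-bit mantissa.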
    depth = np.ldexp(1.0, b -
                     (128 + 24)) * (g + r + a + 0.5).astype(np.float32)
    return depth
Example #23
    def _make_state_observer_models(self):
        b = self.parameters.b.astype(float)
        a = self.parameters.a.astype(float)
        if hasattr(self.parameters, 'k'):
            b = np.ldexp(b, self.parameters.k)
            a[1:] = np.ldexp(a[1:], self.parameters.k)
        n_s = self.states[0][0]
        a = np.pad(a, (0, n_s + 1 - len(a)))
        b = np.pad(b, (0, n_s + 1 - len(b)))

        A = np.zeros((n_s, n_s))
        A[:, 0] = -a[1:n_s + 1]
        A[:-1, 1:] = np.eye(n_s - 1)
        B = np.c_[b[1:n_s + 1] - a[1:n_s + 1] * b[0]]
        C = np.eye(n_s)
        D = np.zeros((n_s, 1))
        return signal.dlti(A, B, C, D),
Example #24
def nextfloat(fin):
    """Return (approximately) next float value (for f>0)."""
    d = 2**-52
    split = N.frexp(fin)
    while True:
        fout = N.ldexp(split[0] + d, split[1])
        if fin != fout:
            return fout
        d *= 2
Example #25
def nextfloat(fin):
    """Return (approximately) next float value (for f>0)."""
    d = 2**-52
    split = N.frexp(fin)
    while True:
        fout = N.ldexp(split[0] + d, split[1])
        if fin != fout:
            return fout
        d *= 2
Example #26
	def visitFloat(self, node:AST.Float, args=None):
		r = node.value
		p = self.get_expnt(abs(r))
		k = IR.DataType.getInt(np.ldexp(r, p))
		expr = self.getTempVar()
		prog = IR.Prog([IR.Comment('Float to int : {0} to {1}, isSecret = {2}.'.format(str(r), str(k), node.isSecret))])
		prog = IRUtil.prog_merge(IR.Prog([IR.Decl(expr.idf, node.type, -1, node.isSecret, [k])]), prog)
		if (not(Util.Config.disableTruncOpti)):
			self.scaleFacMapping[expr.idf] = self.scaleFac
		return (prog, expr)
Example #27
    def decode(self, i):
        i = np.array(i)
        i = 255 - i
        sgn = i > 127
        e = np.array(np.floor(i / 16.0) - 8 * sgn + 1, dtype=np.uint8)
        f = i % 16
        data = np.ldexp(f, e + 2)
        e = MuLaw.etab[e-1]
        data = MuLaw.iscale * (1 - 2 * sgn) * (e + data)

        return data
Example #28
 def _make_model(self):
     b = self.parameters.b.astype(float)
     a = self.parameters.a.astype(float)
     if not self.states:
         # Handle degenerate model
         return signal.dlti(b[0], a[0])
     # From negative to positive powers
     pad_width = len(a) - len(b)
     if pad_width > 0:
         b = np.pad(b, (0, pad_width))
     elif pad_width < 0:
         a = np.pad(a, (0, -pad_width))
     num = np.trim_zeros(b, 'f')
     if num.size == 0:
         num = b[0:1]
     den = np.trim_zeros(a, 'f')
     if hasattr(self.parameters, 'k'):
         num = np.ldexp(num, self.parameters.k)
         den[1:] = np.ldexp(den[1:], self.parameters.k)
     return signal.dlti(num, den)
Example #29
 def _make_state_observer_models(self):
     a = self.parameters.a.astype(float)
     if hasattr(self.parameters, 'k'):
         a[1:] = np.ldexp(a[1:], self.parameters.k)
     n_s = self.states[0][0]
     A = np.zeros((n_s, n_s))
     A[0, :len(a) - 1] = -a[1:]
     A[1:, :-1] = np.eye(n_s - 1)
     B = np.zeros((n_s, 1))
     B[0, 0] = 1
     C = np.eye(n_s)
     D = np.zeros((n_s, 1))
     return signal.dlti(A, B, C, D),
Example #30
 def visitFloat(self, node: AST.Float, args=None):
     r = node.value
     p = self.get_expnt(abs(r))
     k = IR.DataType.getInt(np.ldexp(r, p))
     comment = IR.Comment('Float to int : ' + str(r) + ' to ' + str(k))
     prog = IR.Prog([comment])
     expr = None
     if not node.isSecret:
         expr = IR.Int(k)
     else:
         expr = self.getTempVar()
         self.decls[expr.idf] = [node.type]
         prog = IRUtil.prog_merge(IR.Prog([IR.Decl(expr.idf, node.type)]), prog)
     return (prog, expr)
Example #31
	def visitFloat(self, node:AST.Float, args=None):
		r = node.value
		p = self.get_expnt(abs(r))
		k = IR.DataType.getInt(np.ldexp(r, p))
		expr = None
		prog = IR.Prog([IR.Comment('Float to int : {0} to {1}, isSecret = {2}.'.format(str(r), str(k), node.isSecret))])
		if not(node.isSecret):
			expr = IR.Int(k)
		else:
			expr = self.getTempVar()
			self.decls[expr.idf] = [node.type]
			prog = IRUtil.prog_merge(IR.Prog([IR.Decl(expr.idf, node.type)]), prog)
		return (prog, expr)
Example #32
    def populateExpTable(self, p):
        [table_m, table_n] = self.expTableShape
        b = np.log2(table_n)

        # Currently looking at only 2D arrays
        assert table_m == 2

        [m, M] = self.expRange
        max = int(np.ldexp(M - m, -p))
        shl = self.getShl(max)

        #alpha_count = self.getAlphaCount(max, shl)
        alpha_count = table_n
        beta_count = table_n

        table = [[0 for _ in range(alpha_count)],
                 [0 for _ in range(beta_count)]]

        alpha = Common.wordLength - shl - b
        pRes = self.getScale(1)
        for i in range(alpha_count):
            num = i * 2**(alpha + p)
            exp = np.exp(-num)
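            # np.ldexp(exp, -pRes) == exp * 2**(-pRes): quantise the value to a fixed-point integer at scale pRes.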
            table[0][i] = int(np.ldexp(exp, -pRes))

        beta = alpha - b
        pRes = self.getScale(abs(np.exp(-m)))
        for i in range(beta_count):
            num = m + i * 2**(beta + p)
            exp = np.exp(-num)
            table[1][i] = int(np.ldexp(exp, -pRes))

        tableVar = [
            IR.Var('EXP' + str(abs(p)) + 'A', inputVar=True),
            IR.Var('EXP' + str(abs(p)) + 'B', inputVar=True)
        ]

        return [table, tableVar]
Example #33
def from_npfloat(x, prec=113, rnd=round_fast):
    """Create a raw mpf from a numpy float, rounding if necessary.
    If prec >= 113, the result is guaranteed to represent exactly the
    same number as the input. If prec is not specified, use prec=113."""
    y = float(x)
    if x == y: # ldexp overflows for float16
        return from_float(y, prec, rnd)
    import numpy as np
    if np.isfinite(x):
        m, e = np.frexp(x)
        return from_man_exp(int(np.ldexp(m, 113)), int(e-113), prec, rnd)
    if np.isposinf(x): return finf
    if np.isneginf(x): return fninf
    return fnan
Example #34
def from_npfloat(x, prec=113, rnd=round_fast):
    """Create a raw mpf from a numpy float, rounding if necessary.
    If prec >= 113, the result is guaranteed to represent exactly the
    same number as the input. If prec is not specified, use prec=113."""
    y = float(x)
    if x == y: # ldexp overflows for float16
        return from_float(y, prec, rnd)
    import numpy as np
    if np.isfinite(x):
        m, e = np.frexp(x)
        return from_man_exp(int(np.ldexp(m, 113)), int(e-113), prec, rnd)
    if np.isposinf(x): return finf
    if np.isneginf(x): return fninf
    return fnan
Example #35
    def test_roundtrip(self, ftype, frac_vals, exp_vals):
        for frac, exp in zip(frac_vals, exp_vals):
            f = np.ldexp(frac, exp, dtype=ftype)
            n, d = f.as_integer_ratio()

            try:
                # workaround for gh-9968
                nf = np.longdouble(str(n))
                df = np.longdouble(str(d))
            except (OverflowError, RuntimeWarning):
                # the values may not fit in any float type
                pytest.skip("longdouble too small on this platform")

            assert_equal(nf / df, f, "{}/{}".format(n, d))
Example #36
    def test_roundtrip(self, ftype, frac_vals, exp_vals):
        for frac, exp in zip(frac_vals, exp_vals):
            f = np.ldexp(frac, exp, dtype=ftype)
            n, d = f.as_integer_ratio()

            try:
                # workaround for gh-9968
                nf = np.longdouble(str(n))
                df = np.longdouble(str(d))
            except (OverflowError, RuntimeWarning):
                # the values may not fit in any float type
                pytest.skip("longdouble too small on this platform")

            assert_equal(nf / df, f, "{}/{}".format(n, d))
Example #37
    def visitFloat(self, node: AST.Float):
        val = node.value
        scale = self.getScale(abs(val))
        intv = self.getInterval(scale, val, val)
        val_int = IR.DataType.getInt(np.ldexp(val, -scale))

        prog = IR.Prog([])
        expr = self.getTempVar()

        self.decls[expr.idf] = node.type
        self.scales[expr.idf] = scale
        self.intvs[expr.idf] = intv
        self.cnsts[expr.idf] = val_int

        return (prog, expr)
Example #38
    def _cubic_exp2_approx(self, value):
        """ Derived from Cardano's formula """

        exponent = math.floor(value)
        delta_0 = self.B * self.B - 3 * self.A * self.C
        delta_1 = (2 * self.B * self.B * self.B -
                   9 * self.A * self.B * self.C - 27 * self.A * self.A *
                   (value - exponent))
        cardano = np.cbrt(
            (delta_1 -
             np.sqrt(delta_1 * delta_1 - 4 * delta_0 * delta_0 * delta_0)) / 2)
        significand_plus_one = (-(self.B + cardano + delta_0 / cardano) /
                                (3 * self.A) + 1)
        mantissa = significand_plus_one / 2
        return np.ldexp(mantissa, exponent + 1)
Example #39
def tiny_perturbation(arr):
	fr = numpy.frexp(arr)
	perturb = numpy.random.random(fr[0].shape) * 1e-12
	return numpy.ldexp(fr[0]+perturb, fr[1])
Example #40
def math():
    global a, b, c, z, r

    print('###########################')
    print('#')
    print('#   Math functions')
    print('#')
    print('###########################')
    ###     Trigonometry    ###
    print('Hypotenuse:\n', np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3))))
    print('Radians to degrees 1:\n', np.degrees(np.arange(12.)*np.pi/6))
    print('Radians to degrees 2:\n', np.rad2deg(np.pi/2))
    print('Degrees to radians 1:\n', np.radians(np.arange(12.)*30))
    print('Degrees to radians 2:\n', np.deg2rad(180))
    print('Phase unwrap:\n', np.unwrap(np.linspace(0, np.pi, num=5)))

    ###     Rounding    ###
    print('Round to given decimals:\n', np.around([0.37, 1.64], decimals=1))
    print('Round to nearest integer:\n', np.rint([0.37, 1.64]))
    print('Round toward zero 1:\n', np.fix([-1.5, -0.8, 0.37, 1.64]))
    print('Round toward zero 2:\n', np.trunc([-1.5, -0.8, 0.37, 1.64]))
    print('Floor:\n', np.floor([0.37, 1.64]))
    print('Ceil:\n', np.ceil([0.37, 1.64]))

    ###     Arithmetic    ###
    print('Product:\n', np.prod([[1., 2.], [3., 4.]], axis=1))
    print('Sum:\n', np.sum([[0, 1], [0, 5]], axis=0))
    print('Cumulative product:\n', np.cumprod([[1, 2], [3, 4]]))
    print('Cumulative sum:\n', np.cumsum([[1, 2], [3, 4]]))
    print('Difference:\n', np.diff([[1, 2], [3, 4]]))
    print('Flattened difference:\n', np.ediff1d([[1, 2], [3, 4]]))
    print('Gradient:\n', np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float)))
    print('Cross product:\n', np.cross([1, 2, 3], [4, 5, 6]))
    print('Addition:\n', np.add(np.arange(9).reshape(3, 3), np.arange(3)))
    print('Reciprocal:\n', np.reciprocal([1, 2., 3.33]))
    print('Negation:\n', np.negative([1., -1.]))
    print('Multiplication:\n', np.multiply(2.0, 4.0))
    print('Division:\n', np.divide(5.0, 4.0))
    print('True division:\n', np.true_divide(5.0, 4.0))
    print('Floor division:\n', np.floor_divide(5.0, 4.0))
    print('Power:\n', np.power(np.arange(6), 3))
    print('Subtraction:\n', np.subtract(1.0, 4.0))
    print('Remainder 1:\n', np.mod([-3, -2, -1, 1, 2, 3], 2))
    print('Remainder 2:\n', np.fmod([-3, -2, -1, 1, 2, 3], 2))
    print('Remainder 3:\n', np.remainder([-3, -2, -1, 1, 2, 3], 2))
    print('Fractional and integer parts:\n', np.modf([0, 3.5]))

    ###     Signs    ###
    print('True where the sign bit is set:\n', np.signbit(np.array([1, -2.3, 2.1])))
    # Returns -1 for x < 0, 0 for x == 0, 1 for x > 0
    print('Sign:\n', np.sign([-5., 0, 4.5]))
    # Copy the sign of x2 onto x1
    print('Copy sign:\n', np.copysign([-1, 0, 1], -1.1))
    # Decomposition: y = x1 * 2**x2
    x1, x2 = np.frexp(np.arange(9))
    print('Mantissa/exponent decomposition:\n', x1, x2)
    # Reconstruction: y = x1 * 2**x2, the inverse of the decomposition above
    print('Mantissa/exponent reconstruction:\n', np.ldexp(x1, x2))

    ###     Complex numbers    ###
    print('Complex angle:\n', np.angle([1.0, 1.0j, 1+1j]))
    print('Real part:\n', np.real(np.array([1+2j, 3+4j, 5+6j])))
    print('Real part (imaginary close to 0):\n', np.real_if_close([2.1 + 4e-14j], tol=1000))
    print('Imaginary part:\n', np.imag(np.array([1+2j, 3+4j, 5+6j])))
    print('Complex conjugate:\n', np.conj(np.eye(2) + 1j * np.eye(2)))

    ###     Miscellaneous    ###
    print('Convolution:\n', np.convolve([1, 2, 3], [0, 1, 0.5]))
    # Clip to the interval: values below the lower bound become the lower
    # bound, values above the upper bound become the upper bound
    print('Clip:\n', np.clip(np.arange(10), 3, 6))
    print('Square root:\n', np.sqrt([4, -1, -3+4J]))
    print('Square:\n', np.square([-1j, 1]))
    print('Absolute value (complex-aware):\n', np.absolute([-1, -2]))
    print('Absolute value 2:\n', np.fabs([-1, -2]))
    print('Maximum (propagates nan):\n', np.maximum([2, 3, np.nan], [1, 5, 2]))
    print('Minimum (propagates nan):\n', np.minimum([2, 3, np.nan], [1, 5, 2]))
    print('Maximum (ignores nan):\n', np.fmax([2, 3, np.nan], [1, 5, 2]))
    print('Minimum (ignores nan):\n', np.fmin([2, 3, np.nan], [1, 5, 2]))
    # Interpolate among the points given by xp and fp
    print('Linear interpolation:\n', np.interp([0, 1, 1.5, 2.72, 3.14], xp=[1, 2, 3], fp=[3, 2, 0]))
Example #41
    def test_ldexp_invalid(self):
        with pytest.raises(TypeError):
            np.ldexp(3. * u.m, 4.)

        with pytest.raises(TypeError):
            np.ldexp(3., u.Quantity(4, u.m, dtype=int))
Example #42
 def test_ldexp_array(self):
     assert np.all(np.ldexp(np.array([1., 2., 3.]) * u.m, [3, 2, 1])
                   == np.array([8., 8., 6.]) * u.m)
Example #43
 def test_ldexp_scalar(self):
     assert np.ldexp(4. * u.m, 2) == 16. * u.m
Example #44
    def init_compound(self):
        """

        """
        if self.Q_compound is not None:
            raise Exception(
                    'compound attributes should be initialized only once')
        if self.ncompound > 1e6:
            raise Exception(
                    'the compound state space is too big')

        # Define a compound state space.
        self.compound_to_primary = []
        self.compound_to_tolerances = []
        for primary, tolerances in itertools.product(
                range(self.nprimary),
                itertools.product((0, 1), repeat=self.nparts)):
            self.compound_to_primary.append(primary)
            self.compound_to_tolerances.append(tolerances)

        # Define the sparse distribution over compound states.
        self.compound_distn = {}
        for i, (primary, tolerances) in enumerate(
                zip(self.compound_to_primary, self.compound_to_tolerances)):
            part = self.primary_to_part[primary]
            if tolerances[part] == 1:
                p_primary = self.primary_distn[primary]
                p_tolerances = 1.0
                for tolerance_class, tolerance_state in enumerate(tolerances):
                    if tolerance_class != part:
                        p_tolerances *= self.tolerance_distn[tolerance_state]
                self.compound_distn[i] = p_primary * p_tolerances

        # Check the number of entries in the compound state distribution.
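        # np.ldexp(nprimary, nparts - 1) == nprimary * 2**(nparts - 1): the tolerance bit of
        # each state's own part is pinned to 1, halving the 2**nparts combinations.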
        if len(self.compound_distn) != np.ldexp(self.nprimary, self.nparts-1):
            raise Exception('internal error')

        # Check that the distributions have the correct normalization.
        # The loop is unrolled to better isolate errors.
        if not np.allclose(sum(self.primary_distn.values()), 1):
            raise Exception('internal error')
        if not np.allclose(sum(self.tolerance_distn.values()), 1):
            raise Exception('internal error')
        if not np.allclose(sum(self.compound_distn.values()), 1):
            raise Exception('internal error')

        # Define the compound transition rate matrix.
        # Use compound_distn to avoid formal states with zero probability.
        # This is slow, but we do not need to be fast.
        self.Q_compound = nx.DiGraph()
        for i in self.compound_distn:
            for j in self.compound_distn:
                if i == j:
                    continue
                i_prim = self.compound_to_primary[i]
                j_prim = self.compound_to_primary[j]
                i_tols = self.compound_to_tolerances[i]
                j_tols = self.compound_to_tolerances[j]
                tol_pairs = list(enumerate(zip(i_tols, j_tols)))
                tol_diffs = [(k, x, y) for k, (x, y) in tol_pairs if x != y]
                tol_hdist = len(tol_diffs)

                # Look for a tolerance state change.
                # Do not allow simultaneous primary and tolerance changes.
                # Do not allow more than one simultaneous tolerance change.
                # Do not allow changes to the primary tolerance class.
                if tol_hdist > 0:
                    if i_prim != j_prim:
                        continue
                    if tol_hdist > 1:
                        continue
                    part, i_tol, j_tol = tol_diffs[0]
                    if part == self.primary_to_part[i_prim]:
                        continue

                    # Add the transition rate.
                    if j_tol:
                        weight = self.rate_on
                    else:
                        weight = self.rate_off
                    self.Q_compound.add_edge(i, j, weight=weight)

                # Look for a primary state change.
                # Do not allow simultaneous primary and tolerance changes.
                # Do not allow a change to a non-tolerated primary class.
                # Do not allow transitions that have zero rate
                # in the primary process.
                if i_prim != j_prim:
                    if tol_hdist > 0:
                        continue
                    if not i_tols[self.primary_to_part[j_prim]]:
                        continue
                    if not self.Q_primary.has_edge(i_prim, j_prim):
                        continue
                    
                    # Add the primary state transition rate.
                    weight = self.Q_primary[i_prim][j_prim]['weight']
                    self.Q_compound.add_edge(i, j, weight=weight)
Example #45
import numpy as np

np.ldexp(5, np.arange(4))
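# -> [ 5., 10., 20., 40.]   (5 * 2**k for k = 0, 1, 2, 3)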
x = np.arange(6)
np.ldexp(*np.frexp(x))
Example #46
 def test_ldexp_array(self):
     assert np.all(np.ldexp(np.array([1.0, 2.0, 3.0]) * u.m, [3, 2, 1]) == np.array([8.0, 8.0, 6.0]) * u.m)
Example #47
def n_ldexp(a, b):
    """safe ldexp"""
    return np.ldexp(a, intify(b))
Example #48
def ldexp_usecase(x, y, result):
    np.ldexp(x, y, result)