Example #1
1
def genTrueFreqTracks(tStamps):
    """Generate the ground-truth frequency tracks used to compute estimation error.

    Specific to sines-440-602-hRange.wav, which contains two steady
    sinusoids at 440 Hz and 602 Hz.

    Parameters
    ----------
    tStamps : 1d array or sequence
        Time stamps of the analysis frames; only its length is used.

    Returns
    -------
    fTrack : 2d array, shape (len(tStamps), 2)
        Column 0 is the constant 440 Hz track, column 1 the 602 Hz track.
    """
    fTrack = np.zeros((len(tStamps), 2))
    # Scalar broadcast fills each column directly — clearer and cheaper than
    # building (N, 1) matrices of ones and transposing them.
    fTrack[:, 0] = 440.0
    fTrack[:, 1] = 602.0
    return fTrack
Example #2
1
def cartadd(x, y):
    """
    Cartesian addition of two fuzzy membership vectors; algebraic method.

    Parameters
    ----------
    x : 1D array or iterable
        First fuzzy membership vector, of length M.
    y : 1D array or iterable
        Second fuzzy membership vector, of length N.

    Returns
    -------
    z : 2D array
        Cartesian addition of x and y, of shape (M, N).

    """
    # Flatten to rank-1 regardless of the inputs' original shapes.
    xv = np.asarray(x).ravel()
    yv = np.asarray(y).ravel()

    # Spread x down the rows and y across the columns, then add:
    # z[i, j] = x[i] + y[j].
    col_grid = np.outer(xv, np.ones(len(yv)))
    row_grid = np.outer(np.ones(len(xv)), yv)

    return col_grid + row_grid
Example #3
1
    def _set_optimization_variables_lower_bounds(self, umin, qmin, xmin, x0):
        # Assemble the stacked lower-bound vector for the NLP decision
        # variables (controls U, constant controls Q, states X).
        #
        # For each of umin/qmin/xmin: remember whether the caller supplied a
        # value, normalize it through the input checks, and fall back to
        # -inf (unbounded below) when nothing was provided.

        umin_user_provided = umin

        umin = inputchecks.check_controls_data(umin, self._discretization.system.nu, 1)

        if umin_user_provided is None:

            umin = -np.inf * np.ones(umin.shape)

        # Repeat the per-interval control bound across all control intervals.
        Umin = ci.repmat(umin, 1, self._discretization.optimization_variables["U"].shape[1])

        qmin_user_provided = qmin

        qmin = inputchecks.check_constant_controls_data(qmin, self._discretization.system.nq)

        if qmin_user_provided is None:

            qmin = -np.inf * np.ones(qmin.shape)

        # Constant controls appear once, so no repetition is needed.
        Qmin = qmin

        xmin_user_provided = xmin

        xmin = inputchecks.check_states_data(xmin, self._discretization.system.nx, 0)

        if xmin_user_provided is None:

            xmin = -np.inf * np.ones(xmin.shape)

        # Repeat the state bound across all discretization nodes ...
        Xmin = ci.repmat(xmin, 1, self._discretization.optimization_variables["X"].shape[1])

        # ... but pin the initial state to x0 (fixed via its bound).
        Xmin[:, 0] = x0

        # Stack in the same [U, Q, X] order as the decision-variable vector.
        self._optimization_variables_lower_bounds = ci.veccat([Umin, Qmin, Xmin])
Example #4
1
def test_random_sizes():
    # make sure the size is not a problem

    niter = 10
    elem = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]], dtype=np.uint8)
    # np.random.random_integers is deprecated (and removed in modern NumPy);
    # randint's upper bound is exclusive, hence 101 to keep [1, 100].
    # Also honor `niter`, which the original defined but never used.
    for m, n in np.random.randint(1, 101, size=(niter, 2)):
        mask = np.ones((m, n), dtype=np.uint8)

        # 8-bit path: output shape must follow input shape for any size/shift.
        image8 = np.ones((m, n), dtype=np.uint8)
        out8 = np.empty_like(image8)
        rank.mean(image=image8, selem=elem, mask=mask, out=out8, shift_x=0, shift_y=0)
        assert_array_equal(image8.shape, out8.shape)
        rank.mean(image=image8, selem=elem, mask=mask, out=out8, shift_x=+1, shift_y=+1)
        assert_array_equal(image8.shape, out8.shape)

        # 16-bit path.
        image16 = np.ones((m, n), dtype=np.uint16)
        out16 = np.empty_like(image8, dtype=np.uint16)
        rank.mean(image=image16, selem=elem, mask=mask, out=out16, shift_x=0, shift_y=0)
        assert_array_equal(image16.shape, out16.shape)
        rank.mean(image=image16, selem=elem, mask=mask, out=out16, shift_x=+1, shift_y=+1)
        assert_array_equal(image16.shape, out16.shape)

        rank.percentile_mean(image=image16, mask=mask, out=out16, selem=elem, shift_x=0, shift_y=0, p0=0.1, p1=0.9)
        assert_array_equal(image16.shape, out16.shape)
        rank.percentile_mean(image=image16, mask=mask, out=out16, selem=elem, shift_x=+1, shift_y=+1, p0=0.1, p1=0.9)
        assert_array_equal(image16.shape, out16.shape)
Example #5
1
def fuzzy_min(x, A, y, B):
    """
    Finds minimum between fuzzy set A in universe x and fuzzy set B in
    universe y.

    Parameters
    ----------
    x : 1d array, length N
        Universe variable for fuzzy set A.
    A : 1d array, length N
        Fuzzy set for universe x.
    y : 1d array, length M
        Universe variable for fuzzy set B.
    B : 1d array, length M
        Fuzzy set for universe y.

    Returns
    -------
    z : 1d array
        Output variable.
    mfz : 1d array
        Fuzzy membership set for variable z.

    Note
    ----
    Uses Zadeh's Extension Principle from Ross, Fuzzy Logic w/Engineering
    Applications, (2010), pp.414, Eq. 12.17.

    """
    # Build (M, N) grids: A and x repeated across columns, B and y repeated
    # down rows, so every (x_i, y_j) pairing appears exactly once.
    cols = len(B)
    rows = len(A)
    AA = np.outer(A, np.ones(cols))
    X = np.outer(x, np.ones(cols))
    BB = np.outer(np.ones(rows), B)
    Y = np.outer(np.ones(rows), y)

    # Candidate output values are the element-wise minima of the universes,
    # flattened and placed in ascending order; memberships follow the same
    # reordering so value/membership pairs stay aligned.
    Z = np.fmin(X, Y).ravel()
    order = np.argsort(Z)
    memberships = np.fmin(AA, BB).ravel()[order]
    Z = Z[order]

    if Z.size == 0:
        return np.empty(0), np.empty(0)

    # Collapse duplicate output values, keeping the maximum membership of
    # each duplicate group (the "sup" of the extension principle).
    z_vals = []
    mf_vals = []
    pos = 0
    top = Z.max()
    while True:
        group = np.nonzero(Z == Z[pos])[0]
        z_vals.append(Z[pos])
        mf_vals.append(memberships[group].max())
        if Z[pos] == top:
            break
        pos = group.max() + 1

    return np.array(z_vals), np.array(mf_vals)
Example #6
1
    def test_square_matrices_1(self):
        """Round-trip three small square matrices through OP4 write/read.

        Covers both the ASCII and binary writers and checks that matrix
        forms and values survive the round trip.
        """
        op4 = OP4()
        form1 = 1
        form2 = 2
        form3 = 2
        from numpy import matrix, ones, reshape, arange

        A1 = matrix(ones((3, 3), dtype="float64"))
        A2 = reshape(arange(9, dtype="float64"), (3, 3))
        A3 = matrix(ones((1, 1), dtype="float32"))
        matrices = {"A1": (form1, A1), "A2": (form2, A2), "A3": (form3, A3)}

        for (is_binary, fname) in [(False, "small_ascii.op4"), (True, "small_binary.op4")]:
            op4_filename = os.path.join(op4Path, fname)
            # BUG FIX: honor the loop variable; the original hard-coded
            # is_binary=False, so the binary writer was never exercised.
            op4.write_op4(op4_filename, matrices, name_order=None, precision="default", is_binary=is_binary)
            matrices2 = op4.read_op4(op4_filename, precision="default")

            # (The original read and asserted A1/A2 twice; once is enough.)
            (form1b, A1b) = matrices2["A1"]
            (form2b, A2b) = matrices2["A2"]
            (form3b, A3b) = matrices2["A3"]
            self.assertEqual(form1, form1b)
            self.assertEqual(form2, form2b)
            self.assertEqual(form3, form3b)

            self.assertTrue(array_equal(A1, A1b))
            self.assertTrue(array_equal(A2, A2b))
            self.assertTrue(array_equal(A3, A3b))
            del A1b, A2b, A3b
            del form1b, form2b, form3b
Example #7
1
def center_data(X, y, fit_intercept, normalize=False, copy=True, sample_weight=None):
    """
    Centers data to have mean zero along axis 0. This is here because
    nearly all linear models will want their data to be centered.

    If sample_weight is not None, then the weighted mean of X and y
    is zero, and not the mean itself

    Returns the (possibly copied) centered X and y together with the
    per-column mean and scale that were removed (X_mean, y_mean, X_std),
    so callers can undo/reapply the transformation.
    """
    X = as_float_array(X, copy)
    if fit_intercept:
        # A scalar sample_weight weights every sample equally, which is
        # equivalent to no weighting at all.
        if isinstance(sample_weight, numbers.Number):
            sample_weight = None
        if sp.issparse(X):
            # Sparse X is left untouched (centering would densify it);
            # report neutral statistics instead.
            X_mean = np.zeros(X.shape[1])
            X_std = np.ones(X.shape[1])
        else:
            X_mean = np.average(X, axis=0, weights=sample_weight)
            X -= X_mean
            if normalize:
                # XXX: currently scaled to variance=n_samples
                X_std = np.sqrt(np.sum(X ** 2, axis=0))
                # Avoid division by zero for constant columns.
                X_std[X_std == 0] = 1
                X /= X_std
            else:
                X_std = np.ones(X.shape[1])
        y_mean = np.average(y, axis=0, weights=sample_weight)
        y = y - y_mean
    else:
        # No intercept: data is returned unmodified with neutral statistics.
        X_mean = np.zeros(X.shape[1])
        X_std = np.ones(X.shape[1])
        y_mean = 0.0 if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype)
    return X, y, X_mean, y_mean, X_std
Example #8
1
def cartprod(x, y):
    """
    Cartesian product of two fuzzy membership vectors. Uses `min()`.

    Parameters
    ----------
    x : 1D array or iterable
        First fuzzy membership vector, of length M.
    y : 1D array or iterable
        Second fuzzy membership vector, of length N.

    Returns
    -------
    z : 2D array
        Cartesian product of x and y, of shape (M, N).

    """
    # Flatten to rank-1 regardless of the inputs' original shapes.
    xv = np.asarray(x).ravel()
    yv = np.asarray(y).ravel()

    # Spread x down the rows and y across the columns, then take the
    # element-wise minimum: z[i, j] = min(x[i], y[j]).
    col_grid = np.outer(xv, np.ones(len(yv)))
    row_grid = np.outer(np.ones(len(xv)), yv)

    return np.fmin(col_grid, row_grid)
Example #9
0
def test_pass_on_bitdepth():
    # should pass because data bitdepth is not too high for the function

    # 12-bit data (2 ** 11) stored in a uint16 image.
    image = np.ones((100, 100), dtype=np.uint16) * 2 ** 11
    elem = np.ones((3, 3), dtype=np.uint8)
    out = np.empty_like(image)
    mask = np.ones(image.shape, dtype=np.uint8)
    # NOTE(review): only fixtures are built here — no rank filter is ever
    # invoked, so this test can never fail as written. Verify whether the
    # intended filter call was lost.
Example #10
0
def SegIntersect(A1, A2, B1, B2):
    """The function returns the intersection or the points of closest approach if lines are skewed.
    If lines are parallel, NaN is returned.

    INPUT:
        A1  -float(3,n), [x,y,z;nsegments] coordinates of 1st point(s) of 1st segment(s)
        A2  -float(3,n), [x,y,z;nsegments] coordinates of 2nd point(s) of 1st segment(s)
        B1  -float(3,n), [x,y,z;nsegments] coordinates of 1st point(s) of 2nd segment(s)
        B2  -float(3,n), [x,y,z;nsegments] coordinates of 2nd point(s) of 2nd segment(s)
    OUTPUT:
        A0  -float(3,n), intersection point (=B0) or closest point to 2nd line on 1st segment,
        B0  -float(3,n), intersection point (=A0) or closest point to 1st line on 2nd segment,
        OR  -NaN columns where the lines are parallel (zero denominator).
    """

    # Reshape A1..B2 in case they have 1 dimension only (one segment each).
    A1 = A1.reshape(3, -1)
    A2 = A2.reshape(3, -1)
    B1 = B1.reshape(3, -1)
    B2 = B2.reshape(3, -1)

    # Cross products taken along axis 0 (the x/y/z axis of each column).
    vec = np.cross(A2 - A1, B2 - B1, axisa=0, axisb=0, axisc=0)
    # Numerators of the line parameters for each segment pair.
    nA = np.sum(np.cross(B2 - B1, A1 - B1, axisa=0, axisb=0, axisc=0) * vec, axis=0) * np.ones(A1.shape[1])
    nB = np.sum(np.cross(A2 - A1, A1 - B1, axisa=0, axisb=0, axisc=0) * vec, axis=0) * np.ones(A1.shape[1])
    # Common denominator; zero means the direction vectors are parallel.
    d = np.sum(vec ** 2, axis=0) * np.ones(A1.shape[1])

    # np.nan: the np.NaN alias was removed in NumPy 2.0.
    A0 = np.ones(A1.shape) * np.nan
    B0 = A0.copy()
    # Solve only the non-parallel columns; parallel ones stay NaN.
    idx = np.nonzero(d)[0]
    A0[:, idx] = A1[:, idx] + (nA[idx] / d[idx]) * (A2[:, idx] - A1[:, idx])
    B0[:, idx] = B1[:, idx] + (nB[idx] / d[idx]) * (B2[:, idx] - B1[:, idx])

    return A0, B0
Example #11
0
 def test_pressure_network_no_gradient(self):
     """root/lm without gradient, equal pipes -> equal flows"""
     # Four identical pipes sharing a total inflow of 4 should settle to a
     # flow of 1 each; solve with root(method="lm") and no Jacobian.
     k = np.ones(4) * 0.5
     Qtot = 4
     initial_guess = array([2.0, 0.0, 2.0, 0.0])
     final_flows = optimize.root(pressure_network, initial_guess, method="lm", args=(Qtot, k)).x
     assert_array_almost_equal(final_flows, np.ones(4))
Example #12
0
 def test_pressure_network_with_gradient(self):
     """fsolve with gradient, equal pipes -> equal flows"""
     # Same network as the no-gradient case, but solved with fsolve and an
     # explicit analytic Jacobian (fprime).
     k = np.ones(4) * 0.5
     Qtot = 4
     initial_guess = array([2.0, 0.0, 2.0, 0.0])
     final_flows = optimize.fsolve(pressure_network, initial_guess, args=(Qtot, k), fprime=pressure_network_jacobian)
     assert_array_almost_equal(final_flows, np.ones(4))
Example #13
0
    def __init__(self, inputNum, hiddenNum, outputNum, dataNum, l):
        """
        input: the number of input neurons (in this case features)
        hidden: the number of hidden neurons (should be tuned)
        output: the number of output neurons (the classifications of image)
        dataNum: the number of samples (one column per sample below)
        l: lambda
        """
        self.input = inputNum  # without bias node
        self.hidden = hiddenNum  # without bias node
        self.output = outputNum
        self.dataNum = dataNum
        self.l = l

        "allocate memory for activation matrix of 1s"
        # One column per sample; the extra row is the bias unit.
        self.inputActivation = np.ones((self.input + 1, dataNum))  # add bias node
        self.hiddenActivation = np.ones((self.hidden + 1, dataNum))  # add bias node
        self.outputActivation = np.ones((self.output, dataNum))

        "allocate memory for bias vector"
        self.bias = np.ones((1, dataNum))

        "allocate memory for change matrix of 0s"
        # Weight-gradient accumulators, same shapes as the weight matrices.
        self.inputChange = np.zeros((self.hidden, self.input + 1))
        self.outputChange = np.zeros((self.output, self.hidden + 1))

        "calculate epsilon for randomization"
        # sqrt(6 / (fan_in + fan_out)) — Glorot-style initialization range.
        self.hiddenEpsilon = np.sqrt(6.0 / (self.input + self.hidden))
        # NOTE(review): uses self.input (not self.hidden) as fan-in for the
        # output layer — confirm this is intentional.
        self.outputEpsilon = np.sqrt(6.0 / (self.input + self.output))

        "allocate memory for randomized weights"
        # Uniform weights in [-epsilon, +epsilon].
        self.inputWeights = np.random.rand(self.hidden, self.input + 1) * 2 * self.hiddenEpsilon - self.hiddenEpsilon
        self.outputWeights = np.random.rand(self.output, self.hidden + 1) * 2 * self.outputEpsilon - self.outputEpsilon
Example #14
0
def sparse_center_data(X, y, fit_intercept, normalize=False):
    """
    Compute information needed to center data to have mean zero along
    axis 0. Be aware that X will not be centered since it would break
    the sparsity, but will be normalized if asked so.

    Returns (X, y, X_mean, y_mean, X_std); y IS centered, X only carries
    its would-be mean/scale so callers can account for them later.
    """
    if fit_intercept:
        # we might require not to change the csr matrix sometimes
        # store a copy if normalize is True.
        # Change dtype to float64 since mean_variance_axis accepts
        # it that way.
        if sp.isspmatrix(X) and X.getformat() == "csr":
            X = sp.csr_matrix(X, copy=normalize, dtype=np.float64)
        else:
            X = sp.csc_matrix(X, copy=normalize, dtype=np.float64)

        X_mean, X_var = mean_variance_axis(X, axis=0)
        if normalize:
            # transform variance to std in-place
            # XXX: currently scaled to variance=n_samples to match center_data
            X_var *= X.shape[0]
            # np.sqrt(X_var, X_var): second arg is the `out` buffer (in-place).
            X_std = np.sqrt(X_var, X_var)
            del X_var
            # Avoid division by zero for constant columns.
            X_std[X_std == 0] = 1
            inplace_column_scale(X, 1.0 / X_std)
        else:
            X_std = np.ones(X.shape[1])
        y_mean = y.mean(axis=0)
        y = y - y_mean
    else:
        # No intercept: neutral statistics, data untouched.
        X_mean = np.zeros(X.shape[1])
        X_std = np.ones(X.shape[1])
        y_mean = 0.0 if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype)

    return X, y, X_mean, y_mean, X_std
Example #15
0
def expand(A):
    """Expand each row of A into an (N, N) lower-triangular matrix.

    For an (M, N) input, returns an (M, N, N) array where
    out[m, i, j] = A[m, j] if i >= j else 0.
    """
    M, N = A.shape
    # Lower-triangular mask broadcast against each row of A reproduces the
    # original kron/triu/swapaxes construction in one step.
    mask = np.tril(np.ones((N, N)))
    return mask * A[:, None, :]
Example #16
0
def estimate_threshold(in1, edge_excl=0, int_excl=0):
    """
    This function estimates the noise using the MAD estimator.

    INPUTS:
    in1             (no default):   The array from which the noise is estimated,
                                    of shape (scales, rows, cols).
    edge_excl       (default=0):    Width of the border to exclude from the estimate.
    int_excl        (default=0):    Half-width of the central square to exclude.

    OUTPUTS:
    out1                            An array of per-scale noise estimates.
    """

    out1 = np.empty([in1.shape[0]])
    # BUG FIX: integer division. `mid` is used as a slice index below, and
    # plain `/` yields a float on Python 3 (TypeError in the slice).
    mid = in1.shape[1] // 2

    if (edge_excl != 0) | (int_excl != 0):

        if edge_excl != 0:
            # Keep only the interior, dropping an edge_excl-wide border.
            mask = np.zeros([in1.shape[1], in1.shape[2]])
            mask[edge_excl:-edge_excl, edge_excl:-edge_excl] = 1
        else:
            mask = np.ones([in1.shape[1], in1.shape[2]])

        if int_excl != 0:
            # Additionally exclude a central square of half-width int_excl.
            mask[mid - int_excl : mid + int_excl, mid - int_excl : mid + int_excl] = 0

    else:
        mask = np.ones([in1.shape[1], in1.shape[2]])

    for i in range(in1.shape[0]):
        # median(|x|) / 0.6745 converts the median absolute value to a
        # Gaussian sigma estimate (assumes zero-centred data).
        out1[i] = np.median(np.abs(in1[i, mask == 1])) / 0.6745

    return out1
Example #17
0
def test_cartesian_operations():
    """
    more tests of CartesianPoints beyond those in test_api
    """
    import numpy as np

    from .. import Longitude, Latitude
    from .. import CartesianPoints

    # Ten points, all at (1, 1, 1) kpc.
    c = CartesianPoints(np.ones(10), np.ones(10), np.ones(10), unit=u.kpc)

    # Addition doubles the coordinates and keeps the unit.
    c2 = c + c
    assert c2.y[2].value == 2
    assert c2.unit == c.unit

    # Subtraction of itself gives zeros.
    c3 = c - c
    assert c3[1, 2].value == 0

    # Spherical conversion returns (r, lat, lon) with the proper types.
    r, lat, lon = c.to_spherical()

    assert r.unit == c.unit
    assert isinstance(lat, Latitude)
    assert isinstance(lon, Longitude)

    # Scalar multiplication keeps the unit.
    c4 = c * 3
    assert c4.unit == c.unit

    # always preserve the CartesianPoint's units
    c5 = 3 * u.pc + c
    assert c5.unit == c.unit
Example #18
0
    def corners(self):
        """Return the four mapped corner points of the output rectangle.

        Builds the corners in homogeneous coordinates (third row of ones),
        applies the 2x3 affine map, and returns integer pixel tuples in the
        order (-x,-y), (-x,+y), (+x,+y), (+x,-y) relative to the centre.
        """
        # Corner offsets around the centre; the third row stays 1 so the
        # affine translation column applies.
        xy1 = numpy.ones((3, 4))
        xy1[0, 0] = -self.outSize[0] / 2
        xy1[0, 1] = -self.outSize[0] / 2
        xy1[0, 2] = self.outSize[0] / 2
        xy1[0, 3] = self.outSize[0] / 2
        xy1[1, 0] = -self.outSize[1] / 2
        xy1[1, 1] = self.outSize[1] / 2
        xy1[1, 2] = self.outSize[1] / 2
        xy1[1, 3] = -self.outSize[1] / 2

        # PERF: call mapMatrix() once; the original invoked it six times to
        # copy one 2x3 matrix element by element (assumes mapMatrix() is a
        # pure getter — TODO confirm).
        src = self.mapMatrix()
        mapMatrix = numpy.ones((2, 3))
        for r in range(2):
            for c in range(3):
                mapMatrix[r, c] = src[r, c]

        xy2 = numpy.dot(mapMatrix, xy1)
        polygonCorners = (
            (int(xy2[0, 0]), int(xy2[1, 0])),
            (int(xy2[0, 1]), int(xy2[1, 1])),
            (int(xy2[0, 2]), int(xy2[1, 2])),
            (int(xy2[0, 3]), int(xy2[1, 3])),
        )
        return polygonCorners
Example #19
0
def test_alloc_memset_0():
    # GpuAlloc should use the fast memset path only when filling with a
    # scalar zero; any other fill value must take the general path.
    i = tensor.iscalar()
    z = numpy.zeros((1,), dtype="float32")
    o = numpy.ones((1,), dtype="float32")
    ones = numpy.ones((2,), dtype="float32")

    # Test with 0
    a = basic_ops.gpu_alloc(cuda.gpu_from_host(tensor.constant(z)), i)
    f = theano.function([i], a, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    # Zero fill: the op must flag memset_0 and the output must be all zeros.
    assert isinstance(topo[0].op, basic_ops.GpuAlloc) and topo[0].op.memset_0
    assert (numpy.asarray(f(6)) == 0).all()

    # Test with 1
    a = basic_ops.gpu_alloc(cuda.gpu_from_host(tensor.constant(o)), i)
    f = theano.function([i], a, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, basic_ops.GpuAlloc)
    # Non-zero fill: memset_0 must not be used.
    assert not topo[0].op.memset_0
    assert (numpy.asarray(f(6)) == 1).all()

    # Test with 1, 1
    a = basic_ops.gpu_alloc(cuda.gpu_from_host(tensor.constant(ones)), i)
    f = theano.function([i], a, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, basic_ops.GpuAlloc)
    # Length-2 fill value: not a scalar zero, so no memset either.
    assert not topo[0].op.memset_0
    assert (numpy.asarray(f(2)) == 1).all()
Example #20
0
 def testLSTMBlockCell(self):
     # Two stacked LSTMBlockCells (state_is_tuple) fed a fixed input and
     # state; outputs are compared against reference values from
     # testBasicLSTMCell, so the block cell must match the basic cell.
     with self.test_session(use_gpu=self._use_gpu, graph=tf.Graph()) as sess:
         with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
             x = tf.zeros([1, 2])
             # (m0, m1) and (m2, m3) are the (c, h) state pairs of the two layers.
             m0 = tf.zeros([1, 2])
             m1 = tf.zeros([1, 2])
             m2 = tf.zeros([1, 2])
             m3 = tf.zeros([1, 2])
             g, ((out_m0, out_m1), (out_m2, out_m3)) = tf.nn.rnn_cell.MultiRNNCell(
                 [tf.contrib.rnn.LSTMBlockCell(2)] * 2, state_is_tuple=True
             )(x, ((m0, m1), (m2, m3)))
             sess.run([tf.global_variables_initializer()])
             res = sess.run(
                 [g, out_m0, out_m1, out_m2, out_m3],
                 {
                     x.name: np.array([[1.0, 1.0]]),
                     m0.name: 0.1 * np.ones([1, 2]),
                     m1.name: 0.1 * np.ones([1, 2]),
                     m2.name: 0.1 * np.ones([1, 2]),
                     m3.name: 0.1 * np.ones([1, 2]),
                 },
             )
             self.assertEqual(len(res), 5)
             self.assertAllClose(res[0], [[0.24024698, 0.24024698]])
             # These numbers are from testBasicLSTMCell and only test c/h.
             self.assertAllClose(res[1], [[0.68967271, 0.68967271]])
             self.assertAllClose(res[2], [[0.44848421, 0.44848421]])
             self.assertAllClose(res[3], [[0.39897051, 0.39897051]])
             self.assertAllClose(res[4], [[0.24024698, 0.24024698]])
Example #21
0
def test_entropy():
    #  verify that entropy is coherent with bitdepth of the input data
    # Each pattern tiles k distinct equiprobable values, so the local
    # entropy is log2(k) bits; rank.entropy apparently reports it scaled
    # by 10 (x1000 for the uint16 case) — TODO confirm the scaling contract.

    selem = np.ones((16, 16), dtype=np.uint8)
    # 1 bit per pixel
    data = np.tile(np.asarray([0, 1]), (100, 100)).astype(np.uint8)
    assert np.max(rank.entropy(data, selem)) == 10

    # 2 bit per pixel
    data = np.tile(np.asarray([[0, 1], [2, 3]]), (10, 10)).astype(np.uint8)
    assert np.max(rank.entropy(data, selem)) == 20

    # 3 bit per pixel
    data = np.tile(np.asarray([[0, 1, 2, 3], [4, 5, 6, 7]]), (10, 10)).astype(np.uint8)
    assert np.max(rank.entropy(data, selem)) == 30

    # 4 bit per pixel
    data = np.tile(np.reshape(np.arange(16), (4, 4)), (10, 10)).astype(np.uint8)
    assert np.max(rank.entropy(data, selem)) == 40

    # 6 bit per pixel
    data = np.tile(np.reshape(np.arange(64), (8, 8)), (10, 10)).astype(np.uint8)
    assert np.max(rank.entropy(data, selem)) == 60

    # 8-bit per pixel
    data = np.tile(np.reshape(np.arange(256), (16, 16)), (10, 10)).astype(np.uint8)
    assert np.max(rank.entropy(data, selem)) == 80

    # 12 bit per pixel
    # Larger selem so a full 4096-value tile fits inside the window.
    selem = np.ones((64, 64), dtype=np.uint8)
    data = np.tile(np.reshape(np.arange(4096), (64, 64)), (2, 2)).astype(np.uint16)
    assert np.max(rank.entropy(data, selem)) == 12000
Example #22
0
    def test_make_function_return_value(self):
        """Check that make() return value meets expectation"""
        pvals = self.ff.pvals0

        self.logger.debug("Running forcefield.make() with zero vector should not change pvals... ")
        new_pvals = np.array(self.ff.make(np.zeros(self.ff.np)))
        self.assertEqual(pvals.size, new_pvals.size)
        # given zero matrix, make should return unchanged pvals
        # BUG FIX: assertEqual on NumPy arrays raises "truth value is
        # ambiguous" for size > 1 arrays; compare element-wise instead.
        self.assertTrue((pvals == new_pvals).all(),
                    msg="\nmake() should produce unchanged pvals when given zero vector")
        self.logger.debug("ok\n")

        self.logger.debug("make() should return different values when passed in nonzero pval matrix... ")
        new_pvals = np.array(self.ff.make(np.ones(self.ff.np)))
        self.assertEqual(pvals.size, new_pvals.size)
        # given arbitrary nonzero input, make should return new pvals
        self.assertFalse((pvals == new_pvals).all(), msg="\nmake() returned unchanged pvals even when given nonzero matrix")
        self.logger.debug("ok\n")

        self.logger.debug("make(use_pvals=True) should return the same pvals... ")
        new_pvals = np.array(self.ff.make(np.ones(self.ff.np), use_pvals=True))
        # BUG FIX: same array-vs-assertEqual problem as above.
        self.assertTrue((np.ones(self.ff.np) == new_pvals).all(),
                        msg="\nmake() did not return input pvals with use_pvals=True")
        self.logger.debug("ok\n")

        # Clean up the parameter file written by make().
        os.remove(self.options['root'] + '/' + self.ff.fnms[0])
Example #23
0
    def train(self, category):
        """Train an Extreme Learning Machine for the given category.

        Tries several hidden-layer sizes, keeps the random input weights W
        and solved output weights B with the lowest normalized validation
        error, and returns (B_best, W_best, validation targets, best
        validation predictions).
        """
        training_x, training_y, validation_x, validation_y = self.SplitData(category)
        # Extreme Learning Machine
        # Simple enough for small data, can be scaled if you want. GPU scaling.
        best_prediction = np.inf  # np.inf: the np.Inf alias was removed in NumPy 2.0
        neuron = [5, 8, 11, 14, 17, 20, 40, 60, 80]
        for no_neuron in neuron:
            # Random input weights (+1 row for the bias column appended to X).
            W = np.random.rand(training_x.shape[1] + 1, no_neuron)
            X = np.concatenate((training_x, np.ones((training_x.shape[0], 1))), axis=1)
            Y = training_y
            H = self.mysigmoid(np.dot(X, W))
            # Ridge-regularized least squares for the output weights.
            B = np.dot(np.dot(np.linalg.inv(np.dot(H.T, H) + 0.0001 * np.eye(H.shape[1])), H.T), Y)

            X_val = np.concatenate((validation_x, np.ones((validation_x.shape[0], 1))), axis=1)
            Y_val = validation_y
            prediction_val = np.dot(self.mysigmoid(np.dot(X_val, W)), B)
            # Fraction of unexplained variance on the validation set.
            err = np.var((prediction_val - Y_val)) / np.var(Y_val)
            if err < best_prediction:
                B_best = B
                W_best = W
                best_prediction = err
                NO = no_neuron
                Yt_best = np.dot(self.mysigmoid(np.dot(X, W)), B)
                prediction_val_best = np.dot(self.mysigmoid(np.dot(X_val, W)), B)
        # print() call form: valid on both Python 2 and 3 (the original used
        # the Python-2-only print statement).
        print("The number of neuron is %d, and best performance is %f" % (NO, best_prediction))
        return B_best, W_best, Y_val, prediction_val_best
def data_gen():
    """Generate synthetic LDA image-topic data and write cross-validation
    folds to disk for each experiment."""
    ### IN
    ## experiment stuff
    np.random.seed(1234)  # fixed seed for reproducible data sets
    data_dir = "data"
    n_exp = 1  # number of experiments
    ## LDA stuff
    W = 25  #  word vocabulary
    L = int(np.sqrt(W))  # image size
    T = 2 * L  # topics
    D = 1000  # documents
    N = 100  # words per document
    alpha = 1.0  # hyper-param for mixture of topics (theta)
    beta = 1.0  # hyper-param for topic distribs (phi),
    # used only as param in pb

    # stan
    chains = 1

    # CV
    K = 5  # folds

    # phi is given as the horizontal and vertical topics on the 5X5 images
    # First L topics are uniform rows, the next L are uniform columns.
    phi = [np.zeros((L, L)) for i in range(T)]
    line = 0
    for phi_t in phi:
        if line >= L:
            # Column topic: index wraps back to 0 after the L row topics.
            trueLine = int(line - L)
            phi_t[:, trueLine] = 1.0 / L * np.ones(L)
        else:
            # Row topic.
            phi_t[line] = 1.0 / L * np.ones(L)
        line += 1
    rewrite_dir(data_dir)
    # One K-fold data set per experiment.
    [write_cv_data(K, data_dir, i, W, L, T, D, N, phi, alpha, beta, chains) for i in range(n_exp)]
Example #25
0
    def seed_elements(self, *args, **kwargs):
        """Seed oil elements, filling in droplet-related defaults before
        delegating to the parent class implementation."""

        if "number" not in kwargs:
            number = 1
        else:
            number = kwargs["number"]
        if "diameter" in kwargs:
            logging.info("Droplet diameter is provided, and will " "be kept constant during simulation")
            self.keep_droplet_diameter = True
        else:
            self.keep_droplet_diameter = False
        if "z" not in kwargs:
            kwargs["z"] = 0
        # "seafloor" is a symbolic depth; represented here as z = -1 per
        # element (presumably resolved to the actual seafloor depth
        # downstream — TODO confirm).
        if kwargs["z"] == "seafloor":
            z = -np.ones(number)
        else:
            z = np.atleast_1d(kwargs["z"])
        if len(z) == 1:
            z = z * np.ones(number)  # Convert scalar z to array
        # Elements below the surface get randomized subsea droplet diameters
        # unless the caller provided explicit diameters.
        subsea = z < 0
        if np.sum(subsea) > 0 and "diameter" not in kwargs:
            # Droplet min and max for particles seeded below sea surface
            sub_dmin = self.get_config("input:spill:droplet_diameter_min_subsea")
            sub_dmax = self.get_config("input:spill:droplet_diameter_max_subsea")
            logging.info(
                "Using particle diameters between %s and %s m for "
                "elements seeded below sea surface." % (sub_dmin, sub_dmax)
            )
            kwargs["diameter"] = np.random.uniform(sub_dmin, sub_dmax, number)

        super(OpenOil3D, self).seed_elements(*args, **kwargs)
Example #26
0
    def test_half_funcs(self):
        """Test the various ArrFuncs"""

        # fill
        assert_equal(np.arange(10, dtype=float16), np.arange(10, dtype=float32))

        # fillwithscalar
        a = np.zeros((5,), dtype=float16)
        a.fill(1)
        assert_equal(a, np.ones((5,), dtype=float16))

        # nonzero and copyswap
        # -1/1e20 and tiny values underflow to zero in float16, so only
        # indices 2, 5, 6 are nonzero; result must survive a byteswap.
        a = np.array([0, 0, -1, -1 / 1e20, 0, 2.0 ** -24, 7.629e-6], dtype=float16)
        assert_equal(a.nonzero()[0], [2, 5, 6])
        a = a.byteswap().newbyteorder()
        assert_equal(a.nonzero()[0], [2, 5, 6])

        # dot
        # sum(0 + 0.5 + 1 + ... + 9.5) = 95
        a = np.arange(0, 10, 0.5, dtype=float16)
        b = np.ones((20,), dtype=float16)
        assert_equal(np.dot(a, b), 95)

        # argmax
        a = np.array([0, -np.inf, -2, 0.5, 12.55, 7.3, 2.1, 12.4], dtype=float16)
        assert_equal(a.argmax(), 4)
        # NaN wins argmax over inf per NumPy's propagation rules.
        a = np.array([0, -np.inf, -2, np.inf, 12.55, np.nan, 2.1, 12.4], dtype=float16)
        assert_equal(a.argmax(), 5)

        # getitem
        a = np.arange(10, dtype=float16)
        for i in range(10):
            assert_equal(a.item(i), i)
Example #27
0
def covConst(hyp=None, x=None, z=None, der=None):
    """Covariance function for a constant function.

    The covariance function is parameterized as:
    k(x^p,x^q) = sf2

    The scalar hyperparameter is:
    hyp = [ log(sqrt(sf2)) ]

    Parameters
    ----------
    hyp : sequence of 1 float, optional
        Log of the signal standard deviation. If None, the function reports
        the number of hyperparameters instead of evaluating.
    x : 2d array, shape (n, D)
        Training inputs (only n is used; the kernel is constant).
    z : None, "diag", or 2d array
        None -> full (n, n) self-covariance; "diag" -> (n, 1) diagonal;
        array -> (n, z.shape[0]) cross-covariance.
    der : int, optional
        If 0, return the derivative matrix w.r.t. the (single)
        hyperparameter; any other non-None value is an error.
    """

    # BUG FIX: use `is None` — `hyp == None` / `z == None` perform
    # element-wise comparison when given arrays, which is wrong here.
    if hyp is None:  # report number of parameters
        return [1]

    sf2 = np.exp(2.0 * hyp[0])  # s2

    n, m = x.shape
    # isinstance guard: comparing an ndarray to the string "diag" would not
    # reliably yield a scalar boolean.
    if isinstance(z, str) and z == "diag":
        A = sf2 * np.ones((n, 1))
    elif z is None:
        A = sf2 * np.ones((n, n))
    else:
        A = sf2 * np.ones((n, z.shape[0]))

    if der == 0:  # compute derivative matrix wrt sf2
        A = 2.0 * A
    elif der:
        raise Exception("Wrong derivative entry in covConst")
    return A
def sample_choice(prob_array, method="MC"):
    """sample chosen index given probability in prob_array

    prob_array - 2-d array for probability of being chosen, with probablities for a agent at
                 one row, and probabilities for alternatives at columns
    method - the method used to sample choice, either MC (Monte Carlo) or max_prob

    Returns (arange(rows), choices): one chosen column index per row.
    """

    # Modernized from Python-2-only syntax (`<>`, comma-raise) to forms
    # valid on both Python 2 and 3.
    if prob_array.ndim != 2:
        raise RuntimeError("prob_array must be a 2d array")

    rows, columns = prob_array.shape
    # Each row must be a proper probability distribution.
    sum_prob_by_col = sum(prob_array, axis=1, dtype=float64)
    if not ma.allclose(sum_prob_by_col, ones((rows,))):
        strange_rows = where(sum_prob_by_col != ones((rows,)))
        raise RuntimeError("prob_array must add up to 1 for each row. Abnormal rows: %s" % prob_array[strange_rows, :])

    if method.lower() == "mc":
        # Inverse-CDF sampling: first column whose cumulative probability
        # exceeds a per-row uniform draw.
        cum_prob = ncumsum(prob_array, axis=1)

        R = uniform(0, 1, rows)
        R.resize((rows, 1))

        match = R < cum_prob
        choices = argmax(match, axis=1)  # return the first index of 1 in each row
    elif method.lower() == "max_prob":
        # BUG FIX: axis=1 — a bare argmax over the flattened array returns a
        # single scalar, which could never pass the size check below.
        choices = argmax(prob_array, axis=1)

    if choices.size != rows:
        raise RuntimeError("having problems sample choice")

    return (arange(rows), choices)
Example #29
0
    def _set_optimization_variables_upper_bounds(self, umax, qmax, xmax, x0):
        # Mirror image of _set_optimization_variables_lower_bounds: assemble
        # the stacked upper-bound vector for controls U, constant controls Q
        # and states X, defaulting missing user input to +inf.

        umax_user_provided = umax

        umax = inputchecks.check_controls_data(umax, self._discretization.system.nu, 1)

        if umax_user_provided is None:

            umax = np.inf * np.ones(umax.shape)

        Umax = ci.repmat(umax, 1, self._discretization.optimization_variables["U"].shape[1])

        qmax_user_provided = qmax

        qmax = inputchecks.check_constant_controls_data(qmax, self._discretization.system.nq)

        if qmax_user_provided is None:

            # BUG FIX: an *upper* bound must default to +inf; the original
            # used -np.inf, which would make the problem infeasible.
            qmax = np.inf * np.ones(qmax.shape)

        Qmax = qmax

        xmax_user_provided = xmax

        xmax = inputchecks.check_states_data(xmax, self._discretization.system.nx, 0)

        if xmax_user_provided is None:

            xmax = np.inf * np.ones(xmax.shape)

        Xmax = ci.repmat(xmax, 1, self._discretization.optimization_variables["X"].shape[1])

        # Pin the initial state to x0 (fixed via its bound).
        Xmax[:, 0] = x0

        # BUG FIX: Qmax was missing from the stacked vector, so the bound
        # layout did not match the [U, Q, X] decision-variable layout used
        # by the lower-bound counterpart.
        self._optimization_variables_upper_bounds = ci.veccat([Umax, Qmax, Xmax])
Example #30
0
def old_rprop(
    param,
    learning_rate,
    gparam,
    mask,
    updates,
    current_cost,
    previous_cost,
    eta_plus=1.5,
    eta_minus=0.5,
    max_delta=50,
    min_delta=10e-8,
):
    """Rprop-style update rule built as a Theano symbolic graph.

    Appends shared-variable updates (previous gradient, per-weight step
    size `delta`, previous increment) to `updates` and returns the new
    symbolic parameter value `param + inc * mask`.

    eta_plus/eta_minus grow/shrink the step when the gradient keeps/flips
    sign; delta is clipped to [min_delta, max_delta].
    NOTE(review): current_cost and previous_cost are accepted but unused
    here — confirm whether that is intentional.
    """
    # Per-weight state carried across calls as shared variables.
    previous_grad = sharedX(numpy.ones(param.shape.eval()), borrow=True)
    delta = sharedX(learning_rate * numpy.ones(param.shape.eval()), borrow=True)
    previous_inc = sharedX(numpy.zeros(param.shape.eval()), borrow=True)
    zero = T.zeros_like(param)
    one = T.ones_like(param)
    # Positive where the gradient kept its sign, negative where it flipped.
    change = previous_grad * gparam

    # Grow the step on consistent sign, shrink on a flip, clip to bounds.
    new_delta = T.clip(
        T.switch(T.gt(change, 0.0), delta * eta_plus, T.switch(T.lt(change, 0.0), delta * eta_minus, delta)),
        min_delta,
        max_delta,
    )
    # On a sign flip, store 0 so the next step is treated as "no change".
    new_previous_grad = T.switch(T.gt(change, 0.0), gparam, T.switch(T.lt(change, 0.0), zero, gparam))
    # Step against the gradient sign; no step immediately after a flip.
    inc = T.switch(
        T.gt(change, 0.0), -T.sgn(gparam) * new_delta, T.switch(T.lt(change, 0.0), zero, -T.sgn(gparam) * new_delta)
    )

    updates.append((previous_grad, new_previous_grad))
    updates.append((delta, new_delta))
    updates.append((previous_inc, inc))
    return param + inc * mask