Example #1
def _calculate_power(p_values, alpha=0.05, numeric=True):
    r"""Calculates statistical power empirically for p-values

    Parameters
    ----------
    p_values : 1-D array
        A 1-D numpy array with the test results.
    alpha : float
        The critical value for the power calculation.
    numeric : Boolean
        Indicates whether a numeric p value should be used

    Returns
    -------
    power : float
        The empirical power, or the fraction of observed p values below the
        critical value.

    """

    if numeric:
        reject = np.atleast_2d(p_values < alpha)
    else:
        reject = np.atleast_2d(p_values)

    w = reject.sum(axis=1) / reject.shape[1]

    return w
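A minimal usage sketch (assuming numpy is imported as np, as the function body requires): simulate p-values and estimate the empirical power at alpha = 0.05.

import numpy as np

p_values = np.random.uniform(0.0, 0.25, size=1000)
power = _calculate_power(p_values, alpha=0.05)
print(power)  # length-1 array; roughly [0.2] for this simulation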
Example #2
def angle_between_vectors(v1, v2, degree = True):
    ''' Computes the angle between two vectors.

    :Parameters:
        v1:      Vector 1.
                -type: numpy array

        v2:      Vector 2.
                -type: numpy array

        degree:  If True, the angle is returned in degrees, in radians otherwise.
                -type: bool

    :Returns:
        angle
        -type: scalar

    '''
    v1 = numx.atleast_2d(v1)
    v2 = numx.atleast_2d(v2)
    c = numx.dot(v1,v2.T)/(get_norms(v1,axis = 1)*get_norms(v2,axis = 1))
    c = numx.arccos(numx.clip(c, -1, 1))
    if degree:
        c = numx.degrees(c)
    return c
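The example above relies on numx (presumably an alias for numpy) and an external get_norms helper. A self-contained sketch of the same computation for two 1-D vectors, using plain numpy, could look like this:

import numpy as np

def angle_between_vectors_np(v1, v2, degree=True):
    # cosine of the angle, clipped to guard against rounding outside [-1, 1]
    c = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
    angle = np.arccos(np.clip(c, -1.0, 1.0))
    return np.degrees(angle) if degree else angle

print(angle_between_vectors_np(np.array([1.0, 0.0]), np.array([0.0, 1.0])))  # 90.0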
Example #3
def get_coeffs(data, mask, X, add_const=False):
    """
    get_coeffs(data,X)

    Input:

    data has shape (nx,ny,nz,nt)
    mask has shape (nx,ny,nz)
    X    has shape (nt,nc)

    Output:

    out  has shape (nx,ny,nz,nc)
    """
    mdata = fmask(data, mask).transpose()

    X = np.atleast_2d(X)
    if X.shape[0] == 1: X = X.T
    Xones = np.atleast_2d(np.ones(np.min(mdata.shape))).T
    if add_const: X = np.hstack([X, Xones])

    tmpbetas = np.linalg.lstsq(X, mdata)[0].transpose()
    if add_const: tmpbetas = tmpbetas[:, :-1]
    out = unmask(tmpbetas, mask)

    return out
Example #4
    def fit(self, X, y, learning_rate=0.2, epochs=10000):
        X = np.atleast_2d(X)
        temp = np.ones([X.shape[0], X.shape[1]+1])
        temp[:, 0:-1] = X
        X = temp
        y = np.array(y)

        for k in range(epochs):
            i = np.random.randint(X.shape[0])
            a = [X[i]]

            for l in range(len(self.weights)):
                a.append(self.activation(np.dot(a[l], self.weights[l])))

            error = y[i] - a[-1]
            # deltas stores the gradient terms
            deltas = [error*self.activation_deriv(a[-1])]

            # compute the gradient terms for the hidden layers
            for l in range(len(a)-2, 0, -1):
                deltas.append(deltas[-1].dot(self.weights[l].T) * self.activation_deriv(a[l]))
            deltas.reverse()
            for i in range(len(self.weights)):
                layer = np.atleast_2d(a[i])
                delta = np.atleast_2d(deltas[i])
                self.weights[i] += learning_rate*layer.T.dot(delta)
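In this and the similar training loops below, np.atleast_2d promotes the 1-D activation and delta vectors to row vectors so that layer.T.dot(delta) forms the outer-product gradient. A small sketch of that shape trick (values are arbitrary):

import numpy as np

layer = np.atleast_2d(np.array([0.2, 0.7]))        # shape (1, 2)
delta = np.atleast_2d(np.array([0.1, -0.3, 0.5]))  # shape (1, 3)
grad = layer.T.dot(delta)                          # outer product, shape (2, 3)
# same result as np.outer([0.2, 0.7], [0.1, -0.3, 0.5])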
Example #5
    def __call__(self, e1, e2=None, axis=1):
        """
        Method for calculating distances.

        :param e1: input data instances
        :type e1: :class:`Orange.data.Table` or :class:`Orange.data.RowInstance` or :class:`numpy.ndarray`
        :param e2: optional second argument for data instances
           if provided, distances between each pair, where first item is from e1 and second is from e2, are calculated
        :type e2: :class:`Orange.data.Table` or :class:`Orange.data.RowInstance` or :class:`numpy.ndarray`
        :param axis: if axis=1 we calculate distances between rows,
           if axis=0 we calculate distances between columns
        :type axis: int
        :return: the matrix with distances between given examples
        :rtype: :class:`Orange.misc.DistMatrix`
        """
        x1 = _orange_to_numpy(e1)
        x2 = _orange_to_numpy(e2)
        if axis == 0:
            x1 = x1.T
            if x2 is not None:
                x2 = x2.T
        if not sparse.issparse(x1):
            x1 = np.atleast_2d(x1)
        if e2 is not None and not sparse.issparse(x2):
            x2 = np.atleast_2d(x2)
        dist = skl_metrics.pairwise.pairwise_distances(x1, x2, metric=self.metric)
        if isinstance(e1, data.Table) or isinstance(e1, data.RowInstance):
            dist = DistMatrix(dist, e1, e2)
        else:
            dist = DistMatrix(dist)
        return dist
Example #6
 def get_data(self, zname, index=None, axis=0):
    data = [c.to_polygons() for c in self.clt.get_paths()]
    if index is None:
        return data
    if axis == 0:
        return atleast_2d(data)[index, :]
    return atleast_2d(data)[:, index]
Example #7
    def validate(pos=None, text=None, anchor=None,
                 data_bounds=None,
                 ):

        if text is None:
            text = []
        if isinstance(text, string_types):
            text = [text]
        if pos is None:
            pos = np.zeros((len(text), 2))

        assert pos is not None
        pos = np.atleast_2d(pos)
        assert pos.ndim == 2
        assert pos.shape[1] == 2
        n_text = pos.shape[0]
        assert len(text) == n_text

        anchor = anchor if anchor is not None else (0., 0.)
        anchor = np.atleast_2d(anchor)
        if anchor.shape[0] == 1:
            anchor = np.repeat(anchor, n_text, axis=0)
        assert anchor.ndim == 2
        assert anchor.shape == (n_text, 2)

        if data_bounds is not None:
            data_bounds = _get_data_bounds(data_bounds, pos)
            assert data_bounds.shape[0] == n_text
            data_bounds = data_bounds.astype(np.float64)
            assert data_bounds.shape == (n_text, 4)

        return Bunch(pos=pos, text=text, anchor=anchor,
                     data_bounds=data_bounds)
Example #8
def format_as_regular_ts(times, values, intervals):
    """Format timeseries as regular timeseries.

    Parameters
    ----------
    times: np.ndarray, shape (n,)
        the times sample we have values measured.
    values: np.ndarray, shape (n,)
        the values of the measured time samples.
    intervals: tuple (init, endit, step)
        the information needed to define the regular times sample.

    Returns
    -------
    x: np.ndarray, shape (n,)
        the regular times samples for which we want the values measured.
    v: np.ndarray, shape (n,)
        the measured values for the regular times sample.

    """
    x = np.arange(*intervals)
    v = interpolate.griddata(np.atleast_2d(times).T, values,
                             np.atleast_2d(x).T, 'linear')
    v = v.squeeze()
    return x, v
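A usage sketch (assuming numpy as np and from scipy import interpolate, as the body implies): resample an irregular series onto a half-second grid.

import numpy as np
from scipy import interpolate

times = np.array([0.0, 0.7, 1.1, 2.5, 3.9, 5.0])
values = np.sin(times)
x, v = format_as_regular_ts(times, values, intervals=(0.0, 5.0, 0.5))
# x is np.arange(0.0, 5.0, 0.5); v holds the linearly interpolated values at x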
Example #9
def bo_(x_obs, y_obs):
    kernel = kernels.Matern() + kernels.WhiteKernel()
    gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=16)
    gp.fit(x_obs, y_obs)

    xs = list(repeat(np.atleast_2d(np.linspace(0, 10, 128)).T, 2))
    x = cartesian_product(*xs)

    a = a_EI(gp, x_obs=x_obs, y_obs=y_obs)

    argmin_a_x = x[np.argmax(a(x))]

    # heavy evaluation
    print("f({})".format(argmin_a_x))
    f_argmin_a_x = f2d(np.atleast_2d(argmin_a_x))


    plot_2d(gp, x_obs, y_obs, argmin_a_x, a, xs)
    plt.show()


    bo_(
        x_obs=np.vstack((x_obs, argmin_a_x)),
        y_obs=np.hstack((y_obs, f_argmin_a_x)),
    )
Example #10
def _cylinder(r, n):
    '''
    Returns the unit cylinder that corresponds to the curve r.
    INPUTS:
    r : a vector of radii
    n : number of coordinates to return for each element in r
    OUTPUTS:
    x, y, z: coordinates of points around cylinder
    '''

    # ensure that r is a column vector
    r = np.atleast_2d(r)
    r_rows, r_cols = r.shape

    if r_cols > r_rows:
        r = r.T

    # find points along x and y axes
    points = np.linspace(0, 2*np.pi, n+1)
    x = np.cos(points)*r
    y = np.sin(points)*r

    # find points along z axis
    rpoints = np.atleast_2d(np.linspace(0, 1, len(r)))
    z = np.ones((1, n+1))*rpoints.T

    return x, y, z
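A quick shape check (numpy as np): a cone-like surface built from linearly decreasing radii.

import numpy as np

r = np.linspace(1.0, 0.1, 20)   # radii along the axis
x, y, z = _cylinder(r, 36)      # 36 segments around each ring
# x, y and z all have shape (20, 37); suitable for e.g. mplot3d's plot_surface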
Example #11
def hausdorffnorm(A, B):
    '''
    Finds the hausdorff norm between two matrices A and B.
    INPUTS:
    A : numpy array
    B : numpy array
    OUTPUTS:
    Hausdorff norm between matrices A and B
    '''
    # ensure matrices are 3 dimensional, and shaped conformably
    if len(A.shape) == 1:
        A = np.atleast_2d(A)

    if len(B.shape) == 1:
        B = np.atleast_2d(B)

    A = np.atleast_3d(A)
    B = np.atleast_3d(B)

    x, y, z = B.shape
    A = np.reshape(A, (z, x, y))
    B = np.reshape(B, (z, x, y))

    # find hausdorff norm: starting from A to B
    z, x, y = B.shape
    temp1 = np.tile(np.reshape(B.T, (y, z, x)), (max(A.shape), 1))
    temp2 = np.tile(np.reshape(A.T, (y, x, z)), (1, max(B.shape)))
    D1 = np.min(np.sqrt(np.sum((temp1-temp2)**2, 0)), axis=0)

    # starting from B to A
    temp1 = np.tile(np.reshape(A.T, (y, z, x)), (max(B.shape), 1))
    temp2 = np.tile(np.reshape(B.T, (y, x, z)), (1, max(A.shape)))
    D2 = np.min(np.sqrt(np.sum((temp1-temp2)**2, 0)), axis=0)

    return np.max([D1, D2])
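The tiling and reshaping above is hard to follow; for plain 2-D point sets (one point per row), a standard alternative sketch of the symmetric Hausdorff distance using scipy's cdist (not the author's code) is:

import numpy as np
from scipy.spatial.distance import cdist

def hausdorff_cdist(A, B):
    # pairwise Euclidean distances between rows of A and rows of B
    D = cdist(np.atleast_2d(A), np.atleast_2d(B))
    d_ab = D.min(axis=1).max()  # farthest point of A from its nearest neighbour in B
    d_ba = D.min(axis=0).max()  # farthest point of B from its nearest neighbour in A
    return max(d_ab, d_ba)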
Example #12
    def train(self, inputs, targets, learning_rate=1, epochs=10000):
        inputs = np.array(inputs)
        inputs = self.__add_bias(inputs, axis=1)
        targets = np.array(targets)

        for loop_cnt in xrange(epochs):
            p = np.random.randint(inputs.shape[0])
            xp = inputs[p]
            bkp = targets[p]

            gjp = self.__sigmoid(np.dot(self.v, xp))
            gjp = self.__add_bias(gjp)
            gkp = self.__sigmoid(np.dot(self.w, gjp))

            eps2 = self.__sigmoid_deriv(gkp) * (gkp - bkp)
            eps = self.__sigmoid_deriv(gjp) * np.dot(self.w.T, eps2)

            # output layer training
            gjp = np.atleast_2d(gjp)
            eps2 = np.atleast_2d(eps2)
            self.w = self.w - learning_rate * np.dot(eps2.T, gjp)

            # hidden layer training
            xp = np.atleast_2d(xp)
            eps = np.atleast_2d(eps)
            self.v = self.v - learning_rate * np.dot(eps.T, xp)[1:, :]
Example #13
    def predict_proba(self, X):
        """Predict probability for each possible outcome.

        Compute the probability estimates for each single sample in X
        and each possible outcome seen during training (categorical
        distribution).

        Parameters
        ----------
        X : array_like, shape = [n_samples, n_features]

        Returns
        -------
        probabilities : array, shape = [n_samples, n_classes]
            Normalized probability distributions across
            class labels
        """
        if sparse.isspmatrix(X):
            X_2d = X
        else:
            X_2d = np.atleast_2d(X)
        weight_matrices = self._get_kernel(self.X_, X_2d)
        if self.kernel == 'knn':
            probabilities = []
            for weight_matrix in weight_matrices:
                ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
                probabilities.append(ine)
            probabilities = np.array(probabilities)
        else:
            weight_matrices = weight_matrices.T
            probabilities = np.dot(weight_matrices, self.label_distributions_)
        normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
        probabilities /= normalizer
        return probabilities
Example #14
def hash_to_classifier(demo_data, parity=1):
    """
    Convert the inputs taken from the HASH example datasets
    into something the classifier uses.

    Parameters

    :param demo_data: the output of read_demo.
    :param parity: Ensures the optimization will respect the
                   symmetry of the solution. parity = 0 means
                   there is no symmetry. For earthquakes use
                   parity = 1 to enforce conservation of
                   momentum.
    """    

    inputs = {}
    for event, dat in demo_data.items():
        # take off angles need to be "colatitude" measured from
        # Up 0-degrees, Down 180-degrees
        # The other angle needs to be azimuth
        x = atleast_2d(cos(dat[:,0])*sin(dat[:,1]))
        y = atleast_2d(sin(dat[:,0])*sin(dat[:,1]))
        z = atleast_2d(cos(dat[:,1]))
        
        classes = atleast_2d(dat[:,2])    
        if parity != 0:
            x = hstack((x,-x))
            y = hstack((y,-y))
            z = hstack((z,-z))

            classes = hstack((classes, sign(parity)*classes))

        inputs[event] = (x, y, z, classes)
        
    return inputs
Example #15
    def fit(self, x, y, learning_rate=0.2, epochs=10000):
        x = np.atleast_2d(x)
        # print x.shape[0], x.shape[1]+1
        temp = np.ones([x.shape[0], x.shape[1] + 1])
        temp[:, 0:-1] = x
        x = temp
        y = np.array(y)

        for k in range(epochs):
            i = np.random.randint(x.shape[0])
            a = [x[i]]

            for l in range(len(self.weights)):
                a.append(self.activation(np.dot(a[l],self.weights[l])))
            error = y[i] - a[-1]
            deltas = [error * self.activation_deriv(a[-1])]

            for l in range(len(a) - 2, 0, -1):
                deltas.append(deltas[-1].dot(self.weights[l].T)*self.activation_deriv(a[l]))

            deltas.reverse()
            for i in range(len(self.weights)):
                layer = np.atleast_2d(a[i])
                delta = np.atleast_2d(deltas[i])
                self.weights[i] += learning_rate * layer.T.dot(delta)
Example #16
    def back_propagation(self, rdd_data, learn_rate, iteration, error):
        """ Using standard gradient descent to do the back propagation """
        input_data = np.array(rdd_data)
        real = np.array(map(float, input_data[:, 1]))
        feature = input_data[:, 0]
        feature = map(lambda x: map(float, x), feature)
        ones = np.atleast_2d(np.ones(np.shape(feature)[0])) * self.bias
        feature = np.concatenate((ones.T, np.array(feature, dtype=float)), axis=1)
        for i in range(iteration):
            if i % 50 == 0:
                self.logger.debug("Start the {} iteration".format(i))
            k = np.random.randint(np.shape(feature)[0])
            train_data = feature[k]
            process_data = [np.array(train_data)]
            target = real[k]
            for layer in self.weights:
                activation = self.af(np.dot(process_data[-1], layer))
                process_data.append(activation)

            error = target - process_data[-1]
            deltas = [error * self.afd(process_data[-1])]
            for l in range(len(process_data) - 2, 0, -1):
                deltas.append(deltas[-1].dot(self.weights[l].T) * self.afd(process_data[l]))

            deltas.reverse()
            for l in range(len(self.weights)):
                layer = np.atleast_2d(process_data[l])
                delta = np.atleast_2d(deltas[l])
                self.weights[l] += learn_rate * layer.T.dot(delta)
            if i % 50 == 0:
                self.logger.debug("{} iteration finished".format(i))
Example #17
    def fit(self, inputs, targets, learning_rate=0.2, epochs=10000):
        inputs = self.__add_bias(inputs, axis=1)
        targets = np.array(targets)

        for loop_cnt in xrange(epochs):
            # randomise the order of the inputs
            p = np.random.randint(inputs.shape[0])
            xp = inputs[p]
            bkp = targets[p]

            # forward phase
            gjp = self.__sigmoid(np.dot(self.v, xp))
            gjp = self.__add_bias(gjp)
            gkp = self.__sigmoid(np.dot(self.w, gjp))

            # backward phase(back prop)
            eps2 = self.__sigmoid_deriv(gkp) * (gkp - bkp)
            eps = self.__sigmoid_deriv(gjp) * np.dot(self.w.T, eps2)

            gjp = np.atleast_2d(gjp)
            eps2 = np.atleast_2d(eps2)
            self.w = self.w - learning_rate * np.dot(eps2.T, gjp)

            xp = np.atleast_2d(xp)
            eps = np.atleast_2d(eps)
            self.v = self.v - learning_rate * np.dot(eps.T, xp)[1:, :]
Example #18
def maxprod_composition(s, r):
    """
    The max-product composition ``t`` of two fuzzy relation matrices.

    Parameters
    ----------
    s : 2d array, (M, N)
        Fuzzy relation matrix #1.
    r : 2d array, (N, P)
        Fuzzy relation matrix #2.

    Returns
    -------
    t : 2d array, (M, P)
        Max-product composition matrix.

    """
    if s.ndim < 2:
        s = np.atleast_2d(s)
    if r.ndim < 2:
        r = np.atleast_2d(r).T
    m = s.shape[0]
    p = r.shape[1]
    t = np.zeros((m, p))

    for mm in range(m):
        for pp in range(p):
            t[mm, pp] = (s[mm, :] * r[:, pp].T).max()

    return t
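A small usage sketch (numpy as np); the max-min composition further below takes the same shapes:

import numpy as np

s = np.array([[0.2, 0.9],
              [0.6, 0.4]])       # (M, N) = (2, 2)
r = np.array([[0.5, 0.1, 0.8],
              [0.7, 0.3, 0.2]])  # (N, P) = (2, 3)
t = maxprod_composition(s, r)    # shape (2, 3)
# t[0, 0] == max(0.2*0.5, 0.9*0.7) == 0.63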
Example #19
def cartadd(x, y):
    """
    Cartesian addition of fuzzy membership vectors using the algebraic method.

    Parameters
    ----------
    x : 1D array or iterable
        First fuzzy membership vector, of length M.
    y : 1D array or iterable
        Second fuzzy membership vector, of length N.

    Returns
    -------
    z : 2D array
        Cartesian addition of ``x`` and ``y``, of shape (M, N).

    """
    # Ensure rank-1 input
    x, y = np.asarray(x).ravel(), np.asarray(y).ravel()

    m, n = len(x), len(y)

    a = np.dot(np.atleast_2d(x).T, np.ones((1, n)))
    b = np.dot(np.ones((m, 1)), np.atleast_2d(y))

    return a + b
Example #20
def cartprod(x, y):
    """
    Cartesian product of two fuzzy membership vectors. Uses ``min()``.

    Parameters
    ----------
    x : 1D array or iterable
        First fuzzy membership vector, of length M.
    y : 1D array or iterable
        Second fuzzy membership vector, of length N.

    Returns
    -------
    z : 2D array
        Cartesian product of ``x`` and ``y``, of shape (M, N).

    """
    # Ensure rank-1 input
    x, y = np.asarray(x).ravel(), np.asarray(y).ravel()

    m, n = len(x), len(y)

    a = np.dot(np.atleast_2d(x).T, np.ones((1, n)))
    b = np.dot(np.ones((m, 1)), np.atleast_2d(y))

    return np.fmin(a, b)
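A usage sketch covering both Cartesian operations above (numpy as np):

import numpy as np

x = np.array([0.2, 0.7, 1.0])  # length M = 3
y = np.array([0.5, 0.9])       # length N = 2

print(cartadd(x, y))   # shape (3, 2); entry (i, j) is x[i] + y[j]
print(cartprod(x, y))  # shape (3, 2); entry (i, j) is min(x[i], y[j])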
Example #21
def maxmin_composition(s, r):
    """
    The max-min composition ``t`` of two fuzzy relation matrices.

    Parameters
    ----------
    s : 2d array, (M, N)
        Fuzzy relation matrix #1.
    r : 2d array, (N, P)
        Fuzzy relation matrix #2.

    Returns
    -------
    t : 2d array, (M, P)
        Max-min composition, defined by ``T = s o r``.

    """
    if s.ndim < 2:
        s = np.atleast_2d(s)
    if r.ndim < 2:
        r = np.atleast_2d(r).T
    m = s.shape[0]
    p = r.shape[1]
    t = np.zeros((m, p))

    for pp in range(p):
        for mm in range(m):
            t[mm, pp] = (np.fmin(s[mm, :], r[:, pp].T)).max()

    return t
Example #22
    def aim(self, yo, yp=None, z=None, a=None, surface=None, filter=True):
        if z is None:
            z = self.pupil_distance
        yo = np.atleast_2d(yo)
        if yp is not None:
            if a is None:
                a = self.pupil_radius
                a = np.array(((-a, -a), (a, a)))
            a = np.arctan2(a, z)
            yp = np.atleast_2d(yp)
            yp = self.map_pupil(yp, a, filter)
            yp = z*np.tan(yp)
            yo, yp = np.broadcast_arrays(yo, yp)

        y = np.zeros((yo.shape[0], 3))
        y[..., :2] = -yo*self.radius
        if surface:
            y[..., 2] = -surface.surface_sag(y)
        uz = (0, 0, z)
        if self.telecentric:
            u = uz
        else:
            u = uz - y
        if yp is not None:
            s, m = sagittal_meridional(u, uz)
            u += yp[..., 0, None]*s + yp[..., 1, None]*m
        normalize(u)
        if z < 0:
            u *= -1
        return y, u
Example #23
    def propagate_backward(self, target, lrate=0.1, momentum=0.1):
        ''' Back propagate error related to target using lrate. '''

        deltas = []

        # Compute error on output layer
        error = target - self.layers[-1]
        delta = error*dsigmoid(self.layers[-1])
        deltas.append(delta)

        # Compute error on hidden layers
        for i in range(len(self.shape)-2,0,-1):
            delta = np.dot(deltas[0],self.weights[i].T)*dsigmoid(self.layers[i])
            deltas.insert(0,delta)
            
        # Update weights
        for i in range(len(self.weights)):
            layer = np.atleast_2d(self.layers[i])
            delta = np.atleast_2d(deltas[i])
            dw = np.dot(layer.T,delta)
            self.weights[i] += lrate*dw + momentum*self.dw[i]
            self.dw[i] = dw

        # Return error
        return (error**2).sum()
Example #24
def _cmeans_predict0(test_data, cntr, u_old, c, m):
    """
    Single step in fuzzy c-means prediction algorithm. Clustering algorithm
    modified from Ross, Fuzzy Logic w/Engineering Applications (2010)
    p.352-353, equations 10.28 - 10.35, but this method to generate fuzzy
    predictions was independently derived by Josh Warner.

    Parameters inherited from cmeans()

    Very similar to initial clustering, except `cntr` is not updated, thus
    the new test data are forced into known (trained) clusters.

    """
    # Normalizing, then eliminating any potential zero values.
    u_old /= np.ones((c, 1)).dot(np.atleast_2d(u_old.sum(axis=0)))
    u_old = np.fmax(u_old, np.finfo(float).eps)

    um = u_old ** m
    test_data = test_data.T

    # For prediction, we do not recalculate cluster centers. The test_data is
    # forced to conform to the prior clustering.

    d = _distance(test_data, cntr)
    d = np.fmax(d, np.finfo(float).eps)

    jm = (um * d ** 2).sum()

    u = d ** (- 2. / (m - 1))
    u /= np.ones((c, 1)).dot(np.atleast_2d(u.sum(axis=0)))

    return u, jm, d
Example #25
def _cmeans0(data, u_old, c, m):
    """
    Single step in generic fuzzy c-means clustering algorithm. Modified from
    Ross, Fuzzy Logic w/Engineering Applications (2010) p.352-353, equations
    10.28 - 10.35.

    Parameters inherited from cmeans()

    This algorithm is a ripe target for Cython.

    """
    # Normalizing, then eliminating any potential zero values.
    u_old /= np.ones((c, 1)).dot(np.atleast_2d(u_old.sum(axis=0)))
    u_old = np.fmax(u_old, np.finfo(float).eps)

    um = u_old ** m

    # Calculate cluster centers
    data = data.T
    cntr = um.dot(data) / (np.ones((data.shape[1],
                                    1)).dot(np.atleast_2d(um.sum(axis=1))).T)

    d = _distance(data, cntr)
    d = np.fmax(d, np.finfo(float).eps)

    jm = (um * d ** 2).sum()

    u = d ** (- 2. / (m - 1))
    u /= np.ones((c, 1)).dot(np.atleast_2d(u.sum(axis=0)))

    return cntr, u, jm, d
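The recurring expression u /= np.ones((c, 1)).dot(np.atleast_2d(u.sum(axis=0))) simply normalizes each column of u to sum to one. A short sketch (not part of the library code) showing it is equivalent to plain broadcasting:

import numpy as np

u = np.random.rand(3, 5)  # c = 3 clusters, 5 samples
u_explicit = u / np.ones((3, 1)).dot(np.atleast_2d(u.sum(axis=0)))
u_broadcast = u / u.sum(axis=0, keepdims=True)
assert np.allclose(u_explicit, u_broadcast)  # columns now sum to 1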
Example #26
def assert_equal_from_matlab(a, b, options=None):
    # Compares a and b for equality. They are all going to be numpy
    # types. hdf5storage and scipy behave differently when importing
    # arrays as to whether they are 2D or not, so we will make them all
    # at least 2D regardless. For strings, the two packages produce
    # transposed results of each other, so one just needs to be
    # transposed. For object arrays, each element must be iterated over
    # to be compared. For structured ndarrays, their fields need to be
    # compared and then they can be compared element and field
    # wise. Otherwise, they can be directly compared. Note, the type is
    # often converted by scipy (or on route to the file before scipy
    # gets it), so comparisons are done by value, which is not perfect.
    a = np.atleast_2d(a)
    b = np.atleast_2d(b)
    if a.dtype.char == 'U':
        a = a.T
    if b.dtype.name == 'object':
        a = a.flatten()
        b = b.flatten()
        for index, x in np.ndenumerate(a):
            assert_equal_from_matlab(a[index], b[index], options)
    elif b.dtype.names is not None or a.dtype.names is not None:
        assert a.dtype.names is not None
        assert b.dtype.names is not None
        assert set(a.dtype.names) == set(b.dtype.names)
        a = a.flatten()
        b = b.flatten()
        for k in b.dtype.names:
            for index, x in np.ndenumerate(a):
                assert_equal_from_matlab(a[k][index], b[k][index],
                                         options)
    else:
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', RuntimeWarning)
            npt.assert_equal(a, b)
Example #27
 def _compute_model(self, pset):
     """Computes a model and inserts results into the Mongo collection."""
     nBands = fsps.driver.get_n_bands()
     nLambda = fsps.driver.get_n_lambda()
     nAges = fsps.driver.get_n_ages()
     fsps.driver.comp_sp(pset['dust_type'], pset['zmet'], pset['sfh'],
         pset['tau'], pset['const'], pset['fburst'], pset['tburst'],
         pset['dust_tesc'], pset['dust1'], pset['dust2'],
         pset['dust_clumps'], pset['frac_nodust'], pset['dust_index'],
         pset['mwr'], pset['wgp1'], pset['wgp2'], pset['wgp3'],
         pset['duste_gamma'], pset['duste_umin'], pset['duste_qpah'],
         pset['tage'])
     if pset['tage'] == 0.:
         # SFH over all ages is returned
         mags = fsps.driver.get_csp_mags(nBands, nAges)
         specs = fsps.driver.get_csp_specs(nLambda, nAges)
         age, mass, lbol, sfr, dust_mass = fsps.driver.get_csp_stats(nAges)
     else:
         # get only a single age, stored in first age bin
         # arrays must be re-formatted to appear like one-age versions of
         # the outputs from get_csp_mags, etc.
         mags = fsps.driver.get_csp_mags_at_age(1, nBands)
         specs = fsps.driver.get_csp_specs_at_age(1, nLambda)
         age, mass, lbol, sfr, dust_mass \
                 = fsps.driver.get_csp_stats_at_age(1)
         age = np.atleast_1d(age)
         mass = np.atleast_1d(mass)
         lbol = np.atleast_1d(lbol)
         sfr = np.atleast_1d(sfr)
         dust_mass = np.atleast_1d(dust_mass)
         mags = np.atleast_2d(mags)
         specs = np.atleast_2d(specs)
     dataArray = self._splice_mag_spec_arrays(age, mass, lbol, sfr,
             dust_mass, mags, specs, nLambda)
     self._insert_model(pset.name, dataArray)
Example #28
def _sanitize_pixel_positions(positions):

    if isinstance(positions, u.Quantity):
        if positions.unit is u.pixel:
            positions = positions.value
        else:
            raise u.UnitsError("positions should be in pixel units")

    if isinstance(positions, u.Quantity):
        positions = positions.value
    elif isinstance(positions, (list, tuple, np.ndarray)):
        positions = np.atleast_2d(positions)
        if positions.shape[1] != 2:
            if positions.shape[0] == 2:
                positions = np.transpose(positions)
            else:
                raise TypeError("List or array of (x, y) pixel coordinates "
                                "is expected got '{0}'.".format(positions))
    elif isinstance(positions, zip):
        # This is needed for zip to work seamlessly in Python 3
        positions = np.atleast_2d(list(positions))
    else:
        raise TypeError("List or array of (x, y) pixel coordinates "
                        "is expected got '{0}'.".format(positions))

    if positions.ndim > 2:
        raise ValueError('{0}-d position array not supported. Only 2-d '
                         'arrays supported.'.format(positions.ndim))

    return positions
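A usage sketch (assuming numpy as np and astropy.units as u are imported, as the isinstance checks require):

import numpy as np

print(_sanitize_pixel_positions([(1.5, 2.0), (3.0, 4.5)]).shape)    # (2, 2)
print(_sanitize_pixel_positions((1.5, 2.0)).shape)                  # (1, 2): a single (x, y) pair
print(_sanitize_pixel_positions(np.arange(6).reshape(2, 3)).shape)  # (3, 2): transposed into (x, y) rows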
Example #29
 def backPropagation(self):
     print "start back propagation"
     accuracy_prev = 0.0
     for step in range(0, self.backPropN):
         print "------------------------------"
         for (i,img) in zip(self.training_label,self.training_data): 
             output_ref = np.zeros((1,self.output_num),dtype=np.double)
             output_ref[0][int(i)] = 1.0
             self.run(img)
             # output error
             output_error = (self.output_output - output_ref) * self.sigmoid_d(self.output_output)
             # middle_error
             middle_error = np.dot(output_error,self.w2) * self.sigmoid_d(self.middle_output)
             middle_error = np.resize(middle_error,(1,self.middle_num))
             # w2 update
             self.w2 -= self.nu * np.dot(output_error.T,np.atleast_2d(self.middle_output)) 
             # w1 update
             self.w1 -= self.nu * np.dot(middle_error.T,np.atleast_2d(self.input_output))
         self.identify()
         if (self.accuracy < accuracy_prev):
             print "Warning: Accuracy is Decreasing !!"
         accuracy_prev = self.accuracy
         print "BackPropagation Step " + str(step+1) + " finished"
         print "------------------------------"
     np.savetxt("w1.txt",self.w1)
     np.savetxt("w2.txt",self.w2)
     print "w1 and w2 saved and back propagation finished"
Example #30
    def Pred_EOF_CCA(self):
        '''
        Prediction module; still needs refinement, and several parts need to be worked out in more depth.
        '''

        I_Year = self.I_Year
        I_YearP = self.I_YearP
        print('I_Year=',I_Year)
        print('I_YearP=',I_YearP)
        #print(self.Field[:,0,0])
        #print(self.FieldP[:,0,0])

        #sys.exit(0)

        Region = self.Region[:,np.in1d(I_Year,I_YearP)]
        print('I_YearR=',I_Year[np.in1d(I_Year,I_YearP)])

        FieldP = self.FieldP[:,self.p_np3]  #等于过滤后的场文件
        FieldP = FieldP.T

        FieldP2 = FieldP[:,np.in1d(I_YearP,I_Year)]

        print(FieldP2.shape,np.atleast_2d(FieldP[:,-1]).T.shape)

        print('FieldP.shape = ',FieldP.shape)
        print('FieldP2.shape = ',FieldP2.shape)
        print('Region.shape = ',Region.shape)
        self.X_Pre = dclim.dpre_eof_cca(FieldP2,Region,np.atleast_2d(FieldP[:,-1]).T,4)
        print(self.X_Pre.shape)

        self.out = np.hstack((self.StaLatLon,self.X_Pre))
        
        print('Pred Year is ',I_YearP[-1])
        np.savetxt('out.txt',self.out,fmt='%5d %7.2f %7.2f %7.2f',delimiter=' ')
Example #31
    ax.set_xlim(-2, 2)
    ax.set_ylabel('Y')
    ax.set_ylim(-2, 2)
    ax.set_zlabel('Z')
    ax.set_zlim(-10, 2)

    plt.show()


if __name__ == '__main__':

    d = 2
    N = 100
    nu = 0e-1
    x = np.linspace(0, 1, d)
    Xp = np.repeat(np.atleast_2d(x), N, axis=0)
    Xn = np.repeat(np.atleast_2d(x[::-1]), N, axis=0)

    from circle import getCircle
    Xp, Xn = getCircle(N)
    X = np.vstack((Xp, Xn))
    d = X.shape[1]
    Nu = nu * (2 * np.random.rand(2 * N, d) - 1)
    print "NSR", np.mean(100 * np.linalg.norm(Nu, axis=1) /
                         np.linalg.norm(X, axis=1))
    X += Nu
    Y = np.array([1] * N + [-1] * N)

    instances = createInstances(X, Y)

    classifier = cafeMap(Lambda=1e-1,
Example #32
    def test_boolean_comparison(self):

        # GH 4576
        # boolean comparisons with a tuple/list give unexpected results
        df = DataFrame(np.arange(6).reshape((3, 2)))
        b = np.array([2, 2])
        b_r = np.atleast_2d([2, 2])
        b_c = b_r.T
        lst = [2, 2, 2]
        tup = tuple(lst)

        # gt
        expected = DataFrame([[False, False], [False, True], [True, True]])
        result = df > b
        assert_frame_equal(result, expected)

        result = df.values > b
        assert_numpy_array_equal(result, expected.values)

        msg1d = 'Unable to coerce to Series, length must be 2: given 3'
        msg2d = 'Unable to coerce to DataFrame, shape must be'
        msg2db = 'operands could not be broadcast together with shapes'
        with pytest.raises(ValueError, match=msg1d):
            # wrong shape
            df > lst

        with pytest.raises(ValueError, match=msg1d):
            # wrong shape
            result = df > tup

        # broadcasts like ndarray (GH#23000)
        result = df > b_r
        assert_frame_equal(result, expected)

        result = df.values > b_r
        assert_numpy_array_equal(result, expected.values)

        with pytest.raises(ValueError, match=msg2d):
            df > b_c

        with pytest.raises(ValueError, match=msg2db):
            df.values > b_c

        # ==
        expected = DataFrame([[False, False], [True, False], [False, False]])
        result = df == b
        assert_frame_equal(result, expected)

        with pytest.raises(ValueError, match=msg1d):
            result = df == lst

        with pytest.raises(ValueError, match=msg1d):
            result = df == tup

        # broadcasts like ndarray (GH#23000)
        result = df == b_r
        assert_frame_equal(result, expected)

        result = df.values == b_r
        assert_numpy_array_equal(result, expected.values)

        with pytest.raises(ValueError, match=msg2d):
            df == b_c

        assert df.values.shape != b_c.shape

        # with alignment
        df = DataFrame(np.arange(6).reshape((3, 2)),
                       columns=list('AB'),
                       index=list('abc'))
        expected.index = df.index
        expected.columns = df.columns

        with pytest.raises(ValueError, match=msg1d):
            result = df == lst

        with pytest.raises(ValueError, match=msg1d):
            result = df == tup
Example #33
def plot_ess(
    ax,
    plotters,
    xdata,
    ess_tail_dataset,
    mean_ess,
    sd_ess,
    idata,
    data,
    text_x,
    text_va,
    kind,
    extra_methods,
    rows,
    cols,
    figsize,
    kwargs,
    extra_kwargs,
    text_kwargs,
    _linewidth,
    _markersize,
    n_samples,
    relative,
    min_ess,
    xt_labelsize,
    titlesize,
    ax_labelsize,
    ylabel,
    rug,
    rug_kind,
    rug_kwargs,
    hline_kwargs,
    backend_kwargs,
    show,
):
    """Bokeh essplot."""
    if backend_kwargs is None:
        backend_kwargs = {}

    backend_kwargs = {
        **backend_kwarg_defaults(),
        **backend_kwargs,
    }
    if ax is None:
        _, ax = _create_axes_grid(
            len(plotters),
            rows,
            cols,
            figsize=figsize,
            squeeze=False,
            constrained_layout=True,
            backend="bokeh",
            backend_kwargs=backend_kwargs,
        )
    else:
        ax = np.atleast_2d(ax)

    for (var_name, selection,
         x), ax_ in zip(plotters,
                        (item for item in ax.flatten() if item is not None)):
        bulk_points = ax_.circle(np.asarray(xdata), np.asarray(x), size=6)
        if kind == "evolution":
            bulk_line = ax_.line(np.asarray(xdata), np.asarray(x))
            ess_tail = ess_tail_dataset[var_name].sel(**selection)
            tail_points = ax_.line(np.asarray(xdata),
                                   np.asarray(ess_tail),
                                   color="orange")
            tail_line = ax_.circle(np.asarray(xdata),
                                   np.asarray(ess_tail),
                                   size=6,
                                   color="orange")
        elif rug:
            if rug_kwargs is None:
                rug_kwargs = {}
            if not hasattr(idata, "sample_stats"):
                raise ValueError(
                    "InferenceData object must contain sample_stats for rug plot"
                )
            if not hasattr(idata.sample_stats, rug_kind):
                raise ValueError(
                    "InferenceData does not contain {} data".format(rug_kind))

            rug_kwargs.setdefault("space", 0.1)
            _rug_kwargs = {}
            _rug_kwargs.setdefault("size", 8)
            _rug_kwargs.setdefault("line_color",
                                   rug_kwargs.get("line_color", "black"))
            _rug_kwargs.setdefault("line_width", 1)
            _rug_kwargs.setdefault("line_alpha", 0.35)
            _rug_kwargs.setdefault("angle", np.pi / 2)

            values = data[var_name].sel(**selection).values.flatten()
            mask = idata.sample_stats[rug_kind].values.flatten()
            values = rankdata(values)[mask]
            rug_space = np.max(x) * rug_kwargs.pop("space")
            rug_x, rug_y = values / (len(mask) -
                                     1), np.zeros_like(values) - rug_space

            glyph = Dash(x="rug_x", y="rug_y", **_rug_kwargs)
            cds_rug = ColumnDataSource({
                "rug_x": np.asarray(rug_x),
                "rug_y": np.asarray(rug_y)
            })
            ax_.add_glyph(cds_rug, glyph)

            hline = Span(
                location=0,
                dimension="width",
                line_color="black",
                line_width=_linewidth,
                line_alpha=0.7,
            )

            ax_.renderers.append(hline)

        if extra_methods:
            mean_ess_i = mean_ess[var_name].sel(**selection).values.item()
            sd_ess_i = sd_ess[var_name].sel(**selection).values.item()

            hline = Span(
                location=mean_ess_i,
                dimension="width",
                line_color="black",
                line_width=2,
                line_dash="dashed",
                line_alpha=1.0,
            )

            ax_.renderers.append(hline)

            hline = Span(
                location=sd_ess_i,
                dimension="width",
                line_color="black",
                line_width=1,
                line_dash="dashed",
                line_alpha=1.0,
            )

            ax_.renderers.append(hline)

        hline = Span(
            location=400 / n_samples if relative else min_ess,
            dimension="width",
            line_color="red",
            line_width=3,
            line_dash="dashed",
            line_alpha=1.0,
        )

        ax_.renderers.append(hline)

        if kind == "evolution":
            legend = Legend(
                items=[("bulk", [bulk_points, bulk_line]),
                       ("tail", [tail_line, tail_points])],
                location="center_right",
                orientation="horizontal",
            )
            ax_.add_layout(legend, "above")
            ax_.legend.click_policy = "hide"

        title = Title()
        title.text = make_label(var_name, selection)
        ax_.title = title

        ax_.xaxis.axis_label = "Total number of draws" if kind == "evolution" else "Quantile"
        ax_.yaxis.axis_label = ylabel.format(
            "Relative ESS" if relative else "ESS")

    if backend_show(show):
        grid = gridplot(ax.tolist(), toolbar_location="above")
        bkp.show(grid)

    return ax
Example #34
     layer_2_value.append(layer_2)
 
     # compute the output error
     layer_2_error = layer_2 - Y[:,None] # issue: y was replaced by the Y matrix here
     
     # gradient descent 
     # layer_2_error = 0.5*layer_2_error**2
     
     # error at the output layer -> layer 2 deltas (fed into the context layer from the hidden layer)
     layer_2_delta = layer_2_error*dtanh(layer_2)
     
     # error 
     layer_1_delta = (np.dot(layer_h_deltas,synapse_h.T) + np.dot(layer_2_delta,synapse_1.T)) * dtanh(layer_1)
 
     # calculate weight update (gradient)
     synapse_1_update = np.dot(np.atleast_2d(layer_1).T,(layer_2_delta))
     synapse_h_update = np.dot(np.atleast_2d(context_layer).T,(layer_1_delta))
     synapse_0_update = np.dot(X.T,(layer_1_delta))
     
     # concatenate weight
     synapse_0_c = np.reshape(synapse_0,(-1,1))
     synapse_h_c = np.reshape(synapse_h,(-1,1))
     synapse_1_c = np.reshape(synapse_1,(-1,1))
     w_concat = np.concatenate((synapse_0_c,synapse_h_c,synapse_1_c), axis=0)
     
     synapse_0_masuk = np.reshape(synapse_0_update,(1,-1)) # one row laid out sideways
     synapse_h_masuk = np.reshape(synapse_h_update,(1,-1))
     synapse_1_masuk = np.reshape(synapse_1_update,(1,-1))
     masuk = np.concatenate((synapse_0_masuk,synapse_h_masuk,synapse_1_masuk), axis=1)
     
     #%% Unscented Kalman Filter without filterpy
Example #35
 def fitModel(self):
     if self.isValid() and not self.isFit:
         x = np.atleast_2d(self.xValues)
         y = np.array(self.yValues).reshape(-1, 1)
         self.model.fit(x, y)
         self.isFit = True
Example #36
 def J(self):
     result = self.dr_wrt(self.x).copy()
     return np.atleast_2d(result) if not sp.issparse(result) else result
Example #37
 def make2D(x):
     return np.atleast_2d(x).T if x.ndim == 1 else x
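A quick check of the promotion behaviour of make2D (numpy as np): 1-D input becomes a column vector, 2-D input passes through unchanged.

import numpy as np

print(make2D(np.array([1.0, 2.0, 3.0])).shape)  # (3, 1): 1-D promoted to a column
print(make2D(np.zeros((4, 2))).shape)           # (4, 2): already 2-D, returned as is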
Example #38
def sumprimebyiter(n=100):
    primesum = 0
    for i in range(1, n + 1):
        if checkprime(i):
            primesum += i
    return primesum


sumprimebyiter()

import numpy as np


def sumprimebyarr(n=100):
    a = np.arange(1, n + 1)
    # return sum(a[np.array(map(CheckPrime,a))])
    # here Python's built-in map function would apply the check to each element of the vector
    check_prime_vec = np.vectorize(checkprime)
    # np.vectorize applies an external function to each element of the vector
    return np.sum(a[check_prime_vec(a)])


sumprimebyarr()

# randomly generate 10 coordinates and compute the pairwise distances
Z = np.random.randint(10, size=(10, 2))
X, Y = np.atleast_2d(Z[:, 0]), np.atleast_2d(Z[:, 1])
D = np.sqrt((X - X.T)**2 + (Y - Y.T)**2)
print(D)
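The same pairwise distance matrix can be obtained with scipy (a sketch; results match the broadcasting version up to floating-point rounding):

from scipy.spatial.distance import cdist

D_scipy = cdist(Z, Z)           # Euclidean distances between the rows of Z
print(np.allclose(D, D_scipy))  # True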
Example #39
y = pd.read_csv("MLB/y_sort.csv", encoding='latin-1', names=['Score'])
X2 = X['Expected_Runs']
X = X.drop('Expected_Runs', axis=1)

from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.20,
                                                    random_state=0)

#----------------------------------------------------------------------

# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)

alpha = 0.95

clf = GradientBoostingRegressor(loss='quantile',
                                alpha=alpha,
                                n_estimators=250,
                                max_depth=3,
                                learning_rate=.1,
                                min_samples_leaf=9,
                                min_samples_split=9)

clf.fit(X_train, y_train)

# Make the prediction on the meshed x-axis
Example #40
 def mat(x):
     return np.atleast_2d(np.array(x, dtype=np.float32))
Example #41
    def __init__(self, model, ipakcb=None, iswtoc=0, nsystm=1, ithk=0, ivoid=0,
                 istpcs=1, icrcc=0,
                 lnwt=0, izcfl=0, izcfm=0, iglfl=0, iglfm=0, iestfl=0,
                 iestfm=0, ipcsfl=0,
                 ipcsfm=0, istfl=0, istfm=0, gl0=0., sgm=1.7, sgs=2., thick=1.,
                 sse=1., ssv=1., cr=0.01, cc=0.25,
                 void=0.82, sub=0., pcsoff=0., pcs=0., ids16=None, ids17=None,
                 extension='swt', unitnumber=None, filenames=None):
        """
        Package constructor.

        """
        # set default unit number of one is not specified
        if unitnumber is None:
            unitnumber = ModflowSwt.defaultunit()

        # set filenames
        if filenames is None:
            filenames = [None for x in range(15)]
        elif isinstance(filenames, str):
            filenames = [filenames] + [None for x in range(14)]
        elif isinstance(filenames, list):
            if len(filenames) < 15:
                n = 15 - len(filenames) + 1
                filenames = filenames + [None for x in range(n)]

        # update external file information with cbc output, if necessary
        if ipakcb is not None:
            fname = filenames[1]
            model.add_output_file(ipakcb, fname=fname,
                                  package=ModflowSwt.ftype())
        else:
            ipakcb = 0

        item16_extensions = ["subsidence.hds", "total_comp.hds",
                             "inter_comp.hds", "vert_disp.hds",
                             "precon_stress.hds", "precon_stress_delta.hds",
                             "geostatic_stress.hds",
                             "geostatic_stress_delta.hds",
                             "eff_stress.hds", "eff_stress_delta.hds",
                             "void_ratio.hds", "thick.hds", "lay_center.hds"]
        item16_units = [2052 + i for i in range(len(item16_extensions))]

        if iswtoc > 0:
            idx = 0
            for k in range(1, 26, 2):
                ext = item16_extensions[idx]
                if ids16 is None:
                    iu = item16_units[idx]
                else:
                    iu = ids16[k]
                fname = filenames[idx + 2]
                model.add_output_file(iu, fname=fname, extension=ext,
                                      package=ModflowSwt.ftype())
                idx += 1

        extensions = [extension]
        name = [ModflowSwt.ftype()]
        units = [unitnumber]
        extra = ['']

        # set package name
        fname = [filenames[0]]

        # Call ancestor's init to set self.parent, extension, name and unit number
        Package.__init__(self, model, extension=extensions, name=name,
                         unit_number=units, extra=extra, filenames=fname)

        nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper

        self.heading = '# {} package for '.format(self.name[0]) + \
                       ' {}, '.format(model.version_types[model.version]) + \
                       'generated by Flopy.'
        self.url = 'swt.htm'

        self.ipakcb = ipakcb
        self.iswtoc = iswtoc

        self.nsystm = nsystm
        self.ithk = ithk
        self.ivoid = ivoid
        self.istpcs = istpcs
        self.icrcc = icrcc

        self.lnwt = Util2d(model, (nsystm,), np.int, lnwt, name='lnwt')

        self.izcfl = izcfl
        self.izcfm = izcfm
        self.iglfl = iglfl
        self.iglfm = iglfm
        self.iestfl = iestfl
        self.iestfm = iestfm
        self.ipcsfl = ipcsfl
        self.ipcsfm = ipcsfm
        self.istfl = istfl
        self.istfm = istfm

        self.gl0 = Util2d(model, (nrow, ncol), np.float32, gl0, name='gl0')
        self.sgm = Util2d(model, (nrow, ncol), np.float32, sgm, name='sgm')
        self.sgs = Util2d(model, (nrow, ncol), np.float32, sgs, name='sgs')

        # interbed data
        self.thick = Util3d(model, (nsystm, nrow, ncol), np.float32, thick,
                            name='thick',
                            locat=self.unit_number[0])
        self.void = Util3d(model, (nsystm, nrow, ncol), np.float32, void,
                           name='void',
                           locat=self.unit_number[0])
        self.sub = Util3d(model, (nsystm, nrow, ncol), np.float32, sub,
                          name='sub',
                          locat=self.unit_number[0])
        if icrcc != 0:
            self.sse = Util3d(model, (nsystm, nrow, ncol), np.float32, sse,
                              name='sse',
                              locat=self.unit_number[0])
            self.ssv = Util3d(model, (nsystm, nrow, ncol), np.float32, ssv,
                              name='ssv',
                              locat=self.unit_number[0])
            self.cr = None
            self.cc = None
        else:
            self.sse = None
            self.ssv = None
            self.cr = Util3d(model, (nsystm, nrow, ncol), np.float32, cr,
                             name='cr',
                             locat=self.unit_number[0])
            self.cc = Util3d(model, (nsystm, nrow, ncol), np.float32, cc,
                             name='cc',
                             locat=self.unit_number[0])

        # layer data
        if istpcs != 0:
            self.pcsoff = Util3d(model, (nlay, nrow, ncol), np.float32, pcsoff,
                                 name='pcsoff',
                                 locat=self.unit_number[0])
            self.pcs = None
        else:
            self.pcsoff = None
            self.pcs = Util3d(model, (nlay, nrow, ncol), np.float32, pcs,
                              name='pcs',
                              locat=self.unit_number[0])

        # output data
        if iswtoc > 0:
            if ids16 is None:
                self.ids16 = np.zeros((26), dtype=np.int)
                ui = 0
                for i in range(1, 26, 2):
                    self.ids16[i] = item16_units[ui]
                    ui += 1
            else:
                if isinstance(ids16, list):
                    ds16 = np.array(ids16)
                assert len(ids16) == 26
                self.ids16 = ids16

            if ids17 is None:
                ids17 = np.ones((30), dtype=np.int)
                ids17[0] = 0
                ids17[2] = 0
                ids17[1] = 9999
                ids17[3] = 9999
                self.ids17 = np.atleast_2d(ids17)
            else:
                if isinstance(ids17, list):
                    ids17 = np.atleast_2d(np.array(ids17))
                assert ids17.shape[1] == 30
                self.ids17 = ids17

        # add package to model
        self.parent.add_package(self)
Example #42
def eperm_ema(por, eperm1, eperm2, aratio, eguess, eperm3=None, sw=None):
    """Effective electric permittivity using EMA.

    Markov et al., 2012, Journal of Applied Geophysics, Eq. 1.

    Recursive formula.

    Parameters
    ----------
    por : float or array
        Concentration of constituent 1 (host, wetting phase).

    eperm1, eperm2, eperm3 : float or array
        Electric permittivity of constituent 1, 2, 3.
        eperm3 is optional and corresponds to oil; however, if provided one
        has also to provide sw.
        Generally, eperm1=matrix, eperm2=water, eperm3=hydrocarbon.

    sw : float or array
        Water saturation (-), only required if eperm3 is provided.

    aratio : array
        Aspect ratio.

    eguess: float or array
        Initial guess of dielectric permittivity for recursion.

    Returns
    -------
    eperm: float or array
        Effective dielectric permittivity.

    """
    # Check and cast input
    c = _conc(por, sw)
    e = _stack(eperm1, eperm2, eperm3)
    eg = np.atleast_2d(eguess)
    if e.shape[0] == 1:
        eg = eg.transpose()

    def recursive(eperm, guess, dpl, conc, tol=1e-7):
        """Recursion formula to solve for effective electric permittivity.

        Alternatively we could use a root-finding algorithm.
        """

        # Calculate effective electric permittivity for this guess
        R = np.sum(1 / (dpl * eperm[:, None] + (1 - dpl) * guess), axis=1)
        effe = np.sum(conc * eperm * R) / np.sum(conc * R)

        # If error above tolerance, call it again with new guess
        if np.abs(guess - effe) > tol:
            effe = recursive(eperm, effe, dpl, conc, tol)

        return effe

    # Get depolarization factors
    dpl = fdepol(aratio)

    # Loop over porosities
    ema = np.zeros((c.shape[0], e.shape[0]), dtype=e.dtype)
    for ci in range(c.shape[0]):
        for ei in range(e.shape[0]):

            # Calculate effective electric permittivity
            ema[ci, ei] = recursive(e[ei, :], eg[ci, ei], dpl[ci, :], c[ci, :])

    return np.squeeze(ema)
Example #43
    def _check_params(self, n_samples=None):

        # Check regression model
        if not callable(self.regr):
            if self.regr in self._regression_types:
                self.regr = self._regression_types[self.regr]
            else:
                raise ValueError("regr should be one of %s or callable, "
                                 "%s was given."
                                 % (self._regression_types.keys(), self.regr))

        # Check regression weights if given (Ordinary Kriging)
        if self.beta0 is not None:
            self.beta0 = np.atleast_2d(self.beta0)
            if self.beta0.shape[1] != 1:
                # Force to column vector
                self.beta0 = self.beta0.T

        # Check correlation model
        if not callable(self.corr):
            if self.corr in self._correlation_types:
                self.corr = self._correlation_types[self.corr]
            else:
                raise ValueError("corr should be one of %s or callable, "
                                 "%s was given."
                                 % (self._correlation_types.keys(), self.corr))

        # Check storage mode
        if self.storage_mode != 'full' and self.storage_mode != 'light':
            raise ValueError("Storage mode should either be 'full' or "
                             "'light', %s was given." % self.storage_mode)

        # Check correlation parameters
        self.theta0 = np.atleast_2d(self.theta0)
        lth = self.theta0.size

        if self.thetaL is not None and self.thetaU is not None:
            self.thetaL = np.atleast_2d(self.thetaL)
            self.thetaU = np.atleast_2d(self.thetaU)
            if self.thetaL.size != lth or self.thetaU.size != lth:
                raise ValueError("theta0, thetaL and thetaU must have the "
                                 "same length.")
            if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
                raise ValueError("The bounds must satisfy O < thetaL <= "
                                 "thetaU.")

        elif self.thetaL is None and self.thetaU is None:
            if np.any(self.theta0 <= 0):
                raise ValueError("theta0 must be strictly positive.")

        elif self.thetaL is None or self.thetaU is None:
            raise ValueError("thetaL and thetaU should either be both or "
                             "neither specified.")

        # Force verbose type to bool
        self.verbose = bool(self.verbose)

        # Force normalize type to bool
        self.normalize = bool(self.normalize)

        # Check nugget value
        self.nugget = np.asarray(self.nugget)
        if np.any(self.nugget) < 0.:
            raise ValueError("nugget must be positive or zero.")
        if (n_samples is not None
                and self.nugget.shape not in [(), (n_samples,)]):
            raise ValueError("nugget must be either a scalar "
                             "or array of length n_samples.")

        # Check optimizer
        if self.optimizer not in self._optimizer_types:
            raise ValueError("optimizer should be one of %s"
                             % self._optimizer_types)

        # Force random_start type to int
        self.random_start = int(self.random_start)
Example #44
File: tree.py Project: yazun/isaac
def check_array(array,
                accept_sparse=None,
                dtype="numeric",
                order=None,
                copy=False,
                force_all_finite=True,
                ensure_2d=True,
                allow_nd=False,
                ensure_min_samples=1,
                ensure_min_features=1):
    """Input validation on an array, list, sparse matrix or similar.

    By default, the input is converted to an at least 2-d numpy array.
    If the dtype of the array is object, attempt converting to float,
    raising on failure.

    Parameters
    ----------
    array : object
        Input object to check / convert.

    accept_sparse : string, list of string or None (default=None)
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc.  None means that sparse matrix input will raise an error.
        If the input is sparse but not in the allowed format, it will be
        converted to the first listed format.

    dtype : string, type or None (default="numeric")
        Data type of result. If None, the dtype of the input is preserved.
        If "numeric", dtype is preserved unless array.dtype is object.

    order : 'F', 'C' or None (default=None)
        Whether an array will be forced to be fortran or c-style.

    copy : boolean (default=False)
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.

    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X.

    ensure_2d : boolean (default=True)
        Whether to make X at least 2d.

    allow_nd : boolean (default=False)
        Whether to allow X.ndim > 2.

    ensure_min_samples : int (default=1)
        Make sure that the array has a minimum number of samples in its first
        axis (rows for a 2D array). Setting to 0 disables this check.

    ensure_min_features : int (default=1)
        Make sure that the 2D array has some minimum number of features
        (columns). The default value of 1 rejects empty datasets.
        This check is only enforced when the input data has effectively 2
        dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
        disables this check.

    Returns
    -------
    X_converted : object
        The converted and validated X.
    """
    if isinstance(accept_sparse, str):
        accept_sparse = [accept_sparse]

    # store whether originally we wanted numeric dtype
    dtype_numeric = dtype == "numeric"

    if ensure_2d:
        array = np.atleast_2d(array)
    if dtype_numeric:
        if hasattr(array, "dtype") and getattr(array.dtype, "kind",
                                               None) == "O":
            # if input is object, convert to float.
            dtype = np.float64
        else:
            dtype = None
    array = np.array(array, dtype=dtype, order=order, copy=copy)
    # make sure we actually converted to numeric:
    if dtype_numeric and array.dtype.kind == "O":
        array = array.astype(np.float64)
    if not allow_nd and array.ndim >= 3:
        raise ValueError("Found array with dim %d. Expected <= 2" % array.ndim)
    if force_all_finite:
        _assert_all_finite(array)

    shape_repr = _shape_repr(array.shape)
    if ensure_min_samples > 0:
        n_samples = _num_samples(array)
        if n_samples < ensure_min_samples:
            raise ValueError("Found array with %d sample(s) (shape=%s) while a"
                             " minimum of %d is required." %
                             (n_samples, shape_repr, ensure_min_samples))

    if ensure_min_features > 0 and array.ndim == 2:
        n_features = array.shape[1]
        if n_features < ensure_min_features:
            raise ValueError("Found array with %d feature(s) (shape=%s) while"
                             " a minimum of %d is required." %
                             (n_features, shape_repr, ensure_min_features))
    return array
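The same validation is exposed through the public scikit-learn API (sklearn.utils.check_array); a minimal usage sketch, purely illustrative and assuming scikit-learn is installed:

import numpy as np
from sklearn.utils import check_array

X = check_array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])   # nested list -> validated 2-D ndarray
print(X.shape, X.dtype)

try:
    check_array([[1.0, np.nan]])                       # non-finite values are rejected by default
except ValueError as err:
    print(err)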
Example #45
0
def _beta_divergence(X, W, H, beta, square_root=False):
    """Compute the beta-divergence of X and dot(W, H).

    Parameters
    ----------
    X : float or array-like, shape (n_samples, n_features)

    W : float or dense array-like, shape (n_samples, n_components)

    H : float or dense array-like, shape (n_components, n_features)

    beta : float, string in {'frobenius', 'kullback-leibler', 'itakura-saito'}
        Parameter of the beta-divergence.
        If beta == 2, this is half the Frobenius *squared* norm.
        If beta == 1, this is the generalized Kullback-Leibler divergence.
        If beta == 0, this is the Itakura-Saito divergence.
        Else, this is the general beta-divergence.

    square_root : boolean, default False
        If True, return np.sqrt(2 * res)
        For beta == 2, it corresponds to the Frobenius norm.

    Returns
    -------
        res : float
            Beta divergence of X and np.dot(W, H)
    """
    beta = _beta_loss_to_float(beta)

    # The method can be called with scalars
    if not sp.issparse(X):
        X = np.atleast_2d(X)
    W = np.atleast_2d(W)
    H = np.atleast_2d(H)

    # Frobenius norm
    if beta == 2:
        # Avoid the creation of the dense np.dot(W, H) if X is sparse.
        if sp.issparse(X):
            norm_X = np.dot(X.data, X.data)
            norm_WH = trace_dot(np.dot(np.dot(W.T, W), H), H)
            cross_prod = trace_dot((X * H.T), W)
            res = (norm_X + norm_WH - 2. * cross_prod) / 2.
        else:
            res = squared_norm(X - np.dot(W, H)) / 2.

        if square_root:
            return np.sqrt(res * 2)
        else:
            return res

    if sp.issparse(X):
        # compute np.dot(W, H) only where X is nonzero
        WH_data = _special_sparse_dot(W, H, X).data
        X_data = X.data
    else:
        WH = np.dot(W, H)
        WH_data = WH.ravel()
        X_data = X.ravel()

    # do not affect the zeros: here 0 ** (-1) = 0 and not infinity
    indices = X_data > EPSILON
    WH_data = WH_data[indices]
    X_data = X_data[indices]

    # used to avoid division by zero
    WH_data[WH_data == 0] = EPSILON

    # generalized Kullback-Leibler divergence
    if beta == 1:
        # fast and memory efficient computation of np.sum(np.dot(W, H))
        sum_WH = np.dot(np.sum(W, axis=0), np.sum(H, axis=1))
        # computes np.sum(X * log(X / WH)) only where X is nonzero
        div = X_data / WH_data
        res = np.dot(X_data, np.log(div))
        # add full np.sum(np.dot(W, H)) - np.sum(X)
        res += sum_WH - X_data.sum()

    # Itakura-Saito divergence
    elif beta == 0:
        div = X_data / WH_data
        res = np.sum(div) - np.prod(X.shape) - np.sum(np.log(div))

    # beta-divergence, beta not in (0, 1, 2)
    else:
        if sp.issparse(X):
            # slow loop, but memory efficient computation of :
            # np.sum(np.dot(W, H) ** beta)
            sum_WH_beta = 0
            for i in range(X.shape[1]):
                sum_WH_beta += np.sum(np.dot(W, H[:, i])**beta)

        else:
            sum_WH_beta = np.sum(WH**beta)

        sum_X_WH = np.dot(X_data, WH_data**(beta - 1))
        res = (X_data**beta).sum() - beta * sum_X_WH
        res += sum_WH_beta * (beta - 1)
        res /= beta * (beta - 1)

    if square_root:
        return np.sqrt(2 * res)
    else:
        return res
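As a sanity check of the special cases named above, the three divergences can be written directly for dense inputs; a short illustrative NumPy sketch (not the sparse-aware implementation above):

import numpy as np

rng = np.random.default_rng(0)
X = rng.random((4, 3)) + 0.1
W = rng.random((4, 2)) + 0.1
H = rng.random((2, 3)) + 0.1
WH = W @ H

frobenius = 0.5 * np.sum((X - WH) ** 2)                   # beta == 2
kullback_leibler = np.sum(X * np.log(X / WH) - X + WH)    # beta == 1 (generalized KL)
itakura_saito = np.sum(X / WH - np.log(X / WH) - 1)       # beta == 0
print(frobenius, kullback_leibler, itakura_saito)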
Example #46
0
    def _frienemy_pruning(self, neighbors):
        """Implements the Online Pruning method (frienemy) to remove base
        classifiers that do not cross the region of competence. We consider
        that a classifier crosses the region of competence if it correctly
        classifies at least one sample for each different class in the region.

        Parameters
        ----------
        neighbors : array of shape = [n_samples, n_neighbors]
                    Indices of the k nearest neighbors of each query
                    instance.

        Returns
        -------
        DFP_mask : array of shape = [n_samples, n_classifiers]
                   Mask containing 1 for the selected base classifiers and 0
                   otherwise.

        References
        ----------
        Oliveira, D.V.R., Cavalcanti, G.D.C. and Sabourin, R., Online Pruning
        of Base Classifiers for Dynamic Ensemble Selection,
        Pattern Recognition, vol. 72, December 2017, pp 44-58.
        """
        # using a for loop for processing a batch of samples temporarily.
        # Change later to numpy processing
        if neighbors.ndim < 2:
            neighbors = np.atleast_2d(neighbors)

        n_samples, _ = neighbors.shape
        mask = np.zeros((n_samples, self.n_classifiers_))

        for sample_idx in range(n_samples):
            # Check if the query is in an indecision region
            neighbors_y = self.DSEL_target_[
                neighbors[sample_idx, :self.safe_k]]

            if len(set(neighbors_y)) > 1:
                # There is more than one class in the region of competence
                # (So it is an indecision region).

                # Check if the base classifier predicts the correct label for
                # a sample belonging to each class.
                for clf_index in range(self.n_classifiers_):
                    predictions = self.DSEL_processed_[
                        neighbors[sample_idx, :self.safe_k], clf_index]
                    correct_class_pred = [self.DSEL_target_[index] for
                                          count, index in
                                          enumerate(neighbors[sample_idx,
                                                    :self.safe_k])
                                          if predictions[count] == 1]

                    # If that is true, it means that it correctly classified
                    # at least one neighbor for each class in
                    # the region of competence
                    if np.unique(correct_class_pred).size > 1:
                        mask[sample_idx, clf_index] = 1.0
                # Check if all classifiers were pruned
                if not np.count_nonzero(mask[sample_idx, :]):
                    # Do not apply the pruning mechanism.
                    mask[sample_idx, :] = 1.0

            else:
                # The sample is located in a safe region. All base classifiers
                # can predict the label
                mask[sample_idx, :] = 1.0

        return mask
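The masking logic can be illustrated on a toy region of competence with plain NumPy; a minimal sketch with made-up neighbor labels and per-classifier hits (illustrative only, not part of the original code):

import numpy as np

# labels of the k neighbors of one query sample (two classes -> indecision region)
neighbors_y = np.array([0, 1, 0, 1, 1])
# hits[i, j] == 1 if base classifier j correctly classified neighbor i
hits = np.array([[1, 0, 1],
                 [0, 0, 1],
                 [1, 0, 0],
                 [0, 0, 1],
                 [1, 0, 0]])

mask = np.zeros(hits.shape[1])
if len(set(neighbors_y)) > 1:                    # indecision region
    for clf in range(hits.shape[1]):
        correct_classes = neighbors_y[hits[:, clf] == 1]
        if np.unique(correct_classes).size > 1:  # classifier crosses the region
            mask[clf] = 1.0
    if not mask.any():                           # everything pruned -> keep all
        mask[:] = 1.0
else:                                            # safe region
    mask[:] = 1.0
print(mask)                                      # [1. 0. 1.]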
Example #47
0
z43 = np.zeros((16, 16))
print("all values in z43", z43)
# print all the values in the array

z44 = np.arange(100)
z45 = np.random.uniform(0, 100)
index = (np.abs(z44 - z45)).argmin()
print("closest value in z44 is", z44[index])
# given a scalar, find the value in the array that is closest to it

z46 = np.zeros(10, [('position', [('x', float, 1), ('y', float, 1)]),
                    ('color', [('r', float, 1), ('g', float, 1), ('b', float, 1)])])
print("position and color are", z46)
# create a structured array representing positions (x, y) and colors (r, g, b)

z47 = np.random.random((10, 2))
x, y = np.atleast_2d(z47[:, 0], z47[:, 1])
D = np.sqrt((x - x.T)**2 + (y - y.T)**2)
print("D in z47 is", D)
# for a random vector of shape (10, 2) representing coordinates, find the point-to-point distances
#import scipy
#import scipy.spatial
#D=scipy.spatial.distance.cdist(z47,z47);
#print(D)

# convert a 32-bit float array into the corresponding 32-bit integer array
z48 = np.arange(10, dtype=np.float32)
z48 = z48.astype(np.int32, copy=False)
print("z48 is", z48)

# z49=np.genfromtxt[1,2,3,4,5]
# read the following file (the file contents are not included here)
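Since the file referenced by the exercise is not shown, here is a minimal sketch of np.genfromtxt on an assumed comma-separated snippet (hypothetical data, for illustration only):

from io import StringIO
import numpy as np

data = StringIO("1, 2, 3, 4, 5\n6,  ,  , 7, 8\n ,  , 9,10,11")   # assumed file contents
z49 = np.genfromtxt(data, delimiter=",")
print("z49 is", z49)                                              # missing fields become nan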
Example #48
0
def paciorek_hf(xx):
    xx = np.atleast_2d(xx)

    x1, x2 = xx.T
    return np.sin(1/(x1*x2))
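A quick call just to show the expected shapes (illustrative only): each row of the input is a 2-D point and one scalar is returned per row.

import numpy as np
print(paciorek_hf(np.array([[0.5, 0.5], [1.0, 1.0]])))   # array([sin(4), sin(1)])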
Example #49
0
def FixedTauABC(T, eps_input, fixtau='satellite', Npart=1000, prior_name='try0', 
        observables=['fqz_multi'], abcrun=None, 
        restart=False, t_restart=None, eps_restart=None, **sim_kwargs):
    ''' Run ABC-PMC analysis for central galaxy SFH model with *FIXED* quenching 
    timescale

    Parameters
    ----------
    T : (int) 
        Number of iterations

    eps_input : (float)
        Starting epsilon threshold value 

    Npart : (int)
        Number of particles

    prior_name : (string)
        String that specifies what prior to use.

    abcrun : (string)
        String that specifies abc run information 
    '''
    if isinstance(eps_input, list):
        if len(eps_input) != len(observables): 
            raise ValueError
    if len(observables) > 1 and isinstance(eps_input, float): 
        raise ValueError

    # output abc run details
    sfinherit_kwargs, abcrun_flag = MakeABCrun(
            abcrun=abcrun, Niter=T, Npart=Npart, prior_name=prior_name, 
            eps_val=eps_input, restart=restart, **sim_kwargs) 

    # Data 
    data_sum = DataSummary(observables=observables)
    # Priors
    prior_min, prior_max = PriorRange(prior_name)
    prior = abcpmc.TophatPrior(prior_min, prior_max)    # ABCPMC prior object

    def Simz(tt):       # Simulator (forward model) 
        gv_slope = tt[0]
        gv_offset = tt[1]
        fudge_slope = tt[2]
        fudge_offset = tt[3]

        sim_kwargs = sfinherit_kwargs.copy()
        sim_kwargs['sfr_prop']['gv'] = {'slope': gv_slope, 'fidmass': 10.5, 'offset': gv_offset}
        sim_kwargs['evol_prop']['fudge'] = {'slope': fudge_slope, 'fidmass': 10.5, 'offset': fudge_offset}
        sim_kwargs['evol_prop']['tau'] = {'name': fixtau}
        
        sim_output = SimSummary(observables=observables, **sim_kwargs)
        return sim_output

    theta_file = lambda pewl: ''.join([code_dir(), 
        'dat/pmc_abc/', 'CenQue_theta_t', str(pewl), '_', abcrun_flag, 
        '.fixedtau.', fixtau, '.dat']) 
    w_file = lambda pewl: ''.join([code_dir(), 
        'dat/pmc_abc/', 'CenQue_w_t', str(pewl), '_', abcrun_flag, 
        '.fixedtau.', fixtau, '.dat']) 
    dist_file = lambda pewl: ''.join([code_dir(), 
        'dat/pmc_abc/', 'CenQue_dist_t', str(pewl), '_', abcrun_flag, 
        '.fixedtau.', fixtau, '.dat']) 
    eps_file = ''.join([code_dir(), 
        'dat/pmc_abc/epsilon_', abcrun_flag, 
        '.fixedtau.', fixtau, '.dat']) 

    distfn = RhoFq
   
    if restart:
        if t_restart is None: 
            raise ValueError

        last_thetas = np.loadtxt(theta_file(t_restart))
        last_ws = np.loadtxt(w_file(t_restart))
        last_dist = np.loadtxt(dist_file(t_restart))

        init_pool = abcpmc.PoolSpec(t_restart, None, None, last_thetas, last_dist, last_ws)
    else: 
        init_pool = None 

    eps = abcpmc.ConstEps(T, eps_input)
    try:
        mpi_pool = mpi_util.MpiPool()
        abcpmc_sampler = abcpmc.Sampler(
                N=Npart,                # N_particles
                Y=data_sum,             # data
                postfn=Simz,            # simulator 
                dist=distfn,           # distance function  
                pool=mpi_pool)  
    except AttributeError: 
        abcpmc_sampler = abcpmc.Sampler(
                N=Npart,                # N_particles
                Y=data_sum,             # data
                postfn=Simz,            # simulator 
                dist=distfn)           # distance function  
    abcpmc_sampler.particle_proposal_cls = abcpmc.ParticleProposal

    pools = []
    if init_pool is None: 
        f = open(eps_file, "w")
        f.close()
    eps_str = ''
    for pool in abcpmc_sampler.sample(prior, eps, pool=init_pool):
        print('----------------------------------------')
        print('eps ', pool.eps)
        new_eps_str = str(pool.eps)+'\t'+str(pool.ratio)+'\n'
        if eps_str != new_eps_str:  # if eps is different, open file and append 
            f = open(eps_file, "a")
            eps_str = new_eps_str
            f.write(eps_str)
            f.close()

        print("T:{0},ratio: {1:>.4f}".format(pool.t, pool.ratio))
        print(eps(pool.t))

        # write theta, weights, and distances to file 
        np.savetxt(theta_file(pool.t), pool.thetas, 
            header='gv_slope, gv_offset, fudge_slope, fudge_offset')
        np.savetxt(w_file(pool.t), pool.ws)
        np.savetxt(dist_file(pool.t), pool.dists)
    
        # update epsilon based on median thresholding 
        if len(observables) == 1: 
            eps.eps = np.median(pool.dists)
        else:
            # print(pool.dists)
            print(np.median(np.atleast_2d(pool.dists), axis=0))
            eps.eps = np.median(np.atleast_2d(pool.dists), axis=0)
        print('----------------------------------------')
        pools.append(pool)

    return pools 
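The adaptive median-thresholding update above is the core of the loop; a toy rejection-ABC sketch in plain NumPy (no abcpmc, purely illustrative) shows the idea of shrinking epsilon to the median of the accepted distances:

import numpy as np

rng = np.random.default_rng(0)
observed_summary = 0.4                            # toy "data" summary statistic
n_part, eps = 200, 1.0

theta = rng.uniform(-2, 2, n_part)                # draws from a top-hat prior
for t in range(3):
    sims = theta + rng.normal(0, 0.1, n_part)     # forward-model summaries
    dists = np.abs(sims - observed_summary)
    keep = dists < eps
    theta = rng.choice(theta[keep], size=n_part)  # crude resampling of accepted particles
    eps = np.median(dists[keep])                  # median thresholding, as in the loop above
    print(t, eps, theta.mean())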
Example #50
0
def _concatenate_2d(to_concat, axis: int):
    # coerce to 2d if needed & concatenate
    if axis == 1:
        to_concat = [np.atleast_2d(x) for x in to_concat]
    return np.concatenate(to_concat, axis=axis)
Example #51
0
def callable_rbf_kernel(x, y, **kwds):
    # Callable version of pairwise.rbf_kernel.
    K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
    return K
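Such a callable can be passed directly as a custom kernel; a minimal sketch (illustrative, assuming scikit-learn is installed and rbf_kernel is imported for the helper above):

import numpy as np
from sklearn.datasets import make_blobs
from sklearn.metrics.pairwise import rbf_kernel   # used inside callable_rbf_kernel
from sklearn.svm import SVC

X, y = make_blobs(n_samples=60, centers=2, random_state=0)
clf = SVC(kernel=callable_rbf_kernel).fit(X, y)    # SVC calls the kernel with (X, Y)
print(clf.score(X, y))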
Example #52
0
    def test_sequential_kl_oed(self):
        """
        Observations collected ARE used to inform subsequent designs
        """
        nrandom_vars = 1
        noise_std = 1
        ndesign = 5
        nouter_loop_samples = int(1e4)
        ninner_loop_samples = 31

        ncandidates = 6
        design_candidates = np.linspace(-1, 1, ncandidates)[None, :]

        def obs_fun(samples):
            assert design_candidates.ndim == 2
            assert samples.ndim == 2
            Amat = design_candidates.T
            return Amat.dot(samples).T

        prior_variable = IndependentMultivariateRandomVariable(
            [stats.norm(0, 1)] * nrandom_vars)

        true_sample = np.array([.4] * nrandom_vars)[:, None]

        def obs_process(new_design_indices):
            obs = obs_fun(true_sample)[:, new_design_indices]
            obs += oed.noise_fun(obs)
            return obs

        x_quad, w_quad = gauss_hermite_pts_wts_1D(ninner_loop_samples)

        def generate_inner_prior_samples_gauss(n):
            # use precomputed samples so to avoid cost of regenerating
            assert n == x_quad.shape[0]
            return x_quad[None, :], w_quad

        generate_inner_prior_samples = generate_inner_prior_samples_gauss

        # Define initial design
        init_design_indices = np.array([ncandidates // 2])
        oed = BayesianSequentialKLOED(design_candidates, obs_fun, noise_std,
                                      prior_variable, obs_process,
                                      nouter_loop_samples, ninner_loop_samples,
                                      generate_inner_prior_samples)
        oed.populate()
        oed.set_collected_design_indices(init_design_indices)

        prior_mean = oed.prior_variable.get_statistics('mean')
        prior_cov = np.diag(prior_variable.get_statistics('var')[:, 0])
        prior_cov_inv = np.linalg.inv(prior_cov)

        exact_post_mean_prev = prior_mean
        exact_post_cov_prev = prior_cov
        post_var_prev = stats.multivariate_normal(
            mean=exact_post_mean_prev[:, 0], cov=exact_post_cov_prev)
        selected_indices = init_design_indices

        # Because of Monte Carlo error set step tols individually
        # It is too expensive to up the number of outer_loop samples to
        # reduce errors
        step_tols = [7.3e-3, 6.5e-2, 3.3e-2, 1.6e-1]

        for step in range(len(init_design_indices), ndesign):
            current_design = design_candidates[:, oed.collected_design_indices]
            noise_cov_inv = np.eye(current_design.shape[1]) / noise_std**2

            # Compute posterior moving from previous posterior and using
            # only the most recently collected data
            noise_cov_inv_incr = np.eye(
                selected_indices.shape[0]) / noise_std**2
            exact_post_mean, exact_post_cov = \
                laplace_posterior_approximation_for_linear_models(
                    design_candidates[:, selected_indices].T,
                    exact_post_mean_prev, np.linalg.inv(exact_post_cov_prev),
                    noise_cov_inv_incr, oed.collected_obs[:, -1:].T)

            # check that using the current posterior as prior and only the new
            # data (above) produces the same posterior as using the original prior
            # and all collected data (from_prior approach). The posteriors
            # should be the same but the evidences will be different.
            # This is tested below
            exact_post_mean_from_prior, exact_post_cov_from_prior = \
                laplace_posterior_approximation_for_linear_models(
                    current_design.T, prior_mean, prior_cov_inv, noise_cov_inv,
                    oed.collected_obs.T)

            assert np.allclose(exact_post_mean, exact_post_mean_from_prior)
            assert np.allclose(exact_post_cov, exact_post_cov_from_prior)

            # Compute PDF of current posterior that uses all collected data
            post_var = stats.multivariate_normal(
                mean=exact_post_mean[:, 0].copy(), cov=exact_post_cov.copy())

            # Compute evidence moving from previous posterior to
            # new posterior (not initial prior to posterior).
            # Values can be computed exactly for Gaussian prior and noise
            gauss_evidence = laplace_evidence(
                lambda x: np.exp(
                    gaussian_loglike_fun(
                        oed.collected_obs[:, -1:],
                        obs_fun(x)[:, oed.collected_design_indices[-1:]],
                        noise_std))[:, 0],
                lambda y: np.atleast_2d(post_var_prev.pdf(y.T)).T,
                exact_post_cov, exact_post_mean)

            # Compute evidence using Gaussian quadrature rule. This
            # is possible for this low-dimensional example.
            quad_loglike_vals = np.exp(
                gaussian_loglike_fun(
                    oed.collected_obs[:, -1:],
                    obs_fun(
                        x_quad[None, :])[:, oed.collected_design_indices[-1:]],
                    noise_std))[:, 0]
            # we must divide the integrand by the initial prior_pdf since it is
            # already implicitly included via the quadrature weights
            integrand_vals = quad_loglike_vals * post_var_prev.pdf(
                x_quad[:, None]) / prior_variable.pdf(x_quad[None, :])[:, 0]
            quad_evidence = integrand_vals.dot(w_quad)
            # print(quad_evidence, gauss_evidence)
            assert np.allclose(gauss_evidence, quad_evidence), step

            # print('G', gauss_evidence, oed.evidence)
            assert np.allclose(gauss_evidence, oed.evidence), step

            # compute the evidence of moving from the initial prior
            # to the current posterior. This will be used for testing later
            gauss_evidence_from_prior = laplace_evidence(
                lambda x: np.exp(
                    gaussian_loglike_fun(
                        oed.collected_obs,
                        obs_fun(x)[:, oed.collected_design_indices], noise_std)
                )[:, 0], prior_variable.pdf, exact_post_cov, exact_post_mean)

            # Copy current state of OED before new data is determined
            # This copy will be used to compute Laplace based utility and
            # evidence values for testing
            oed_copy = copy.deepcopy(oed)

            # Update the design
            utility_vals, selected_indices = oed.update_design()
            new_obs = oed.obs_process(selected_indices)
            oed.update_observations(new_obs)
            utility = utility_vals[selected_indices]

            # Re-compute the evidences that were used to update the design
            # above. This will be used for testing later
            # print('D', oed_copy.evidence)
            evidences = oed_copy.compute_expected_utility(
                oed_copy.collected_design_indices, selected_indices, True)[1]

            # print('Collected plus selected indices',
            #       oed.collected_design_indices,
            #       oed_copy.collected_design_indices, selected_indices)

            # For all outer loop samples compute the posterior exactly
            # and compute intermediate values for testing. While OED
            # considers all possible candidate design indices
            # Here we just test the one that was chosen last when
            # design was updated
            exact_evidences = np.empty(nouter_loop_samples)
            exact_kl_divs = np.empty_like(exact_evidences)
            for jj in range(nouter_loop_samples):
                # Fill obs with those predicted by outer loop sample
                idx = oed.collected_design_indices
                obs_jj = oed_copy.outer_loop_obs[jj:jj + 1, idx]
                # Overwrite the previously simulated obs with collected obs.
                # Do not overwrite the last value, which is the potential
                # data used to compute expected utility
                obs_jj[:, :oed_copy.collected_obs.shape[1]] = \
                    oed_copy.collected_obs

                # Compute the posterior obtained by using predicted value
                # of outer loop sample
                noise_cov_inv_jj = np.eye(
                    selected_indices.shape[0]) / noise_std**2
                exact_post_mean_jj, exact_post_cov_jj = \
                    laplace_posterior_approximation_for_linear_models(
                        design_candidates[:, selected_indices].T,
                        exact_post_mean, np.linalg.inv(exact_post_cov),
                        noise_cov_inv_jj, obs_jj[:, -1].T)

                # Use post_pdf so measure change from current posterior (prior)
                # to new posterior
                gauss_evidence_jj = laplace_evidence(
                    lambda x: np.exp(
                        gaussian_loglike_fun(obs_jj[:, -1:],
                                             obs_fun(x)[:, selected_indices],
                                             noise_std))[:, 0],
                    lambda y: np.atleast_2d(post_var.pdf(y.T)).T,
                    exact_post_cov_jj, exact_post_mean_jj)
                exact_evidences[jj] = gauss_evidence_jj

                # Check quadrature gets the same answer
                quad_loglike_vals = np.exp(
                    gaussian_loglike_fun(
                        obs_jj[:, -1:],
                        obs_fun(x_quad[None, :])[:, selected_indices],
                        noise_std))[:, 0]
                integrand_vals = quad_loglike_vals * post_var.pdf(
                    x_quad[:, None]) / prior_variable.pdf(x_quad[None, :])[:,
                                                                           0]
                quad_evidence = integrand_vals.dot(w_quad)
                # print(quad_evidence, gauss_evidence_jj)
                assert np.allclose(gauss_evidence_jj, quad_evidence), step

                # Check that the evidence of moving from the current posterior
                # to the new posterior (with potential data from the outer-loop
                # sample) is equal to the evidence of moving from the initial
                # prior to the new posterior divided by the evidence of moving
                # from the initial prior to the current posterior
                gauss_evidence_jj_from_prior = laplace_evidence(
                    lambda x: np.exp(
                        gaussian_loglike_fun(obs_jj,
                                             obs_fun(x)[:, idx], noise_std)
                    )[:, 0], prior_variable.pdf, exact_post_cov_jj,
                    exact_post_mean_jj)
                # print(gauss_evidence_jj_from_prior/gauss_evidence_from_prior,
                #       gauss_evidence_jj)
                # print('gauss_evidence_from_prior', gauss_evidence_from_prior)
                assert np.allclose(
                    gauss_evidence_jj_from_prior / gauss_evidence_from_prior,
                    gauss_evidence_jj)

                gauss_kl_div = gaussian_kl_divergence(exact_post_mean_jj,
                                                      exact_post_cov_jj,
                                                      exact_post_mean,
                                                      exact_post_cov)
                # gauss_kl_div = gaussian_kl_divergence(
                #     exact_post_mean, exact_post_cov,
                #     exact_post_mean_jj, exact_post_cov_jj)
                exact_kl_divs[jj] = gauss_kl_div

            # print(evidences[:, 0], exact_evidences)
            assert np.allclose(evidences[:, 0], exact_evidences)

            # Outer loop samples are from prior. Use importance reweighting
            # to sample from previous posterior. This step is only relevant
            # for open loop design (used here)
            # where observed data informs current estimate
            # of parameters. Closed loop design (not used here)
            # never collects data and so it always samples from the prior.
            post_weights = post_var.pdf(
                oed.outer_loop_prior_samples.T) / post_var_prev.pdf(
                    oed.outer_loop_prior_samples.T) / oed.nouter_loop_samples
            laplace_utility = np.sum(exact_kl_divs * post_weights)
            # print('u', (utility-laplace_utility)/laplace_utility, step)
            assert np.allclose(utility,
                               laplace_utility,
                               rtol=step_tols[step - 1])

            exact_post_mean_prev = exact_post_mean
            exact_post_cov_prev = exact_post_cov
            post_var_prev = post_var
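For reference, the closed-form divergence that gaussian_kl_divergence is expected to return is the standard one between multivariate Gaussians; a minimal NumPy version, assuming the convention KL(first || second):

import numpy as np

def kl_gauss(m0, S0, m1, S1):
    # KL( N(m0, S0) || N(m1, S1) ) between k-dimensional Gaussians
    m0, m1 = np.asarray(m0).ravel(), np.asarray(m1).ravel()
    k = m0.size
    S1_inv = np.linalg.inv(S1)
    diff = m1 - m0
    return 0.5 * (np.trace(S1_inv @ S0) + diff @ S1_inv @ diff - k
                  + np.log(np.linalg.det(S1) / np.linalg.det(S0)))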
Example #53
0
    def decode_ext(self, X, lengths=None, w=None, ext_shape=None):
        """Find memoryless most likely state sequence corresponding to ``X``,
        (that is, the symbol-by-symbol MAP sequence) and then map those
        states to an associated external representation (e.g. position).

        example 1d
        ----------
        posterior_pos, bdries, mode_pth, mean_pth = hmm.decode_ext(bst_no_ripple, ext_shape=(vtc.n_bins,))
        mean_pth = vtc.bins[0] + mean_pth*(vtc.bins[-1] - vtc.bins[0])

        example 2d
        ----------
        posterior_, bdries_, mode_pth_, mean_pth_ = hmm.decode_ext(bst, ext_shape=(ext_nx, ext_ny))
        mean_pth_[0,:] = vtc2d.xbins[0] + mean_pth_[0,:]*(vtc2d.xbins[-1] - vtc2d.xbins[0])
        mean_pth_[1,:] = vtc2d.ybins[0] + mean_pth_[1,:]*(vtc2d.ybins[-1] - vtc2d.ybins[0])

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Feature matrix of individual samples.
            OR
            nelpy.BinnedSpikeTrainArray
        lengths : array-like of integers, shape (n_sequences, ), optional
            Lengths of the individual sequences in ``X``. The sum of
            these should be ``n_samples``. This is not used when X is
            a nelpy.BinnedSpikeTrainArray, in which case the lengths are
            automatically inferred.

        Returns
        -------
        ext_posteriors, bdries, mode_pth, mean_pth

        ext_posteriors : array, shape (n_extern, n_samples)
            State-membership probabilities for each sample in ``X``.

        See Also
        --------
        score_samples : Compute the log probability under the model and
            posteriors.

        score : Compute the log probability under the model.
        """

        _, n_extern = self._extern_.shape

        if ext_shape is None:
            ext_shape = (n_extern,)

        if not isinstance(X, BinnedSpikeTrainArray):
            # assume we have a feature matrix
            raise NotImplementedError("not implemented yet.")
            if w is not None:
                raise NotImplementedError(
                    "sliding window decoding for feature matrices not yet implemented!"
                )
        else:
            # we have a BinnedSpikeTrainArray
            pass
        if len(ext_shape) == 1:
            # do old style decoding
            # TODO: this can be improved to be like the 2D case!
            state_posteriors, lengths = self.predict_proba(X=X,
                                                           lengths=lengths,
                                                           w=w,
                                                           returnLengths=True)
            # fixy = np.mean(self._extern_ * np.arange(n_extern), axis=1)
            # mean_pth = np.sum(state_posteriors.T*fixy, axis=1) # range 0 to 1
            ext_posteriors = np.dot((self._extern_ * np.arange(n_extern)).T,
                                    state_posteriors)
            # normalize ext_posterior distributions:
            ext_posteriors = ext_posteriors / ext_posteriors.sum(axis=0)
            mean_pth = (ext_posteriors.T *
                        np.atleast_2d(np.linspace(0, 1, n_extern))).sum(axis=1)
            mode_pth = np.argmax(ext_posteriors,
                                 axis=0) / n_extern  # range 0 to 1

        elif len(ext_shape) == 2:
            ext_posteriors = np.zeros((ext_shape[0], ext_shape[1], X.n_bins))
            # get posterior distribution over states, of size (num_States, n_extern)
            state_posteriors, lengths = self.predict_proba(X=X,
                                                           lengths=lengths,
                                                           w=w,
                                                           returnLengths=True)
            # for each bin, compute the distribution in the external domain
            for bb in range(X.n_bins):
                ext_posteriors[:, :, bb] = np.reshape(
                    (self._extern_ * state_posteriors[:, [bb]]).sum(axis=0),
                    ext_shape)
            # now compute mean and mode paths
            expected_x = np.sum(
                (ext_posteriors.sum(axis=1) *
                 np.atleast_2d(np.linspace(0, 1, ext_shape[0])).T),
                axis=0)
            expected_y = np.sum(
                (ext_posteriors.sum(axis=0) *
                 np.atleast_2d(np.linspace(0, 1, ext_shape[1])).T),
                axis=0)
            mean_pth = np.vstack((expected_x, expected_y))

            mode_pth = np.zeros((2, X.n_bins))
            for tt in range(X.n_bins):
                if np.any(np.isnan(ext_posteriors[:, :, tt])):
                    mode_pth[0, tt] = np.nan
                    mode_pth[1, tt] = np.nan
                else:
                    x_, y_ = np.unravel_index(
                        np.argmax(ext_posteriors[:, :, tt]),
                        (ext_shape[0], ext_shape[1]))
                    mode_pth[0, tt] = x_ / ext_shape[0]
                    mode_pth[1, tt] = y_ / ext_shape[1]

            ext_posteriors = np.transpose(ext_posteriors, axes=[1, 0, 2])
        else:
            raise TypeError("shape not currently supported!")

        bdries = np.cumsum(lengths)

        return ext_posteriors, bdries, mode_pth, mean_pth
Example #54
0
    def get_model_parameters(self):
        """
        Returns a 2D numpy array with the parameters of the model
        """
        return np.atleast_2d(self.model[:])
Example #55
0

def run_model(k_array):
    np.savetxt(HK_PATH, k_array, fmt="%15.6E", delimiter='')
    os.system(EXE_PATH + " " + NAM_PATH)
    return np.loadtxt(HDS_PATH)


# copy the raw model files into the working directory
for f in os.listdir(RAW_MODEL_PATH):
    shutil.copy2(os.path.join(RAW_MODEL_PATH, f), f)
shutil.copy2(HK_PATH, HK_PATH + ".init")
shutil.copy2(HDS_PATH, HDS_PATH + ".init")

initial_hk = np.atleast_2d(np.loadtxt(HK_PATH + ".init"))
initial_hds = np.loadtxt(HDS_PATH + ".init")
#strt_path = os.path.join("model","ref_cal","strt_Layer_1.ref")
#if os.path.exists(strt_path):os.remove(strt_path)
#shutil.copy2(HDS_PATH,strt_path)
new_hds = initial_hds
np.savetxt(HK_PATH, initial_hk, fmt="%15.6E", delimiter='')

delx = 10
cell_nums = np.arange(delx,
                      (initial_hk.shape[1] * delx) + delx, delx) - (0.5 * delx)
fig = plt.figure(figsize=(8, 8))
ax_cal = plt.axes((0.1, 0.575, 0.8, 0.4))
ax_prd = plt.axes((0.1, 0.15, 0.8, 0.4))

for i, (col) in enumerate(cell_nums):
Example #56
0
print(__doc__)

import numpy as np
from matplotlib import pyplot as plt
import re
import os
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C

np.random.seed(1)

for score in range(0, 1):

    # ----------------------------------------------------------------------
    #  First the noiseless case
    X = np.atleast_2d([x for x in range(0, 7)]).T

    y = np.array([0.1, 0.2, 0.5, 0.6, 0.8, 0.9, 0.1])

    #y = np.array([x / 2 for x in range(0, 10)])
    print(y)

    # Mesh the input space for evaluations of the real function, the prediction and
    # its MSE
    x = np.atleast_2d(np.linspace(0, 7, 10000)).T

    # Instantiate a Gaussian Process model
    kernel = C(1.0, (1e-3, 1e3)) * RBF(10, (1e-2, 1e2))
    gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)

    # Fit to data using Maximum Likelihood Estimation of the parameters
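    # --- the original example is truncated here; the lines below are a hedged
    # --- sketch of the usual next steps with this scikit-learn API, not the
    # --- original code ---
    gp.fit(X, y)
    y_pred, sigma = gp.predict(x, return_std=True)

    plt.plot(X, y, 'r.', markersize=10, label='Observations')
    plt.plot(x, y_pred, 'b-', label='Prediction')
    plt.fill_between(x.ravel(), y_pred - 1.96 * sigma, y_pred + 1.96 * sigma,
                     alpha=0.3, label='95% confidence interval')
    plt.legend()
    plt.show()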
Example #57
0
def render_policy_params(
    policy: Policy,
    env_spec: EnvSpec,
    cmap_name: str = 'RdBu',
    ax_hm: plt.Axes = None,
    annotate: bool = True,
    annotation_valfmt: str = '{x:.2f}',
    colorbar_label: str = '',
    xlabel: str = None,
    ylabel: str = None,
) -> plt.Figure:
    """
    Plot the weights and biases as images, and a color bar.

    .. note::
        If you want to have a tight layout, it is best to pass axes of a figure with `tight_layout=True` or
        `constrained_layout=True`.

    :param policy: policy to visualize
    :param env_spec: environment specification
    :param cmap_name: name of the color map, e.g. 'inferno', 'RdBu', or 'viridis'
    :param ax_hm: axis to draw the heat map onto, if equal to None a new figure is opened
    :param annotate: select if the heat map should be annotated
    :param annotation_valfmt: format of the annotations inside the heat map, irrelevant if annotate = False
    :param colorbar_label: label for the color bar
    :param xlabel: label for the x axis
    :param ylabel: label for the y axis
    :return: handles to figures
    """
    if not isinstance(policy, nn.Module):
        raise pyrado.TypeErr(given=policy, expected_type=nn.Module)
    cmap = plt.get_cmap(cmap_name)

    # Create axes and subplots depending on the NN structure
    num_rows = len(list(policy.parameters()))
    fig = plt.figure(figsize=(14, 10), tight_layout=False)
    gs = fig.add_gridspec(num_rows, 2,
                          width_ratios=[14,
                                        1])  # right column is the color bar
    ax_cb = fig.add_subplot(gs[:, 1])

    # Accumulative norm for the colors
    norm = AccNorm()

    for i, (name, param) in enumerate(policy.named_parameters()):
        # Create current axis
        ax = plt.subplot(gs[i, 0])
        ax.set_title(name.replace('_', '\_'))

        # Convert the data and plot the image with the colors proportional to the parameters
        if param.ndim == 3:
            # For example convolution layers
            param = param.flatten(0)
            print_cbt(
                f'Flattened the first dimension of the {name} parameter tensor.',
                'y')
        data = np.atleast_2d(param.detach().numpy())

        img = plt.imshow(data,
                         cmap=cmap,
                         norm=norm,
                         aspect='auto',
                         origin='lower')

        if annotate:
            _annotate_img(
                img,
                thold_lo=0.75 * min(policy.param_values).detach().numpy(),
                thold_up=0.75 * max(policy.param_values).detach().numpy(),
                valfmt=annotation_valfmt)

        # Prepare the ticks
        if isinstance(policy, ADNPolicy):
            if name == 'obs_layer.weight':
                ax.set_xticks(np.arange(env_spec.obs_space.flat_dim))
                ax.set_yticks(np.arange(env_spec.act_space.flat_dim))
                ax.set_xticklabels(
                    ensure_no_subscript(env_spec.obs_space.labels))
                ax.set_yticklabels(ensure_math_mode(env_spec.act_space.labels))
            elif name in [
                    'obs_layer.bias', 'nonlin_layer.log_weight',
                    'nonlin_layer.bias'
            ]:
                ax.set_xticks(np.arange(env_spec.act_space.flat_dim))
                ax.set_xticklabels(ensure_math_mode(env_spec.act_space.labels))
                ax.yaxis.set_major_locator(ticker.NullLocator())
                ax.yaxis.set_minor_formatter(ticker.NullFormatter())
            elif name == 'prev_act_layer.weight':
                ax.set_xticks(np.arange(env_spec.act_space.flat_dim))
                ax.set_yticks(np.arange(env_spec.act_space.flat_dim))
                ax.set_xticklabels(ensure_math_mode(env_spec.act_space.labels))
                ax.set_yticklabels(ensure_math_mode(env_spec.act_space.labels))
            elif name in ['_log_tau', '_log_kappa', '_log_capacity']:
                ax.xaxis.set_major_locator(ticker.NullLocator())
                ax.yaxis.set_major_locator(ticker.NullLocator())
                ax.xaxis.set_minor_formatter(ticker.NullFormatter())
                ax.yaxis.set_minor_formatter(ticker.NullFormatter())
            else:
                ax.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))
                ax.yaxis.set_major_locator(ticker.MaxNLocator(integer=True))

        elif isinstance(policy, NFPolicy):
            if name == 'obs_layer.weight':
                ax.set_xticks(np.arange(env_spec.obs_space.flat_dim))
                ax.yaxis.set_major_locator(ticker.NullLocator())
                ax.set_xticklabels(
                    ensure_no_subscript(env_spec.obs_space.labels))
                ax.yaxis.set_minor_formatter(ticker.NullFormatter())
            elif name in [
                    '_log_tau', '_potentials_init', 'resting_level',
                    'obs_layer.bias', 'conv_layer.weight',
                    'nonlin_layer.log_weight', 'nonlin_layer.bias'
            ]:
                ax.xaxis.set_major_locator(ticker.NullLocator())
                ax.yaxis.set_major_locator(ticker.NullLocator())
                ax.xaxis.set_minor_formatter(ticker.NullFormatter())
                ax.yaxis.set_minor_formatter(ticker.NullFormatter())
            elif name == 'act_layer.weight':
                ax.xaxis.set_major_locator(ticker.NullLocator())
                ax.set_yticks(np.arange(env_spec.act_space.flat_dim))
                ax.xaxis.set_minor_formatter(ticker.NullFormatter())
                ax.set_yticklabels(ensure_math_mode(env_spec.act_space.labels))
            else:
                ax.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))
                ax.yaxis.set_major_locator(ticker.MaxNLocator(integer=True))

        # Add the color bar (call this within the loop to make the AccNorm scan every image)
        colorbar.ColorbarBase(ax_cb,
                              cmap=cmap,
                              norm=norm,
                              label=colorbar_label)

    # Increase the vertical white spaces between the subplots
    plt.subplots_adjust(hspace=.7, wspace=0.1)

    # Set the labels
    if xlabel is not None:
        ax_hm.set_xlabel(xlabel)
    if ylabel is not None:
        ax_hm.set_ylabel(ylabel)

    return fig
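The core plotting idea above, one imshow per parameter tensor with a color bar, can be reproduced with plain matplotlib; a minimal sketch with made-up weights (illustrative only, not the pyrado API):

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
params = {'layer.weight': rng.normal(size=(4, 6)),
          'layer.bias': rng.normal(size=4)}

fig, axes = plt.subplots(len(params), 1, figsize=(6, 4), constrained_layout=True)
for ax, (name, data) in zip(np.atleast_1d(axes), params.items()):
    img = ax.imshow(np.atleast_2d(data), cmap='RdBu', aspect='auto', origin='lower')
    ax.set_title(name)
    fig.colorbar(img, ax=ax)
plt.show()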
Example #58
0
def get_timestep_netcdf(force, tstep, point=None):
    """
    Pull out a time step from the forcing files and
    place that time step into a dict

    Args:
        force:   input array of forcing variables
        tstep:   datetime timestep

    Returns:
        inpt:    dictionary of forcing variable images
    """

    inpt = {}

    # map function from these values to the ones required by snobal
    map_val = {
        'air_temp': 'T_a',
        'net_solar': 'S_n',
        'thermal': 'I_lw',
        'vapor_pressure': 'e_a',
        'wind_speed': 'u',
        'soil_temp': 'T_g',
        'precip_mass': 'm_pp',
        'percent_snow': 'percent_snow',
        'snow_density': 'rho_snow',
        'precip_temp': 'T_pp'
    }

    for f in force.keys():

        if isinstance(force[f], np.ndarray):
            # If it's a constant value then just read in the numpy array
            # pull out the value
            if point is None:
                inpt[map_val[f]] = force[f].copy(
                )  # ensures not a reference (especially if T_g)
            else:
                inpt[map_val[f]] = np.atleast_2d(force[f][point[0], point[1]])

        else:
            # determine the index in the netCDF file

            # compare the dimensions and variables to get the variable name
            v = list(
                set(force[f].variables.keys()) -
                set(force[f].dimensions.keys()))
            v = [fv for fv in v if fv != 'projection'][0]

            # make sure you're in the same timezone
            if hasattr(force[f].variables['time'], 'time_zone'):
                tstep_zone = tstep.astimezone(
                    pytz.timezone(force[f].variables['time'].time_zone))
                tstep_zone = tstep.tz_localize(None)
            else:
                tstep_zone = tstep.tz_localize(None)

            # find the index based on the time step
            t = nc.date2index(tstep_zone,
                              force[f].variables['time'],
                              calendar=force[f].variables['time'].calendar,
                              select='exact')

            # pull out the value
            if point is None:
                inpt[map_val[f]] = force[f].variables[v][t, :].astype(
                    np.float64)
            else:
                inpt[map_val[f]] = np.atleast_2d(
                    force[f].variables[v][t, point[0],
                                          point[1]].astype(np.float64))

    # convert from C to K
    inpt['T_a'] += FREEZE
    inpt['T_pp'] += FREEZE
    inpt['T_g'] += FREEZE

    return inpt
Example #59
0
def test_solve_continuous_are():
    mat6 = np.load(os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                'data', 'carex_6_data.npz'))
    mat15 = np.load(os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                 'data', 'carex_15_data.npz'))
    mat18 = np.load(os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                 'data', 'carex_18_data.npz'))
    mat19 = np.load(os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                 'data', 'carex_19_data.npz'))
    mat20 = np.load(os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                 'data', 'carex_20_data.npz'))
    cases = [
        # Carex examples taken from (with default parameters):
        # [1] P.BENNER, A.J. LAUB, V. MEHRMANN: 'A Collection of Benchmark
        #     Examples for the Numerical Solution of Algebraic Riccati
        #     Equations II: Continuous-Time Case', Tech. Report SPC 95_23,
        #     Fak. f. Mathematik, TU Chemnitz-Zwickau (Germany), 1995.
        #
        # The format of the data is (a, b, q, r, knownfailure), where
        # knownfailure is None if the test passes or a string
        # indicating the reason for failure.
        #
        # Test Case 0: carex #1
        (np.diag([1.], 1),
         np.array([[0], [1]]),
         block_diag(1., 2.),
         1,
         None),
        # Test Case 1: carex #2
        (np.array([[4, 3], [-4.5, -3.5]]),
         np.array([[1], [-1]]),
         np.array([[9, 6], [6, 4.]]),
         1,
         None),
        # Test Case 2: carex #3
        (np.array([[0, 1, 0, 0],
                   [0, -1.89, 0.39, -5.53],
                   [0, -0.034, -2.98, 2.43],
                   [0.034, -0.0011, -0.99, -0.21]]),
         np.array([[0, 0], [0.36, -1.6], [-0.95, -0.032], [0.03, 0]]),
         np.array([[2.313, 2.727, 0.688, 0.023],
                   [2.727, 4.271, 1.148, 0.323],
                   [0.688, 1.148, 0.313, 0.102],
                   [0.023, 0.323, 0.102, 0.083]]),
         np.eye(2),
         None),
        # Test Case 3: carex #4
        (np.array([[-0.991, 0.529, 0, 0, 0, 0, 0, 0],
                   [0.522, -1.051, 0.596, 0, 0, 0, 0, 0],
                   [0, 0.522, -1.118, 0.596, 0, 0, 0, 0],
                   [0, 0, 0.522, -1.548, 0.718, 0, 0, 0],
                   [0, 0, 0, 0.922, -1.64, 0.799, 0, 0],
                   [0, 0, 0, 0, 0.922, -1.721, 0.901, 0],
                   [0, 0, 0, 0, 0, 0.922, -1.823, 1.021],
                   [0, 0, 0, 0, 0, 0, 0.922, -1.943]]),
         np.array([[3.84, 4.00, 37.60, 3.08, 2.36, 2.88, 3.08, 3.00],
                   [-2.88, -3.04, -2.80, -2.32, -3.32, -3.82, -4.12, -3.96]]
                  ).T * 0.001,
         np.array([[1.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.1],
                   [0.0, 1.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0],
                   [0.0, 0.0, 1.0, 0.0, 0.0, 0.5, 0.0, 0.0],
                   [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
                   [0.5, 0.1, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0],
                   [0.0, 0.0, 0.5, 0.0, 0.0, 0.1, 0.0, 0.0],
                   [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0],
                   [0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1]]),
         np.eye(2),
         None),
        # Test Case 4: carex #5
        (np.array(
          [[-4.019, 5.120, 0., 0., -2.082, 0., 0., 0., 0.870],
           [-0.346, 0.986, 0., 0., -2.340, 0., 0., 0., 0.970],
           [-7.909, 15.407, -4.069, 0., -6.450, 0., 0., 0., 2.680],
           [-21.816, 35.606, -0.339, -3.870, -17.800, 0., 0., 0., 7.390],
           [-60.196, 98.188, -7.907, 0.340, -53.008, 0., 0., 0., 20.400],
           [0, 0, 0, 0, 94.000, -147.200, 0., 53.200, 0.],
           [0, 0, 0, 0, 0, 94.000, -147.200, 0, 0],
           [0, 0, 0, 0, 0, 12.800, 0.000, -31.600, 0],
           [0, 0, 0, 0, 12.800, 0.000, 0.000, 18.800, -31.600]]),
         np.array([[0.010, -0.011, -0.151],
                   [0.003, -0.021, 0.000],
                   [0.009, -0.059, 0.000],
                   [0.024, -0.162, 0.000],
                   [0.068, -0.445, 0.000],
                   [0.000, 0.000, 0.000],
                   [0.000, 0.000, 0.000],
                   [0.000, 0.000, 0.000],
                   [0.000, 0.000, 0.000]]),
         np.eye(9),
         np.eye(3),
         None),
        # Test Case 5: carex #6
        (mat6['A'], mat6['B'], mat6['Q'], mat6['R'], None),
        # Test Case 6: carex #7
        (np.array([[1, 0], [0, -2.]]),
         np.array([[1e-6], [0]]),
         np.ones((2, 2)),
         1.,
         'Bad residual accuracy'),
        # Test Case 7: carex #8
        (block_diag(-0.1, -0.02),
         np.array([[0.100, 0.000], [0.001, 0.010]]),
         np.array([[100, 1000], [1000, 10000]]),
         np.ones((2, 2)) + block_diag(1e-6, 0),
         None),
        # Test Case 8: carex #9
        (np.array([[0, 1e6], [0, 0]]),
         np.array([[0], [1.]]),
         np.eye(2),
         1.,
         None),
        # Test Case 9: carex #10
        (np.array([[1.0000001, 1], [1., 1.0000001]]),
         np.eye(2),
         np.eye(2),
         np.eye(2),
         None),
        # Test Case 10: carex #11
        (np.array([[3, 1.], [4, 2]]),
         np.array([[1], [1]]),
         np.array([[-11, -5], [-5, -2.]]),
         1.,
         None),
        # Test Case 11: carex #12
        (np.array([[7000000., 2000000., -0.],
                   [2000000., 6000000., -2000000.],
                   [0., -2000000., 5000000.]]) / 3,
         np.eye(3),
         np.array([[1., -2., -2.], [-2., 1., -2.], [-2., -2., 1.]]).dot(
                np.diag([1e-6, 1, 1e6])).dot(
            np.array([[1., -2., -2.], [-2., 1., -2.], [-2., -2., 1.]])) / 9,
         np.eye(3) * 1e6,
         'Bad Residual Accuracy'),
        # Test Case 12: carex #13
        (np.array([[0, 0.4, 0, 0],
                   [0, 0, 0.345, 0],
                   [0, -0.524e6, -0.465e6, 0.262e6],
                   [0, 0, 0, -1e6]]),
         np.array([[0, 0, 0, 1e6]]).T,
         np.diag([1, 0, 1, 0]),
         1.,
         None),
        # Test Case 13: carex #14
        (np.array([[-1e-6, 1, 0, 0],
                   [-1, -1e-6, 0, 0],
                   [0, 0, 1e-6, 1],
                   [0, 0, -1, 1e-6]]),
         np.ones((4, 1)),
         np.ones((4, 4)),
         1.,
         None),
        # Test Case 14: carex #15
        (mat15['A'], mat15['B'], mat15['Q'], mat15['R'], None),
        # Test Case 15: carex #16
        (np.eye(64, 64, k=-1) + np.eye(64, 64)*(-2.) + np.rot90(
                 block_diag(1, np.zeros((62, 62)), 1)) + np.eye(64, 64, k=1),
         np.eye(64),
         np.eye(64),
         np.eye(64),
         None),
        # Test Case 16: carex #17
        (np.diag(np.ones((20, )), 1),
         np.flipud(np.eye(21, 1)),
         np.eye(21, 1) * np.eye(21, 1).T,
         1,
         'Bad Residual Accuracy'),
        # Test Case 17: carex #18
        (mat18['A'], mat18['B'], mat18['Q'], mat18['R'], None),
        # Test Case 18: carex #19
        (mat19['A'], mat19['B'], mat19['Q'], mat19['R'],
         'Bad Residual Accuracy'),
        # Test Case 19: carex #20
        (mat20['A'], mat20['B'], mat20['Q'], mat20['R'],
         'Bad Residual Accuracy')
        ]
    # Makes the minimum precision requirements customized to the test.
    # Here numbers represent the number of decimals that agrees with zero
    # matrix when the solution x is plugged in to the equation.
    #
    # res = array([[8e-3,1e-16],[1e-16,1e-20]]) --> min_decimal[k] = 2
    #
    # If the test is failing use "None" for that entry.
    #
    min_decimal = (14, 12, 13, 14, 11, 6, None, 5, 7, 14, 14,
                   None, 9, 14, 13, 14, None, 12, None, None)

    def _test_factory(case, dec):
        """Checks if 0 = XA + A'X - XB(R)^{-1} B'X + Q is true"""
        a, b, q, r, knownfailure = case
        if knownfailure:
            pytest.xfail(reason=knownfailure)

        x = solve_continuous_are(a, b, q, r)
        res = x.dot(a) + a.conj().T.dot(x) + q
        out_fact = x.dot(b)
        res -= out_fact.dot(solve(np.atleast_2d(r), out_fact.conj().T))
        assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)
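Outside the parametrized test, the residual check boils down to a few lines; a sketch using the carex #2 matrices from Test Case 1 above:

import numpy as np
from scipy.linalg import solve_continuous_are

a = np.array([[4, 3], [-4.5, -3.5]])
b = np.array([[1], [-1]])
q = np.array([[9, 6], [6, 4.]])
r = np.atleast_2d(1.)

x = solve_continuous_are(a, b, q, r)
res = x.dot(a) + a.T.dot(x) + q - x.dot(b).dot(np.linalg.solve(r, b.T.dot(x)))
print(np.abs(res).max())   # ~0 up to numerical precision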
Example #60
0
def surface_to_parcel(values,
                      labels,
                      weights=None,
                      target_labels=None,
                      red_op='mean',
                      axis=0,
                      dtype=float):
    """Summarize data in `values` according to `labels` (author: @OualidBenkarim)

    Parameters
    ----------
    values : 1D or 2D ndarray
        Array of values.
    labels : name of parcellation or 1D ndarray, shape = (n_lab,)
        Labels used to summarize values.
    weights : 1D ndarray, shape = (n_lab,), optional
        Weights associated with labels. Only used when `red_op` is
        'average', 'mean', 'sum' or 'mode'. Weights are not normalized.
        Default is None.
    target_labels : 1D ndarray, optional
        Target labels. Arrange new array following the ordering of labels
        in the `target_labels`. When None, new array is arranged in ascending
        order of `labels`. Default is None.
    red_op : str or callable, optional
        How to summarize data. If str, options are: {'min', 'max', 'sum',
        'mean', 'median', 'mode', 'average'}. If callable, it should receive
        a 1D array of values, array of weights (or None) and return a scalar
        value. Default is 'mean'.
    dtype : dtype, optional
        Data type of output array. Default is float.
    axis : {0, 1}, optional
        If ``axis == 0``, apply to each row (reduce number of columns per row).
        Otherwise, apply to each column (reduce number of rows per column).
        Default is 0.

    Returns
    -------
    target_values : ndarray
        Summarized target values.
    """
    if isinstance(labels, str):
        fname = labels + '.csv'
        parc_pth = os.path.dirname(
            os.path.dirname(__file__)) + '/datasets/parcellations/' + fname
        labels = np.loadtxt(parc_pth, dtype=int)

    if axis == 1 and values.ndim == 1:
        axis = 0

    if target_labels is None:
        uq_tl = np.unique(labels)
        idx_back = None
    else:
        uq_tl, idx_back = np.unique(target_labels, return_inverse=True)

    if weights is not None:
        weights = np.atleast_2d(weights)

    v2d = np.atleast_2d(values)
    if axis == 1:
        v2d = v2d.T

    if isinstance(red_op, str):
        fred = _get_redop(red_op, weights=weights, axis=1)
    else:
        fred = red_op

    mapped = np.empty((v2d.shape[0], uq_tl.size), dtype=dtype)
    for ilab, lab in enumerate(uq_tl):
        mask = labels == lab
        wm = None if weights is None else weights[:, mask]

        if isinstance(red_op, str):
            mapped[:, ilab] = fred(v2d[:, mask], wm)

        else:
            for idx in range(v2d.shape[0]):
                mapped[idx, ilab] = fred(v2d[idx, mask], wm)

    if idx_back is not None:
        mapped = mapped[:, idx_back]

    if axis == 1:
        mapped = mapped.T

    if values.ndim == 1:
        return mapped[0]
    return mapped
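The label-wise reduction above can be illustrated without the surrounding helpers; a minimal sketch of the 'mean' case on made-up vertex data:

import numpy as np

values = np.array([1., 2., 3., 10., 20., 30.])   # one value per vertex
labels = np.array([0, 0, 0, 1, 1, 1])            # parcel label of each vertex

parcels = np.unique(labels)
parcel_means = np.array([values[labels == lab].mean() for lab in parcels])
print(parcel_means)                              # [ 2. 20.]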