def __init__(self, brand, counts, num_states=2, num_obs=3):
     
     self.mem = 20
     self.brand = brand
     self.counts = np.asanyarray(counts, dtype='i4')   
     self.N = num_states # number of hidden states
     self.K = num_obs    # number of possible observations
     # Initialize the transition probability matrix A.
     if self.N == 2:
         self.A = np.asanyarray([[0.9, 0.1],[0.1, 0.9]])
     
     else:
         A = np.random.uniform(size=(self.N,self.N))
         d = 1 / A.sum(axis=1)
         self.A = np.einsum('ij,i->ij', A, d)
         
     # Initialize hidden state probabilities, assume last position
     # is safety issue.
     if self.N == 2:
         self.pi = np.asanyarray([0.95, 0.05])
         
     else:
         pi = np.random.uniform(size=self.N)
         self.pi = pi / pi.sum()
         
     # Initialize the emission probability matrix B.
     if self.N == 2 and self.K == 3:
         self.B = np.asanyarray([[0.70, 0.2, 0.1], [0.01, 0.05, 0.94]])
         
     else:
         B = np.random.uniform(size=(self.N,self.K))
         d = 1 / B.sum(axis=1)
         self.B = np.einsum('ij,i->ij', B, d)
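The random-initialization branches above row-normalize A and B with an einsum trick: each row i is scaled by 1 / row_sum_i so that every row sums to one. A minimal self-contained sketch of that step (NumPy only, values are illustrative):

import numpy as np

# Row-normalize a random matrix so each row sums to 1, as in the else-branches above.
A = np.random.uniform(size=(3, 3))
A_norm = np.einsum('ij,i->ij', A, 1.0 / A.sum(axis=1))
print(A_norm.sum(axis=1))  # approximately [1. 1. 1.]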
Example #2
def m_outer(A,B):
    # Computes outer product over the last axes of A and B. The other
    # axes are broadcasted. Thus, if A has shape (..., N) and B has
    # shape (..., M), then the result has shape (..., N, M)
    A = np.asanyarray(A)
    B = np.asanyarray(B)
    return A[...,np.newaxis]*B[...,np.newaxis,:]
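A quick usage sketch (assuming the m_outer definition above is in scope): with A of shape (..., N) and B of shape (..., M) the result has shape (..., N, M), and for a single pair it reduces to the ordinary outer product.

import numpy as np

A = np.random.rand(5, 3)   # (..., N) with N = 3
B = np.random.rand(5, 4)   # (..., M) with M = 4
out = m_outer(A, B)
print(out.shape)           # (5, 3, 4): one outer product per leading index
print(np.allclose(m_outer(A[0], B[0]), np.outer(A[0], B[0])))  # True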
Example #3
    def forward(self, state, action, Reward, state_dash, episode_end):
        num_of_batch = state.shape[0]
        s = Variable(state)
        s_dash = Variable(state_dash)

        Q = self.Q_func(s)  # Get Q-value
        # Generate Target Signals
        tmp2 = self.Q_func(s_dash)
        tmp2 = list(map(np.argmax, tmp2.data.get()))  # argmaxQ(s',a)
        tmp = self.Q_func_target(s_dash)  # Q'(s',*)
        tmp = list(tmp.data.get())
        # select Q'(s',*) due to argmaxQ(s',a)
        res1 = []
        for i in range(num_of_batch):
            res1.append(tmp[i][tmp2[i]])

        #max_Q_dash = np.asanyarray(tmp, dtype=np.float32)
        max_Q_dash = np.asanyarray(res1, dtype=np.float32)
        target = np.asanyarray(Q.data.get(), dtype=np.float32)
        for i in range(num_of_batch):
            if not episode_end[i][0]:
                tmp_ = np.sign(Reward[i]) + self.gamma * max_Q_dash[i]
            else:
                tmp_ = np.sign(Reward[i])

            action_index = self.action_to_index(action[i])
            target[i, action_index] = tmp_
        # TD-error clipping
        td = Variable(cuda.to_gpu(target)) - Q  # TD error
        td_tmp = td.data + 1000.0 * (abs(td.data) <= 1)  # Avoid zero division
        td_clip = td * (abs(td.data) <= 1) + td/abs(td_tmp) * (abs(td.data) > 1)

        zero_val = Variable(cuda.to_gpu(np.zeros((self.replay_size, self.num_of_actions), dtype=np.float32)))
        loss = F.mean_squared_error(td_clip, zero_val)
        return loss, Q
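The two lines labelled "TD-error clipping" implement a piecewise rule: keep the TD error where |td| <= 1 and replace it with its sign (td / |td|) where |td| > 1; td_tmp only pads the denominator so the division never hits zero. A small NumPy-only sketch of the same arithmetic (an illustration, not the Chainer Variable version used above):

import numpy as np

td = np.array([-3.0, -0.5, 0.0, 0.4, 2.5], dtype=np.float32)
td_tmp = td + 1000.0 * (np.abs(td) <= 1)          # avoid zero division
td_clip = td * (np.abs(td) <= 1) + td / np.abs(td_tmp) * (np.abs(td) > 1)
print(td_clip)  # [-1.  -0.5  0.   0.4  1. ]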
Example #4
def ravel_indices(indices, shape):
    """
    Convert nD to 1D indices for an array of given shape.
        flat_indices = ravel_indices(indices, size)
    
    :Input:
        indices: array of indices. Should be integer and have shape=([S],D), 
                 for S the "subshape" of indices array, pointing to a D dimensional array.
        shape:   shape of the nd-array these indices are pointing to (a tuple/list/ of length D)
        
    :Output: 
        flat_indices: an array of shape S
    
    :Note: 
       This is the opposite of unravel_indices: for any tuple 'shape'
          ind is equal to    ravel_indices(unravel_indices(ind,shape),shape)
                   and to  unravel_indices(  ravel_indices(ind,shape),shape)
    """
    dim_prod = _np.cumprod([1] + list(shape)[:0:-1])[_np.newaxis,::-1]
    ind = _np.asanyarray(indices)
    S   = ind.shape[:-1]
    K   = _np.asanyarray(shape).size
    ind.shape = S + (K,)
    
    return _np.sum(ind*dim_prod,-1)
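A short usage sketch (assuming the ravel_indices definition above, with numpy imported as _np, is in scope); for valid indices it agrees with numpy's built-in ravel_multi_index:

import numpy as np

shape = (4, 5, 6)
idx = np.array([[1, 2, 3],
                [0, 4, 5]])           # two 3-D indices into a (4, 5, 6) array
flat = ravel_indices(idx, shape)      # array([45, 29])
print(np.array_equal(flat, np.ravel_multi_index(idx.T, shape)))  # True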
Example #5
 def eval(self, x, y):
     """Evaluate model at a given position ``(x, y)`` position.
     """
     x = np.asanyarray(x, dtype=float)
     y = np.asanyarray(y, dtype=float)
     parvals = self.parvals(x)
     return self._eval_y(y, parvals)
Example #6
def barycentric_to_points(triangles, barycentric):
    """
    Convert a list of barycentric coordinates on a list of triangles
    to cartesian points.

    Parameters
    ------------
    triangles : (n, 3, 3) float
      Triangles in space
    barycentric : (n, 2) float
      Barycentric coordinates

    Returns
    -----------
    points : (m, 3) float
      Points in space
    """
    barycentric = np.asanyarray(barycentric, dtype=np.float64)
    triangles = np.asanyarray(triangles, dtype=np.float64)

    if not util.is_shape(triangles, (-1, 3, 3)):
        raise ValueError('Triangles must be (n,3,3)!')
    if barycentric.shape == (2,):
        barycentric = np.ones((len(triangles), 2),
                              dtype=np.float64) * barycentric
    if util.is_shape(barycentric, (len(triangles), 2)):
        barycentric = np.column_stack((barycentric,
                                       1.0 - barycentric.sum(axis=1)))
    elif not util.is_shape(barycentric, (len(triangles), 3)):
        raise ValueError('Barycentric shape incorrect!')

    barycentric /= barycentric.sum(axis=1).reshape((-1, 1))
    points = (triangles * barycentric.reshape((-1, 3, 1))).sum(axis=1)

    return points
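The heart of the conversion is the weighted vertex sum two lines above the return. A minimal NumPy-only illustration of that formula (bypassing the trimesh shape checks) for one triangle and centroid coordinates:

import numpy as np

tri = np.array([[[0., 0., 0.],
                 [1., 0., 0.],
                 [0., 1., 0.]]])              # one triangle, shape (1, 3, 3)
bary = np.array([[1 / 3., 1 / 3., 1 / 3.]])   # centroid in barycentric coordinates
pts = (tri * bary.reshape((-1, 3, 1))).sum(axis=1)
print(pts)  # [[0.33333333 0.33333333 0.        ]]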
Example #7
    def fit(self, X, y, **params):
        """
        Fit Ridge regression model

        Parameters
        ----------
        X : numpy array of shape [n_samples,n_features]
            Training data
        y : numpy array of shape [n_samples]
            Target values

        Returns
        -------
        self : returns an instance of self.
        """
        self._set_params(**params)

        X = np.asanyarray(X, dtype=float)
        y = np.asanyarray(y, dtype=float)

        n_samples, n_features = X.shape

        X, y, Xmean, ymean = self._center_data(X, y)

        if n_samples > n_features:
            # w = inv(X^t X + alpha*Id) * X.T y
            self.coef_ = linalg.solve(np.dot(X.T, X) + self.alpha * np.eye(n_features), np.dot(X.T, y))
        else:
            # w = X.T * inv(X X^t + alpha*Id) y
            self.coef_ = np.dot(X.T, linalg.solve(np.dot(X, X.T) + self.alpha * np.eye(n_samples), y))

        self._set_intercept(Xmean, ymean)
        return self
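The two branches compute the same ridge solution, w = (X^T X + alpha*I)^-1 X^T y when n_samples > n_features and w = X^T (X X^T + alpha*I)^-1 y otherwise; the second form only requires solving an n_samples-sized system. A quick NumPy/SciPy check of the equivalence (illustrative data, centered inputs assumed):

import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
X = rng.randn(20, 5)
y = rng.randn(20)
alpha = 0.5

w_primal = linalg.solve(X.T @ X + alpha * np.eye(5), X.T @ y)
w_dual = X.T @ linalg.solve(X @ X.T + alpha * np.eye(20), y)
print(np.allclose(w_primal, w_dual))  # True: both forms solve the same ridge problem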
Example #8
def mean_tpr(predicted, target):
    """Mean True Positive Rate (TPR).

    Mean of estimates per each class
    """
    if len(predicted) != len(target) or not len(target):
        raise ValueError(
            "Both predicted and target should be of the same non-0 length")
    targets = set(predicted)
    targets.update(target)

    target = np.asanyarray(target)
    predicted = np.asanyarray(predicted)

    TPRs = []
    for t in targets:
        Pmask = target == t
        TPmask = predicted[Pmask] == t
        assert len(TPmask) <= len(Pmask), "do not see how this could have been violated unless indexing is screwed up..."
        P = np.sum(Pmask)
        TP = np.sum(TPmask)
        if not P:
            # better be safe than sorry
            raise ValueError(
                "For target %s there were only predicted values, but no "
                "expected ones. TPR is undefined in this case, so use some "
                "other errorfx if you really need to deal with such data (or "
                "verify if your shuffling etc is correct)"
                % str(t)
            )
        TPRs.append(float(TP)/P)
    return np.mean(TPRs)
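A brief hedged example (assuming the mean_tpr definition above is in scope): with two classes, the per-class recalls are averaged without weighting by class size.

target = ['a', 'a', 'a', 'b', 'b']
predicted = ['a', 'a', 'b', 'b', 'b']
# TPR('a') = 2/3, TPR('b') = 2/2, so the unweighted mean is about 0.833
print(mean_tpr(predicted, target))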
Example #9
    def fit(self, X, y):
        """
        Fit linear model.

        Parameters
        ----------
        X : numpy array of shape [n_samples,n_features]
            Training data
        y : numpy array of shape [n_samples]
            Target values
        fit_intercept : boolean, optional
            whether to calculate the intercept for this model. If set
            to false, no intercept will be used in calculations
            (e.g. data is expected to be already centered).
        normalize : boolean, optional
            If True, the regressors X are normalized

        Returns
        -------
        self : returns an instance of self.
        """
        X = np.asanyarray(X)
        y = np.asanyarray(y)

        X, y, X_mean, y_mean, X_std = self._center_data(X, y,
                self.fit_intercept, self.normalize, self.copy_X)

        self.coef_, self.residues_, self.rank_, self.singular_ = \
                np.linalg.lstsq(X, y)

        self._set_intercept(X_mean, y_mean, X_std)
        return self
Example #10
    def fit(self, X, y):
        """Fit Gaussian Naive Bayes according to X, y

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like, shape = [n_samples]
            Target values.

        Returns
        -------
        self : object
            Returns self.
        """

        X = np.asanyarray(X)
        y = np.asanyarray(y)

        theta = []
        sigma = []
        class_prior = []
        unique_y = unique(y)
        for yi in unique_y:
            theta.append(np.mean(X[y == yi, :], 0))
            sigma.append(np.var(X[y == yi, :], 0))
            class_prior.append(float(np.sum(y == yi)) / np.size(y))
        self.theta = np.array(theta)
        self.sigma = np.array(sigma)
        self.class_prior = np.array(class_prior)
        self.unique_y = unique_y
        return self
Example #11
    def __call__(self, projectables, **info):
        if len(projectables) != 2:
            raise ValueError("Expected 2 datasets, got %d" %
                             (len(projectables), ))

        # TODO: support datasets with palette to delegate this to the image
        # writer.

        data, palette = projectables
        palette = np.asanyarray(palette).squeeze() / 255.0
        colormap = self.build_colormap(palette, data.dtype, data.attrs)

        channels, colors = colormap.palettize(np.asanyarray(data.squeeze()))
        channels = palette[channels]
        fill_value = data.attrs.get('_FillValue', np.nan)
        if np.isnan(fill_value):
            mask = data.notnull()
        else:
            mask = data != data.attrs['_FillValue']
        r = xr.DataArray(channels[:, :, 0].reshape(data.shape),
                         dims=data.dims, coords=data.coords,
                         attrs=data.attrs).where(mask)
        g = xr.DataArray(channels[:, :, 1].reshape(data.shape),
                         dims=data.dims, coords=data.coords,
                         attrs=data.attrs).where(mask)
        b = xr.DataArray(channels[:, :, 2].reshape(data.shape),
                         dims=data.dims, coords=data.coords,
                         attrs=data.attrs).where(mask)

        res = super(PaletteCompositor, self).__call__((r, g, b), **data.attrs)
        res.attrs['_FillValue'] = np.nan
        return res
Example #12
    def set_value(self, value, force=False):
        # Record new value and increment counter

        # Value can't be updated if observed=True
        if self.observed and not force:
            raise AttributeError('Stochastic '+self.__name__+'\'s value cannot be updated if observed flag is set')

        if self.verbose > 0:
            print_('\t' + self.__name__ + ': value set to ', value)

        # Save current value as last_value
        # Don't copy because caching depends on the object's reference.
        self.last_value = self._value

        if self.mask is None:

            if self.dtype.kind != 'O':
                self._value = asanyarray(value, dtype=self.dtype)
                self._value.flags['W']=False
            else:
                self._value = value

        else:

            new_value = self.value.copy()

            new_value[self.mask] = asanyarray(value, dtype=self.dtype)[self.mask]
            self._value = new_value

        self.counter.click()
Example #13
def chi2(N_S, B, S, sigma2):
    r"""Chi-square statistic with user-specified variance.

     .. math::

         \chi^2 = \frac{(N_S - B - S) ^ 2}{\sigma ^ 2}

    Parameters
    ----------
    N_S : array_like
        Number of observed counts
    B : array_like
        Model background
    S : array_like
        Model signal
    sigma2 : array_like
        Variance

    Returns
    -------
    stat : ndarray
        Statistic per bin

    References
    ----------
    * Sherpa stats page (http://cxc.cfa.harvard.edu/sherpa/statistics/#chisq)
    """
    N_S = np.asanyarray(N_S, dtype=np.float64)
    B = np.asanyarray(B, dtype=np.float64)
    S = np.asanyarray(S, dtype=np.float64)
    sigma2 = np.asanyarray(sigma2, dtype=np.float64)

    stat = (N_S - B - S) ** 2 / sigma2

    return stat
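A quick hedged call (assuming the chi2 definition above is in scope; scalars broadcast like arrays): N_S = 10 observed counts against B = 4, S = 4 and sigma2 = 4 gives (10 - 4 - 4)^2 / 4 = 1.

print(chi2(N_S=10, B=4, S=4, sigma2=4))        # 1.0
print(chi2(N_S=[10, 12], B=4, S=4, sigma2=4))  # [1. 4.]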
Example #14
    def get_loss(self, state, action, reward, state_prime, episode_end):
        s = Variable(cuda.to_gpu(state))
        s_dash = Variable(cuda.to_gpu(state_prime))

        q = self.model.q_function(s)  # Get Q-value

        # Generate Target Signals
        tmp = self.model_target.q_function(s_dash)  # Q(s',*)
        tmp = list(map(np.max, tmp.data))  # max_a Q(s',a)
        max_q_prime = np.asanyarray(tmp, dtype=np.float32)
        target = np.asanyarray(copy.deepcopy(q.data.get()), dtype=np.float32)

        for i in range(self.replay_size):
            if episode_end[i][0] is True:
                tmp_ = np.sign(reward[i])
            else:
                #  The sign of reward is used as the reward of DQN!
                tmp_ = np.sign(reward[i]) + self.gamma * max_q_prime[i]

            target[i, action[i]] = tmp_

        # TD-error clipping
        td = Variable(cuda.to_gpu(target)) - q  # TD error
        td_tmp = td.data + 1000.0 * (abs(td.data) <= 1)  # Avoid zero division
        td_clip = td * (abs(td.data) <= 1) + td/abs(td_tmp) * (abs(td.data) > 1)

        zero_val = Variable(cuda.to_gpu(np.zeros((self.replay_size, self.n_act), dtype=np.float32)))
        loss = F.mean_squared_error(td_clip, zero_val)
        return loss, q
Example #15
    def __init__(self, points, inside=None):
        r"""
        Parameters
        ----------
        points : An Nx3 array of (*x*, *y*, *z*) triples in vector space
            These points define the boundary of the polygon.  It must
            be "closed", i.e., the last point is the same as the first.

            It may contain zero points, in which it defines the null
            polygon.  It may not contain one, two or three points.
            Four points are needed to define a triangle, since the
            polygon must be closed.

        inside : An (*x*, *y*, *z*) triple, optional
            This point must be inside the polygon.  If not provided, the
            mean of the points will be used.
        """
        if len(points) == 0:
            # handle special case of initializing with an empty list of
            # vertices (ticket #1079).
            self._inside = np.zeros(3)
            self._points = np.asanyarray(points)
            return
        elif len(points) < 3:
            raise ValueError("Polygon made of too few points")
        else:
            assert np.array_equal(points[0], points[-1]), 'Polygon is not closed'

        self._points = points = np.asanyarray(points)

        if inside is None:
            self._inside = self._find_new_inside(points)
        else:
            self._inside = np.asanyarray(inside)
Example #16
def eye(n, d=None):
    """ Creates an identity TT-matrix"""
    c = _matrix.matrix()
    c.tt = _vector.vector()
    if d is None:
        n0 = _np.asanyarray(n, dtype=_np.int32)
        c.tt.d = n0.size
    else:
        n0 = _np.asanyarray([n] * d, dtype=_np.int32)
        c.tt.d = d
    c.n = n0.copy()
    c.m = n0.copy()
    c.tt.n = (c.n) * (c.m)
    c.tt.r = _np.ones((c.tt.d + 1,), dtype=_np.int32)
    c.tt.get_ps()
    c.tt.alloc_core()
    for i in range(c.tt.d):
        c.tt.core[c.tt.ps[i] - 1:c.tt.ps[i + 1] - 1] = _np.eye(c.n[i]).flatten()
    return c
Example #17
def interp(x, xp, fp, left=None, right=None):
    """
    Like numpy.interp except for handling of running sequences of
    same values in `xp`.
    """
    x = numpy.asanyarray(x)
    xp = numpy.asanyarray(xp)
    fp = numpy.asanyarray(fp)

    if xp.shape != fp.shape:
        raise ValueError("xp and fp must have the same shape")

    ind = numpy.searchsorted(xp, x, side="right")
    fx = numpy.zeros(len(x))

    under = ind == 0
    over = ind == len(xp)
    between = ~under & ~over

    fx[under] = left if left is not None else fp[0]
    fx[over] = right if right is not None else fp[-1]

    if right is not None:
        # Fix points exactly on the right boundary.
        fx[x == xp[-1]] = fp[-1]

    ind = ind[between]

    df = (fp[ind] - fp[ind - 1]) / (xp[ind] - xp[ind - 1])

    fx[between] = df * (x[between] - xp[ind]) + fp[ind]

    return fx
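A minimal usage sketch (assuming the interp definition above is in scope): on strictly increasing xp it behaves like ordinary linear interpolation, with left/right overriding the boundary values.

import numpy

xp = numpy.array([0.0, 1.0, 2.0])
fp = numpy.array([0.0, 10.0, 20.0])
x = numpy.array([-1.0, 0.5, 1.5, 3.0])
print(interp(x, xp, fp))                     # [ 0.  5. 15. 20.]
print(interp(x, xp, fp, left=-1, right=99))  # [-1.  5. 15. 99.]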
Example #18
def meanabs(x1, x2, axis=0):
    """mean absolute error

    Parameters
    ----------
    x1, x2 : array_like
       The performance measure depends on the difference between these two
       arrays.
    axis : int
       axis along which the summary statistic is calculated

    Returns
    -------
    meanabs : ndarray or float
       mean absolute difference along given axis.

    Notes
    -----
    If ``x1`` and ``x2`` have different shapes, then they need to broadcast.
    This uses ``numpy.asanyarray`` to convert the input. Whether this is the
    desired result or not depends on the array subclass.

    """
    x1 = np.asanyarray(x1)
    x2 = np.asanyarray(x2)
    return np.mean(np.abs(x1-x2), axis=axis)
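A short usage sketch (assuming numpy as np and the meanabs definition above are in scope); with the default axis=0 the statistic is computed per column.

import numpy as np

x1 = np.array([[1.0, 2.0], [3.0, 4.0]])
x2 = np.array([[1.5, 1.0], [2.0, 6.0]])
print(meanabs(x1, x2))             # per column: [0.75 1.5 ]
print(meanabs(x1, x2, axis=None))  # overall mean: 1.125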
Example #19
def pure_nugget(theta, d):
    """
    Spatial independence correlation model (pure nugget).
    (Useful when one wants to solve an ordinary least squares problem!)

                                         n
    theta, dx --> r(theta, dx) = 1 if   sum |dx_i| == 0
                                       i = 1
                                 0 otherwise

    Parameters
    ----------
    theta : array_like
        None.

    d : array_like
        An array with shape (n_eval, n_features) giving the componentwise
        distances between locations x and x' at which the correlation model
        should be evaluated.

    Returns
    -------
    r : array_like
        An array with shape (n_eval, ) with the values of the autocorrelation
        model.
    """

    theta = np.asanyarray(theta, dtype=float)
    d = np.asanyarray(d, dtype=float)

    n_eval = d.shape[0]
    r = np.zeros(n_eval)
    r[np.all(d == 0.0, axis=1)] = 1.0

    return r
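A brief hedged example (assuming the pure_nugget definition above is in scope): the correlation is 1 only where every componentwise distance is exactly zero.

import numpy as np

theta = np.array([1.0])          # unused by this correlation model
d = np.array([[0.0, 0.0],
              [0.0, 0.5],
              [1.0, 2.0]])
print(pure_nugget(theta, d))     # [1. 0. 0.]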
Example #20
def kepler(k, r0, v0, tof):
    """Propagates orbit.

    This is a wrapper around kepler from ast2body.for.

    Parameters
    ----------
    k : float
        Gravitational constant of main attractor (km^3 / s^2).
    r0 : array
        Initial position (km).
    v0 : array
        Initial velocity (km / s).
    tof : float
        Time of flight (s).

    Raises
    ------
    RuntimeError
        If the status of the subroutine is not 'ok'.

    """
    r0 = np.asanyarray(r0).astype(float)
    v0 = np.asanyarray(v0).astype(float)
    tof = float(tof)
    assert r0.shape == (3,)
    assert v0.shape == (3,)
    r, v, error = _ast2body.kepler(r0, v0, tof, k)
    error = error.strip().decode('ascii')
    if error != 'ok':
        raise RuntimeError("There was an error: {}".format(error))
    return r, v
Example #21
def rmse(x1, x2, axis=0):
    """root mean squared error

    Parameters
    ----------
    x1, x2 : array_like
       The performance measure depends on the difference between these two
       arrays.
    axis : int
       axis along which the summary statistic is calculated

    Returns
    -------
    rmse : ndarray or float
       root mean squared error along given axis.

    Notes
    -----
    If ``x1`` and ``x2`` have different shapes, then they need to broadcast.
    This uses ``numpy.asanyarray`` to convert the input. Whether this is the
    desired result or not depends on the array subclass, for example
    numpy matrices will silently produce an incorrect result.

    """
    x1 = np.asanyarray(x1)
    x2 = np.asanyarray(x2)
    return np.sqrt(mse(x1, x2, axis=axis))
Example #22
    def fit(self, X, y):
        X = np.asanyarray(X, dtype='d')
        y = np.asanyarray(y, dtype='d')
        
        n = X.shape[0]
        num_dists = self.num_dists
        
        if self.num_dists > n:
            raise ParameterException('Number of distances is greater than ' + \
                    'num rows in X')

        if self.num_dists <= 0:
            self.R = None
        else:
            rand_idx = np.random.choice(X.shape[0], int(num_dists), replace=False)
            self.R = X[rand_idx]
            
            D = np.exp(-1.0 * ((cdist(X, self.R) ** 2) / (2 * (self.sigma ** 2))))
            X = np.hstack((X, D))

        #Un-comment for mrse code
        #X, y = mrse_transform(X, y)

        self.model = self.base_learner.fit(X, y)
        return self
Example #23
    def fit(self, X, y, **params):
        """
        Fit linear model.

        Parameters
        ----------
        X : numpy array of shape [n_samples,n_features]
            Training data
        y : numpy array of shape [n_samples]
            Target values
        fit_intercept : boolean, optional
            whether to calculate the intercept for this model. If set
            to false, no intercept will be used in calculations
            (e.g. data is expected to be already centered).

        Returns
        -------
        self : returns an instance of self.
        """
        self._set_params(**params)
        X = np.asanyarray(X)
        y = np.asanyarray(y)

        X, y, Xmean, ymean = LinearModel._center_data(X, y, self.fit_intercept)

        self.coef_, self.residues_, self.rank_, self.singular_ = \
                np.linalg.lstsq(X, y)

        self._set_intercept(Xmean, ymean)
        return self
Example #24
def background_error(n_off, alpha):
    r"""Estimate standard error on background
    in the on region from an off-region observation.

    .. math::

          \Delta\mu_{bkg} = \alpha \times \sqrt{n_{off}}

    Parameters
    ----------
    n_off : array_like
        Observed number of counts in the off region
    alpha : array_like
        On / off region exposure ratio for background events

    Returns
    -------
    background : ndarray
        Background estimate for the on region

    Examples
    --------
    >>> background_error(n_off=4, alpha=0.1)
    0.2
    >>> background_error(n_off=9, alpha=0.2)
    0.6
    """
    n_off = np.asanyarray(n_off, dtype=np.float64)
    alpha = np.asanyarray(alpha, dtype=np.float64)

    return alpha * sqrt(n_off)
Example #25
def excess(n_on, n_off, alpha):
    r"""Estimate excess in the on region for an on-off observation.

    .. math::

          \mu_{excess} = n_{on} - \alpha \times n_{off}

    Parameters
    ----------
    n_on : array_like
        Observed number of counts in the on region
    n_off : array_like
        Observed number of counts in the off region
    alpha : array_like
        On / off region exposure ratio for background events

    Returns
    -------
    excess : ndarray
        Excess estimate for the on region

    Examples
    --------
    >>> excess(n_on=10, n_off=20, alpha=0.1)
    8.0
    >>> excess(n_on=4, n_off=9, alpha=0.5)
    -0.5
    """
    n_on = np.asanyarray(n_on, dtype=np.float64)
    n_off = np.asanyarray(n_off, dtype=np.float64)
    alpha = np.asanyarray(alpha, dtype=np.float64)

    return n_on - alpha * n_off
Example #26
    def fit(self, X, y, **params):
        """Fit the model using X, y as training data

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training data.

        y : array-like, shape = [n_samples]
            Target values, array of integer values.

        params : list of keyword, optional
            Overwrite keywords from __init__
        """
        X = np.asanyarray(X)
        if y is None:
            raise ValueError("y must not be None")
        self._y = np.asanyarray(y)
        self._set_params(**params)

        if self.algorithm == 'ball_tree' or \
           (self.algorithm == 'auto' and X.shape[1] < 20):
            self.ball_tree = BallTree(X, self.window_size)
        else:
            self.ball_tree = None
            self._fit_X = X
        return self
Example #27
def background(n_off, alpha):
    r"""Estimate background in the on-region from an off-region observation.

    .. math::

        \mu_{background} = \alpha \times n_{off}

    Parameters
    ----------
    n_off : array_like
        Observed number of counts in the off region
    alpha : array_like
        On / off region exposure ratio for background events

    Returns
    -------
    background : ndarray
        Background estimate for the on region

    Examples
    --------
    >>> background(n_off=4, alpha=0.1)
    0.4
    >>> background(n_off=9, alpha=0.2)
    1.8
    """
    n_off = np.asanyarray(n_off, dtype=np.float64)
    alpha = np.asanyarray(alpha, dtype=np.float64)

    return alpha * n_off
Example #28
    def predict(self, X, copy=True):
        """Apply the dimension reduction learned on the train data.
            Parameters
            ----------
            X: array-like of predictors, shape (n_samples, p)
                Training vectors, where n_samples is the number of samples and
                p is the number of predictors.

            copy: X has to be normalized; do it on a copy or in place
                with side effects.

            Notes
            -----
            This call requires the estimation of a p x q matrix, which may
            be an issue in high-dimensional space.
        """
        # Normalize
        if copy:
            Xc = (np.asanyarray(X) - self.x_mean_) / self.x_std_
        else:
            Xc = np.asanyarray(X)
            Xc -= self.x_mean_
            Xc /= self.x_std_
        Ypred = np.dot(Xc, self.coefs)
        return Ypred + self.y_mean_
Example #29
def unravel_indices(indices,shape):
    """
    Convert indices in a flatten array to nD indices of the array with given shape.
        nd_indices = unravel_indices(indices, shape)
    
    :Input:
        indices: array/list/tuple of flat indices. Should be integer, of any shape S
        shape:   nD shape of the array these indices are pointing to
        
    :Output: 
        nd_indices: a nd-array of shape [S]xK, where 
                    [S] is the shape of indices input argument
                    and K the size (number of element) of shape     
    
    :Note:
        The algorithm has been inspired from numpy.unravel_index 
        and can be seen as a generalization that manage set of indices
        However, it does not return tuples and no assertion is done on 
        the input indices before conversion:
        The output indices might be negative or bigger than the array size
        
        This is the opposite of ravel_indices:  for any tuple 'shape'
          ind is equal to    ravel_indices(unravel_indices(ind,shape),shape)
                   and to  unravel_indices(  ravel_indices(ind,shape),shape)
    """

    dim_prod = _np.cumprod([1] + list(shape)[:0:-1])[::-1]
    ind = _np.asanyarray(indices)
    S   = ind.shape
    K   = _np.asanyarray(shape).size
    
    ndInd = ind.ravel()[:, _np.newaxis] // dim_prod % shape
    ndInd.shape = S + (K,)
    return ndInd
Example #30
def rv2coe(k, r, v):
    """Converts r, v to classical orbital elements.

    This is a wrapper around rv2coe from ast2body.for.

    Parameters
    ----------
    k : float
        Standard gravitational parameter (km^3 / s^2).
    r : array
        Position vector (km).
    v : array
        Velocity vector (km / s).

    Examples
    --------
    Vallado 2001, example 2-5
    >>> r = [6524.834, 6862.875, 6448.296]
    >>> v = [4.901327, 5.533756, -1.976341]
    >>> k = 3.986e5
    >>> rv2coe(k, r, v)
    (36127.55012131963, 0.83285427644495158, 1.5336055626394494,
    3.9775750028016947, 0.93174413995595795, 1.6115511711293014)

    """
    # TODO: Extend for additional arguments arglat, truelon, lonper
    r = np.asanyarray(r).astype(float)
    v = np.asanyarray(v).astype(float)
    _, a, ecc, inc, omega, argp, nu, _, _, _, _ = _ast2body.rv2coe(r, v, k)
    coe = np.vstack((a, ecc, inc, omega, argp, nu))
    if coe.shape[-1] == 1:
        coe = coe[:, 0]
    return coe
Example #31
    def to_planar(self,
                  to_2D=None,
                  normal=None,
                  check=True):
        """
        Check to see if current vectors are all coplanar.

        If they are, return a Path2D and a transform which will
        transform the 2D representation back into 3 dimensions

        Parameters
        -----------
        to_2D: (4,4) float
            Homogeneous transformation matrix to apply,
            If not passed a plane will be fitted to vertices.
        normal: (3,) float, or None
           Approximate normal of direction of plane
           If to_2D is not specified sign
           will be applied to fit plane normal
        check:  bool
            If True: Raise a ValueError if
            points aren't coplanar

        Returns
        -----------
        planar : trimesh.path.Path2D
                   Current path transformed onto plane
        to_3D :  (4,4) float
                   Homogeneous transformations to move planar
                   back into 3D space
        """
        # which vertices are actually referenced
        referenced = self.referenced_vertices
        # if nothing is referenced return an empty path
        if len(referenced) == 0:
            return Path2D(), np.eye(4)

        # no explicit transform passed
        if to_2D is None:
            # fit a plane to our vertices
            C, N = plane_fit(self.vertices[referenced])
            # apply the normal sign hint
            if normal is not None:
                normal = np.asanyarray(normal, dtype=np.float64)
                if normal.shape == (3,):
                    N *= np.sign(np.dot(N, normal))
                    N = normal
                else:
                    log.warning(
                        "passed normal not used: {}".format(
                            normal.shape))
            # create a transform from fit plane to XY
            to_2D = plane_transform(origin=C,
                                    normal=N)

        # make sure we've extracted a transform
        to_2D = np.asanyarray(to_2D, dtype=np.float64)
        if to_2D.shape != (4, 4):
            raise ValueError('unable to create transform!')

        # transform all vertices to 2D plane
        flat = tf.transform_points(self.vertices,
                                   to_2D)

        # Z values of vertices which are referenced
        heights = flat[referenced][:, 2]
        # points are not on a plane because Z varies
        if heights.ptp() > tol.planar:
            # since Z is inconsistent set height to zero
            height = 0.0
            if check:
                raise ValueError('points are not flat!')
        else:
            # if the points were planar store the height
            height = heights.mean()

        # the transform from 2D to 3D
        to_3D = np.linalg.inv(to_2D)

        # if the transform didn't move the path to
        # exactly Z=0 adjust it so the returned transform does
        if np.abs(height) > tol.planar:
            # adjust to_3D transform by height
            adjust = tf.translation_matrix(
                [0, 0, height])
            # apply the height adjustment to_3D
            to_3D = np.dot(to_3D, adjust)

        # copy metadata to new object
        metadata = copy.deepcopy(self.metadata)
        # store transform we used to move it onto the plane
        metadata['to_3D'] = to_3D

        # create the Path2D with the same entities
        # and XY values of vertices projected onto the plane
        planar = Path2D(entities=copy.deepcopy(self.entities),
                        vertices=flat[:, :2],
                        metadata=metadata,
                        process=False)

        return planar, to_3D
Example #32
 def predict(self, X):
     """Linear model prediction: compute the dot product with the weights"""
     X = np.asanyarray(X)
     return np.dot(X, self.coef_)
Example #33
def lagmat2ds(x, maxlag0, maxlagex=None, dropex=0, trim='forward',
              use_pandas=False):
    """
    Generate lagmatrix for 2d array, columns arranged by variables

    Parameters
    ----------
    x : array_like, 2d
        2d data, observation in rows and variables in columns
    maxlag0 : int
        for first variable all lags from zero to maxlag are included
    maxlagex : None or int
        max lag for all other variables all lags from zero to maxlag are included
    dropex : int (default is 0)
        exclude first dropex lags from other variables
        for all variables, except the first, lags from dropex to maxlagex are
        included
    trim : string
        * 'forward' : trim invalid observations in front
        * 'backward' : trim invalid initial observations
        * 'both' : trim invalid observations on both sides
        * 'none' : no trimming of observations
    use_pandas : bool, optional
        If true, returns a DataFrame when the input is a pandas
        Series or DataFrame.  If false, return numpy ndarrays.

    Returns
    -------
    lagmat : 2d array
        array with lagged observations, columns ordered by variable

    Notes
    -----
    Inefficient implementation for unequal lags, implemented for convenience
    """

    if maxlagex is None:
        maxlagex = maxlag0
    maxlag = max(maxlag0, maxlagex)
    is_pandas = _is_using_pandas(x, None)

    if x.ndim == 1:
        if is_pandas:
            x = pd.DataFrame(x)
        else:
            x = x[:, None]
    elif x.ndim == 0 or x.ndim > 2:
        raise TypeError('Only supports 1 and 2-dimensional data.')

    nobs, nvar = x.shape

    if is_pandas and use_pandas:
        lags = lagmat(x.iloc[:, 0], maxlag, trim=trim,
                      original='in', use_pandas=True)
        lagsli = [lags.iloc[:, :maxlag0 + 1]]
        for k in range(1, nvar):
            lags = lagmat(x.iloc[:, k], maxlag, trim=trim,
                          original='in', use_pandas=True)
            lagsli.append(lags.iloc[:, dropex:maxlagex + 1])
        return pd.concat(lagsli, axis=1)
    elif is_pandas:
        x = np.asanyarray(x)

    lagsli = [lagmat(x[:, 0], maxlag, trim=trim, original='in')[:, :maxlag0 + 1]]
    for k in range(1, nvar):
        lagsli.append(lagmat(x[:, k], maxlag, trim=trim, original='in')[:, dropex:maxlagex + 1])
    return np.column_stack(lagsli)
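A quick hedged usage sketch (lagmat2ds ships with statsmodels; the import path below is an assumption, and the exact row count depends on the trim option): with maxlag0=2 the first variable contributes lags 0..2 and every other variable lags dropex..maxlagex.

import numpy as np
from statsmodels.tsa.tsatools import lagmat2ds  # assumed import location

x = np.arange(20.0).reshape(10, 2)   # 10 observations, 2 variables
lm = lagmat2ds(x, maxlag0=2)
print(lm.shape)                      # 6 columns: lags 0-2 for each of the two variables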
Example #34
def add_trend(x, trend="c", prepend=False, has_constant='skip'):
    """
    Adds a trend and/or constant to an array.

    Parameters
    ----------
    X : array-like
        Original array of data.
    trend : str {"c","t","ct","ctt"}
        "c" add constant only
        "t" add trend only
        "ct" add constant and linear trend
        "ctt" add constant and linear and quadratic trend.
    prepend : bool
        If True, prepends the new data to the columns of X.
    has_constant : str {'raise', 'add', 'skip'}
        Controls what happens when trend is 'c' and a constant already
        exists in X. 'raise' will raise an error. 'add' will duplicate a
        constant. 'skip' will return the data without change. 'skip' is the
        default.

    Returns
    -------
    y : array, recarray or DataFrame
        The original data with the additional trend columns.  If x is a
        recarray or pandas Series or DataFrame, then the trend column names
        are 'const', 'trend' and 'trend_squared'.

    Notes
    -----
    Returns columns as ["ctt","ct","c"] whenever applicable. There is currently
    no checking for an existing trend.

    See also
    --------
    statsmodels.tools.tools.add_constant
    """
    # TODO: could be generalized for trend of arbitrary order
    trend = trend.lower()
    columns = ['const', 'trend', 'trend_squared']
    if trend == "c":  # handles structured arrays
        columns = columns[:1]
        trendorder = 0
    elif trend == "ct" or trend == "t":
        columns = columns[:2]
        if trend == "t":
            columns = columns[1:2]
        trendorder = 1
    elif trend == "ctt":
        trendorder = 2
    else:
        raise ValueError("trend %s not understood" % trend)

    is_recarray = _is_recarray(x)
    is_pandas = _is_using_pandas(x, None) or is_recarray
    if is_pandas or is_recarray:
        if is_recarray:
            descr = x.dtype.descr
            x = pd.DataFrame.from_records(x)
        elif isinstance(x, pd.Series):
            x = pd.DataFrame(x)
        else:
            x = x.copy()
    else:
        x = np.asanyarray(x)

    nobs = len(x)
    trendarr = np.vander(np.arange(1, nobs + 1, dtype=np.float64), trendorder + 1)
    # put in order ctt
    trendarr = np.fliplr(trendarr)
    if trend == "t":
        trendarr = trendarr[:, 1]

    if "c" in trend:
        if is_pandas or is_recarray:
            # Mixed type protection
            def safe_is_const(s):
                try:
                    return np.ptp(s) == 0.0 and np.any(s != 0.0)
                except:
                    return False
            col_const = x.apply(safe_is_const, 0)
        else:
            col_const = np.logical_and(np.any(np.ptp(np.asanyarray(x), axis=0) == 0, axis=0),
                                       np.all(x != 0.0, axis=0))
        if np.any(col_const):
            if has_constant == 'raise':
                raise ValueError("x already contains a constant")
            elif has_constant == 'skip':
                columns = columns[1:]
                trendarr = trendarr[:, 1:]

    order = 1 if prepend else -1
    if is_recarray or is_pandas:
        trendarr = pd.DataFrame(trendarr, index=x.index, columns=columns)
        x = [trendarr, x]
        x = pd.concat(x[::order], 1)
    else:
        x = [trendarr, x]
        x = np.column_stack(x[::order])

    if is_recarray:
        x = x.to_records(index=False)
        new_descr = x.dtype.descr
        extra_col = len(new_descr) - len(descr)
        if prepend:
            descr = new_descr[:extra_col] + descr
        else:
            descr = descr + new_descr[-extra_col:]

        if not PY3:
            # See 3658
            names = [entry[0] for entry in descr]
            dtypes = [entry[1] for entry in descr]
            names = [bytes(name) for name in names]
            # Fail loudly if there is a non-ascii name
            descr = list(zip(names, dtypes))

        x = x.astype(np.dtype(descr))

    return x
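A brief hedged usage sketch (add_trend also lives in statsmodels.tsa.tsatools; the import path and data below are assumptions): trend='ct' appends a constant and a linear trend column, or prepends them when prepend=True.

import numpy as np
from statsmodels.tsa.tsatools import add_trend  # assumed import location

x = np.random.randn(5, 2)
out = add_trend(x, trend='ct')   # appends 'const' and 'trend' columns
print(out.shape)                 # (5, 4)
print(out[:, -2:])               # a column of ones and the linear trend 1..5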
Example #35
cv2.createTrackbar('Alpha', 'Aligned_color_depth', 0, alpha_slider_max,
                   nothing)

try:
    while True:

        frames = pipeline.wait_for_frames()
        aligned_frames = align.process(frames)

        aligned_depth_frame = aligned_frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()

        if not aligned_depth_frame or not color_frame:
            continue

        depth_image = np.asanyarray(aligned_depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())

        grey_color = 0
        depth_image_3d = np.dstack((depth_image, depth_image, depth_image))
        bg_removed = np.where(
            (depth_image_3d > clipping_distance) | (depth_image_3d <= 0),
            grey_color, color_image)

        depth_colormap = cv2.applyColorMap(
            cv2.convertScaleAbs(depth_image, alpha=0.08), cv2.COLORMAP_JET)

        alpha, beta = a_b(cv2.getTrackbarPos('Alpha', 'Aligned_color_depth'))

        dst = cv2.addWeighted(color_image, alpha, depth_colormap, beta, 0.0)
Example #36
def splited_with_model(model_name, big_X_train, big_y_train, big_X_test, big_y_test,
                        class_names=class_names, ch_num=25, ep=50, addon=''):
    ###
    ### First create the sequence of training users and single test user
    ###
    list_element = []
    my_list = [0, 1, 2, 3, 4, 5, 6, 7]
    series = []

    for idx in range(72):
        if idx % 9 != 0:
            list_element.append(my_list[idx % len(my_list)])
        elif idx % 9 == 0:
            series.append(list_element)
            list_element = []
    series[0] = list_element

    metrics = np.zeros(((len(my_list)), 4))
    ###
    ### Iterate through the sequences
    ###
    r_series = reversed(series)
    for user_list in r_series:
        print('Starting Splited learning for user {}'.format(user_list[-1]))

        ###
        ### Only create the second training data (of the specific user)
        ###
        temp = [item for sublist in big_X_train[user_list[-1]] for item in sublist]
        temp = np.asanyarray(temp)
        temp = np.swapaxes(temp, 1, 2)
        features_train_2 = temp.reshape(temp.shape[0], 1, ch_num, 1125)
        lab = [item for sublist in big_y_train[user_list[-1]] for item in sublist]
        # encode class values as integers
        encoder = LabelEncoder()
        encoder.fit(lab)
        encoded_Y = encoder.transform(lab)
        # convert integers to dummy variables (i.e. one hot encoded)
        labels_train_2 = np_utils.to_categorical(encoded_Y)

        ###
        ### Create the testing data (of the specific user)
        ###
        temp = [item for sublist in big_X_test[user_list[-1]] for item in sublist]
        temp = np.asanyarray(temp)
        temp = np.swapaxes(temp, 1, 2)
        features_test_2 = temp.reshape(temp.shape[0], 1, ch_num, 1125)
        lab = [item for sublist in big_y_test[user_list[-1]] for item in sublist]
        # encode class values as integers
        encoder = LabelEncoder()
        encoder.fit(lab)
        encoded_Y = encoder.transform(lab)
        # convert integers to dummy variables (i.e. one hot encoded)
        labels_test_2 = np_utils.to_categorical(encoded_Y)

        filename_ = '{0}{1}{2}{3}'.format(model_name, '_Split_{}'.format(user_list[-1] + 1), addon,
                                          '_{}_Epochs'.format(str(ep)))

        model_file = 'models/{0}{1}_50_Epochs{2}.h5'.format(model_name, 'Freezing_{}'.format(user_list[-1] + 1),
                                                            '_-2_Frozen')

        metrics[user_list[-1]] = split_unit_with_model(model_file, features_train_2, features_test_2,
                                                          labels_train_2, labels_test_2,
                                                          filename_, class_names, ep)

        del features_train_2, features_test_2, \
            labels_train_2, labels_test_2

    metrics_to_csv(metrics, '{}_Split_Learning_{}'.format(model_name, filename_))
Example #37
def splitted_layers(model_name, big_X_train, big_y_train, big_X_test, big_y_test,
                    class_names=class_names, ch_num=25, dr=0.1, addon=''):
    ###
    ### First create the sequence of training users and single test user
    ###
    list_element = []
    my_list = [0, 1, 2, 3, 4, 5, 6, 7]
    series = []

    for idx in range(72):
        if idx % 9 != 0:
            list_element.append(my_list[idx % len(my_list)])
        elif idx % 9 == 0:
            series.append(list_element)
            list_element = []
    series[0] = list_element

    features_train = []
    labels_train = []

    metrics = np.zeros(((len(my_list)), 4))
    ###
    ### Iterate through the sequences
    ###
    for user_list in series:
        print('Starting Splitted learning for user {}'.format(user_list[-1]))

        model = EEGNet_org(nb_classes=4, Chans=ch_num, Samples=1125, dropoutRate=dr)

        model.compile(loss=categorical_crossentropy,
                      optimizer=Adam(), metrics=['accuracy'])

        # Create the first training data
        for i in user_list[:-1]:
            temp = [item for sublist in big_X_train[i] for item in sublist]
            temp = np.asanyarray(temp)
            temp = np.swapaxes(temp, 1, 2)
            features_train.append(temp.reshape(temp.shape[0], 1, ch_num, 1125))
            lab = [item for sublist in big_y_train[i] for item in sublist]
            # encode class values as integers
            encoder = LabelEncoder()
            encoder.fit(lab)
            encoded_Y = encoder.transform(lab)
            # convert integers to dummy variables (i.e. one hot encoded)
            labels_train.append(np_utils.to_categorical(encoded_Y))

            # Also add the testing data to increase the size of training data
            temp = [item for sublist in big_X_test[i] for item in sublist]
            temp = np.asanyarray(temp)
            temp = np.swapaxes(temp, 1, 2)
            features_train.append(temp.reshape(temp.shape[0], 1, ch_num, 1125))
            lab = [item for sublist in big_y_test[i] for item in sublist]
            # encode class values as integers
            encoder = LabelEncoder()
            encoder.fit(lab)
            encoded_Y = encoder.transform(lab)
            # convert integers to dummy variables (i.e. one hot encoded)
            labels_train.append(np_utils.to_categorical(encoded_Y))
        ###
        ### Create the second training data (of the specific user)
        ###
        temp = [item for sublist in big_X_train[user_list[-1]] for item in sublist]
        temp = np.asanyarray(temp)
        temp = np.swapaxes(temp, 1, 2)
        features_train_2 = temp.reshape(temp.shape[0], 1, ch_num, 1125)
        lab = [item for sublist in big_y_train[user_list[-1]] for item in sublist]
        # encode class values as integers
        encoder = LabelEncoder()
        encoder.fit(lab)
        encoded_Y = encoder.transform(lab)
        # convert integers to dummy variables (i.e. one hot encoded)
        labels_train_2 = np_utils.to_categorical(encoded_Y)

        # Flatten the data for training
        full_features = np.vstack(features_train)
        full_labels = np.vstack(labels_train)
        ###
        ### Create the testing data (of the specific user)
        ###
        temp = [item for sublist in big_X_test[user_list[-1]] for item in sublist]
        temp = np.asanyarray(temp)
        temp = np.swapaxes(temp, 1, 2)
        features_test_2 = temp.reshape(temp.shape[0], 1, ch_num, 1125)
        lab = [item for sublist in big_y_test[user_list[-1]] for item in sublist]
        # encode class values as integers
        encoder = LabelEncoder()
        encoder.fit(lab)
        encoded_Y = encoder.transform(lab)
        # convert integers to dummy variables (i.e. one hot encoded)
        labels_test_2 = np_utils.to_categorical(encoded_Y)

        filename_ = '{0}{1}{2}'.format(model_name, 'Splitted_{}'.format(user_list[-1] + 1), addon)

        metrics[user_list[-1]] = freezing_unit(model, full_features, full_labels,
                                               features_train_2, features_test_2,
                                               labels_train_2, labels_test_2,
                                               filename_, class_names)
    metrics_to_csv(metrics, '{}_Splitted_Learning_{}'.format(model_name, filename_))
Example #38
    def __mul__(self, other):
        """interpret other and call one of the following

        self._mul_scalar()
        self._mul_vector()
        self._mul_multivector()
        self._mul_sparse_matrix()
        """

        M, N = self.shape

        if other.__class__ is np.ndarray:
            # Fast path for the most common case
            if other.shape == (N, ):
                return self._mul_vector(other)
            elif other.shape == (N, 1):
                return self._mul_vector(other.ravel()).reshape(M, 1)
            elif other.ndim == 2 and other.shape[0] == N:
                return self._mul_multivector(other)

        if isscalarlike(other):
            # scalar value
            return self._mul_scalar(other)

        if issparse(other):
            if self.shape[1] != other.shape[0]:
                raise ValueError('dimension mismatch')
            return self._mul_sparse_matrix(other)

        try:
            other.shape
        except AttributeError:
            # If it's a list or whatever, treat it like a matrix
            other = np.asanyarray(other)

        other = np.asanyarray(other)

        if other.ndim == 1 or other.ndim == 2 and other.shape[1] == 1:
            # dense row or column vector
            if other.shape != (N, ) and other.shape != (N, 1):
                raise ValueError('dimension mismatch')

            result = self._mul_vector(np.ravel(other))

            if isinstance(other, np.matrix):
                result = np.asmatrix(result)

            if other.ndim == 2 and other.shape[1] == 1:
                # If 'other' was an (nx1) column vector, reshape the result
                result = result.reshape(-1, 1)

            return result

        elif other.ndim == 2:
            ##
            # dense 2D array or matrix ("multivector")

            if other.shape[0] != self.shape[1]:
                raise ValueError('dimension mismatch')

            result = self._mul_multivector(np.asarray(other))

            if isinstance(other, np.matrix):
                result = np.asmatrix(result)

            return result
        else:
            raise ValueError('could not interpret dimensions')
Example #39
import cv2
import yaml
import numpy as np

fs = cv2.FileStorage("Output_fast.yaml", cv2.FILE_STORAGE_READ)
fn = fs.getNode("Trayectoria")
data = np.asanyarray(fn.mat())
print(fn.mat())
print(data[0])
Example #40
 def entities(self, values):
     if values is None:
         self._entities = np.array([])
     else:
         self._entities = np.asanyarray(values)
Example #41
    xytext=(420, 0.06),
    textcoords='data',
    arrowprops=dict(arrowstyle="->", connectionstyle="arc"),
)
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')

# Get Feature Importance from the classifier
feature_importance = classifier.feature_importances_
# Normalize The Features
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.figure(figsize=(16, 12))
plt.barh(pos, feature_importance[sorted_idx], align='center', color='#7A68A6')
plt.yticks(pos, np.asanyarray(california_housing_feature_names)[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()

features = [
    'MedInc', 'AveOccup', 'HouseAge', 'AveRooms', ('AveOccup', 'HouseAge')
]
fig, ax = ensemble.partial_dependence.plot_partial_dependence(
    classifier,
    X_train,
    features,
    feature_names=california_housing_feature_names,
    figsize=(16, 12))
Example #42
    def __setitem__(self, idxs, val):
        assert isinstance(
            idxs, tuple
        ), "Assigning to HDF5_Dict requires a tuple of (object_name, feature_name, integer)"
        assert isinstance(idxs[0], basestring) and isinstance(
            idxs[1], basestring), "First two indices must be of type str."
        assert isinstance(
            idxs[2],
            int) and idxs[2] >= 0, "Third index must be a non-negative integer"

        object_name, feature_name, num_idx = idxs
        full_name = '%s.%s' % (idxs[0], idxs[1])
        feature_exists = self.has_feature(object_name, feature_name)
        assert (not self.must_exist) or feature_exists, \
            "Attempted storing new feature %s, but must_exist=True" % (full_name)

        if not feature_exists:
            if not self.has_object(object_name):
                self.add_object(object_name)
            self.add_feature(object_name, feature_name)

        # find the destination for the data, and check that its
        # the right size for the values.  This may extend the
        # _index and data arrays. It may also overwrite the old value.
        dest = self.find_index_or_slice(idxs, val)

        with self.lock:
            dataset = self.get_dataset(object_name, feature_name)
            if dataset.dtype.kind == 'i':
                if np.asanyarray(val).dtype.kind == 'f':
                    # it's possible we have only stored integers and now need to promote to float
                    if dataset.shape[0] > 0:
                        vals = dataset[:].astype(float)
                    else:
                        vals = np.array([])
                    del self.top_group[object_name][feature_name]['data']
                    dataset = self.top_group[object_name][
                        feature_name].create_dataset('data', (vals.size, ),
                                                     dtype=float,
                                                     compression='gzip',
                                                     shuffle=True,
                                                     chunks=(self.chunksize, ),
                                                     maxshape=(None, ))
                    if vals.size > 0:
                        dataset[:] = vals
                elif np.asanyarray(val).dtype.kind in ('S', 'a', 'U'):
                    # we created the dataset without any data, so didn't know the type before
                    sz = dataset.shape[0]
                    del self.top_group[object_name][feature_name]['data']
                    dataset = self.top_group[object_name][
                        feature_name].create_dataset(
                            'data', (sz, ),
                            dtype=h5py.special_dtype(vlen=str),
                            compression='gzip',
                            shuffle=True,
                            chunks=(self.chunksize, ),
                            maxshape=(None, ))

            if np.isscalar(val):
                dataset[dest] = val
            else:
                dataset[dest] = np.asanyarray(val).ravel()
Example #43
def _select(input,
            labels=None,
            index=None,
            find_min=False,
            find_max=False,
            find_min_positions=False,
            find_max_positions=False):
    '''returns min, max, or both, plus positions if requested'''

    find_positions = find_min_positions or find_max_positions
    positions = None
    if find_positions:
        positions = numpy.arange(input.size).reshape(input.shape)

    def single_group(vals, positions):
        result = []
        if find_min:
            result += [vals.min()]
        if find_min_positions:
            result += [positions[vals == vals.min()][0]]
        if find_max:
            result += [vals.max()]
        if find_max_positions:
            result += [positions[vals == vals.max()][0]]
        return result

    if labels is None:
        return single_group(input, positions)

    # ensure input and labels match sizes
    input, labels = numpy.broadcast_arrays(input, labels)

    if index is None:
        mask = (labels > 0)
        masked_positions = None
        if find_positions:
            masked_positions = positions[mask]
        return single_group(input[mask], masked_positions)

    if numpy.isscalar(index):
        mask = (labels == index)
        masked_positions = None
        if find_positions:
            masked_positions = positions[mask]
        return single_group(input[mask], masked_positions)

    order = input.ravel().argsort()
    input = input.ravel()[order]
    labels = labels.ravel()[order]
    if find_positions:
        positions = positions.ravel()[order]

    # remap labels to unique integers if necessary, or if the largest
    # label is larger than the number of values.
    if ((not numpy.issubdtype(labels.dtype, numpy.integer)) or (labels.min() < 0)
            or (labels.max() > labels.size)):
        # remap labels, and indexes
        unique_labels, labels = numpy.unique(labels, return_inverse=True)
        idxs = numpy.searchsorted(unique_labels, index)

        # make all of idxs valid
        idxs[idxs >= unique_labels.size] = 0
        found = (unique_labels[idxs] == index)
    else:
        # labels are an integer type, and there aren't too many.
        idxs = numpy.asanyarray(index, int).copy()
        found = (idxs >= 0) & (idxs <= labels.max())

    idxs[~found] = labels.max() + 1

    result = []
    if find_min:
        mins = numpy.zeros(labels.max() + 2, input.dtype)
        mins[labels[::-1]] = input[::-1]
        result += [mins[idxs]]
    if find_min_positions:
        minpos = numpy.zeros(labels.max() + 2)
        minpos[labels[::-1]] = positions[::-1]
        result += [minpos[idxs]]
    if find_max:
        maxs = numpy.zeros(labels.max() + 2, input.dtype)
        maxs[labels] = input
        result += [maxs[idxs]]
    if find_max_positions:
        maxpos = numpy.zeros(labels.max() + 2)
        maxpos[labels] = positions
        result += [maxpos[idxs]]
    return result
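
A hedged usage sketch for _select as defined above (the inputs are made up for illustration): with a list of label ids it returns one array per requested statistic, in the order min, min positions, max, max positions.

import numpy

image = numpy.array([[1, 2, 3],
                     [4, 0, 6],
                     [7, 8, 9]])
labels = numpy.array([[1, 1, 2],
                      [1, 2, 2],
                      [3, 3, 3]])

# Per-label minima and maxima for the label ids 1, 2 and 3.
mins, maxs = _select(image, labels, index=[1, 2, 3],
                     find_min=True, find_max=True)
print(mins)  # expected: [1 0 7]
print(maxs)  # expected: [4 6 9]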
Beispiel #44
0
def ai():
    # Configure depth and color streams
    test_img = cv2.imread('testImag.jpg')
    pipeline = rs.pipeline()
    config = rs.config()
    colorizer = rs.colorizer()
    config.enable_stream(rs.stream.depth, 480, 270, rs.format.z16, 15)
    config.enable_stream(rs.stream.color, 424, 240, rs.format.bgr8, 15)
    model_file = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite'
    labels = read_label_file('coco_labels.txt')
    interpreter = make_interpreter(model_file)
    interpreter.allocate_tensors()
    ##    align = rs.align(rs.stream.color)
    dist_thres_cm = 180  # cm
    max_depth_m = 8
    min_depth_m = 0.1

    confThreshold = 0.65
    confidence = 0

    off_ul = 15
    off_vl = 10
    off_ur = 25
    off_vr = 15

    corn_1 = (72, 44)
    corn_2 = (168, 92)
    cornColor_1 = (corn_1[0] - off_ul,
                   corn_1[1] - off_vl)  # compensate for the FOV mismatch between the depth and color cameras
    cornColor_2 = (corn_2[0] + off_ur, corn_2[1] + off_vr)

    filters = [[True, "Decimation Filter",
                rs.decimation_filter()],
               [True, "Threshold Filter",
                rs.threshold_filter()],
               [True, "Depth to Disparity",
                rs.disparity_transform(True)],
               [True, "Spatial Filter",
                rs.spatial_filter()],
               [True, "Temporal Filter",
                rs.temporal_filter()],
               [True, "Hole Filling Filter",
                rs.hole_filling_filter(True)],
               [True, "Disparity to Depth",
                rs.disparity_transform(False)]]

    if filters[1][0] is True:
        filters[1][2].set_option(rs.option.min_distance, min_depth_m)
        filters[1][2].set_option(rs.option.max_distance, max_depth_m)

    # Start streaming
    profile = pipeline.start(config)
    depth_sensor = profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()
    progress("INFO: Depth scale is: %.4f" % depth_scale)

    progress("INFO: video recording")
    out = cv2.VideoWriter('avcnet.avi', cv2.VideoWriter_fourcc(*'XVID'), 15,
                          (240, 136))

    # default parameters
    orient = 0
    tim_old = 0.1
    state = 'fly'
    global velocity_y
    velocity_y = 0
    fly_1 = 400
    k = 0.66  # k=(tau/T)/(1+tau/T) tau time constant LPF, T period
    # k=0 # no filtering
    fps = FPS().start()
    # Start streaming

    try:
        while True:

            # Wait for a coherent pair of frames: depth and color
            frames = pipeline.wait_for_frames()
            ##            frames = align.process(frames)
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()
            if not depth_frame or not color_frame:
                continue
            # Apply the filters
            filtered_frame = depth_frame
            for i in range(len(filters)):
                if filters[i][0] is True:
                    filtered_frame = filters[i][2].process(filtered_frame)

            # Extract depth in matrix form
            depth_data = filtered_frame.as_frame().get_data()
            depth_mat = np.asanyarray(depth_data)  # shape 136,240

            # Convert images to numpy arrays
            # output_image = np.asanyarray(colorizer.colorize(filtered_frame).get_data()) #shape: 136,240,3
            color_image = np.asanyarray(color_frame.get_data())

            # calculate distance
            distances = distances_depth_image(depth_mat, min_depth_m,
                                              max_depth_m, depth_scale, corn_1,
                                              corn_2)

            # Stack both images horizontally
            # output_color = cv2.resize(color_image, (240, 136))
            if color_frame:
                imag = cv2.resize(color_image, (300, 300))
                common.set_input(interpreter, imag)
                interpreter.invoke()
                scale = (1, 1)
                objects = detect.get_objects(interpreter, confThreshold, scale)
                data_out = []
                if objects:
                    for obj in objects:
                        inference = []  # clear inference
                        box = obj.bbox
                        inference.extend((obj.id, obj.score, box))
                        # print('inference:',inference)
                        data_out.append(
                            inference)  # list of all detected objects
                    # print('data_out:',data_out)
                    objID = data_out[0][
                        0]  # object with largest confidence selected
                    confidence = data_out[0][1]
                    labeltxt = labels[objID]
                    box = data_out[0][2]
                    if confidence > confThreshold:
                        draw_rect(imag,
                                  box,
                                  confidence,
                                  labeltxt,
                                  use_normalized_coordinates=False)

            output_color = cv2.resize(imag, (240, 136))
            # cv2.rectangle(output_image, corn_1, corn_2, (0, 255, 0), thickness=2)
            cv2.rectangle(output_color,
                          cornColor_1,
                          cornColor_2, (0, 255, 0),
                          thickness=2)

            #===========================================================================
            # classify the input image
            fly = np.min(distances)  # distance in cm
            left = (distances[0:24] <
                    dist_thres_cm).sum()  # object is on the left side
            right = (distances[24:48] <
                     dist_thres_cm).sum()  # object is on the right side
            stop = ((distances[0:48] < dist_thres_cm).sum() == 48) * 48
            # build the label
            my_dict = {'left': left, 'right': right, 'stop': stop}
            maxPair = max(my_dict.items(), key=itemgetter(1))
            fly_f = k * fly_1 + (1 - k) * fly
            fly_1 = fly_f
            proba = int(fly_f)

            if state == 'avoid':
                if fly_f >= dist_thres_cm + 10:
                    state = 'fly'
                    print(state, int(fly_f), file=f)

            else:
                label = 'forward'
                # proba=fly_f
                if fly_f <= dist_thres_cm:
                    label = maxPair[0]
                    # proba=int(fly_f)
                    print(my_dict, int(fly_f), file=f)
                    state = 'avoid'

            label_1 = "{} {} {}".format(label, proba, state)
            # draw the label on the image
            cv2.putText(output_color, label_1, (10, 25),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
            # Write the frame into the file 'output.avi'
            out.write(output_color)

            if vehicle.channels['8'] > 1400:
                if state == "fly":
                    event.clear()

                if state == "avoid":
                    event.set()

                    if label == 'left':
                        velocity_y = 0.8
                    if label == 'right':
                        velocity_y = -0.8
                    if label == 'stop':
                        velocity_y = 0

            if vehicle.channels['8'] < 1400:
                event.clear()

            # show the output frame
            cv2.imshow("Frame", output_color)
            key = cv2.waitKey(10) & 0xFF

            # update the FPS counter
            fps.update()
            # if the `Esc` key was pressed, break from the loop
            if key == 27:
                break

    finally:

        # Stop streaming
        pipeline.stop()
        # do a bit of cleanup
        # stop the timer and save FPS information
        fps.stop()

        progress("INFO: elapsed time: {:.2f}".format(fps.elapsed()))
        progress("INFO: approx. FPS: {:.2f}".format(fps.fps()))
        print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()), file=f)
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()), file=f)
        f.close()

        progress('INFO:end')
        time.sleep(3)

        cv2.destroyAllWindows()
        out.release()
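
The avoid/fly decision above is driven by a first-order low-pass filter on the minimum distance, fly_f = k*fly_1 + (1-k)*fly with k = (tau/T)/(1+tau/T). A self-contained sketch of just that smoothing step, with made-up sample values (the lowpass helper below is not part of the original code):

import numpy as np

def lowpass(samples, k=0.66, y0=400.0):
    # y[n] = k*y[n-1] + (1-k)*x[n]; k=0 disables filtering.
    y, out = y0, []
    for x in samples:
        y = k * y + (1 - k) * x
        out.append(y)
    return np.asanyarray(out)

# Hypothetical raw minimum distances (cm) from successive depth frames.
print(lowpass([350.0, 200.0, 190.0, 185.0]))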
Beispiel #45
0
 def __init__(self, n_on, mu_bkg):
     self.n_on = np.asanyarray(n_on)
     self.mu_bkg = np.asanyarray(mu_bkg)
Beispiel #46
0
import random

# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog
from detectron2.modeling import build_model
from detectron2.checkpoint import DetectionCheckpointer
import torch
from PIL import Image
from predictor import VisualizationDemo
import pyrealsense2 as rs

image = np.asanyarray(Image.open("/home/hoangphuc/MASK_RCNN_TOMO/input.jpg"))


def setup_realsense():
    config = rs.config()
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
    return config


def setup_config():
    model_path = "/home/hoangphuc/MASK_RCNN_TOMO//trained_models/model_final_resnet101_fpn.pth"
    cfg = get_cfg()
    cfg.merge_from_file(
        model_zoo.get_config_file(
            "COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml"))
Beispiel #47
0
            negativeNodes.append(i+1)
            nodeColorMap.append('red')
    nx.draw(G, node_color=nodeColorMap, with_labels=True)
    plt.savefig(picfilename)
    plt.close()
    return positiveEntries,positiveNodes,negativeEntries,negativeNodes

if __name__ == '__main__':
    inputfilePath = "../data/dolphins.mtx"
    edges = np.loadtxt(inputfilePath, dtype=int, comments='%')
    G = nx.Graph()
    G.add_edges_from(edges)
    #G = nx.read_gml(inputfilePath)
    Adjacency_lists=G.adj
    node_list = list(G.nodes)
    A = nx.laplacian_matrix(G)
    laplacian_Matrix=A.todense()
    eigenValues, eigenVectors = np.linalg.eigh(laplacian_Matrix)
    EigV=eigenVectors.T
    #eigenValues.sort()
    sortedEigenValueIndex = np.argsort(eigenValues)
    secondSmallestEigenValue = eigenValues[sortedEigenValueIndex[1]]
    secondSmallestEigenVector = EigV[sortedEigenValueIndex[1]]
    kmeans=KMeans(n_clusters=3).fit(np.asanyarray(secondSmallestEigenVector).reshape(-1, 1))
    labels=kmeans.labels_
    outF = open("./tmp/membership.txt", "w")
    for line in labels:
        outF.write(str(line))
        outF.write("\n")
    outF.close()
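
For context, the second-smallest Laplacian eigenvector selected above is the Fiedler vector; its sign pattern alone already bipartitions a graph. A small, hedged sketch on a toy path graph (independent of the dolphins data used above):

import numpy as np
import networkx as nx

G_toy = nx.path_graph(6)                      # nodes 0-1-2-3-4-5 in a chain
L = np.asarray(nx.laplacian_matrix(G_toy).todense(), dtype=float)
w, V = np.linalg.eigh(L)
fiedler = V[:, np.argsort(w)[1]]              # second-smallest eigenvector
part_a = [n for n, v in zip(G_toy.nodes, fiedler) if v < 0]
part_b = [n for n, v in zip(G_toy.nodes, fiedler) if v >= 0]
print(part_a, part_b)                         # e.g. [0, 1, 2] and [3, 4, 5] (sign is arbitrary)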
Beispiel #48
0
 def __init__(self, n_on, n_off, alpha):
     self.n_on = np.asanyarray(n_on)
     self.n_off = np.asanyarray(n_off)
     self.alpha = np.asanyarray(alpha)
Beispiel #49
0
    def __init__(self, data=None, rows=None, cols=None, shape=None,
                 border_color=1, border_width=1, vmin=0.0,
                 vmax=1.0, vsym=False, global_bounds=True):

        assert data is None or (np.ndim(data) in (3, 4, 5) and
                                data.shape[-1] <= 3)
        if data is not None:
            data = np.asanyarray(data)

        if data is None:
            assert rows is not None and cols is not None, \
                "Must specify rows and cols if no data is specified"
            shape = shape

        elif data.ndim == 3:
            N = 1
            rows = 1
            cols = 1
            data = data[np.newaxis]
            shape = data.shape[1:3]

        elif data.ndim == 4:
            N = data.shape[0]

            if rows is None and cols is None:
                cols = int(np.ceil(np.sqrt(N)))
                rows = int(np.ceil(N / cols))
            elif rows is None:
                rows = int(np.ceil(N / cols))
            elif cols is None:
                cols = int(np.ceil(N / rows))
            shape = data.shape[1:3]

        elif data.ndim == 5:
            assert rows is None and cols is None
            rows = data.shape[0]
            cols = data.shape[1]
            data = data.reshape((-1,) + data.shape[2:])
            N = data.shape[0]
            shape = data.shape[1:3]

        self._border_color = self._prepare_color(border_color)
        self._rows = rows
        self._cols = cols
        self._shape = shape
        self._border = border_width

        b = self._border
        self._fullsize = (b + (shape[0] + b) * self._rows,
                          b + (shape[1] + b) * self._cols)

        self._data = np.ones(self._fullsize + (3,), dtype=np.float64)

        if global_bounds:
            if vmin is None:
                vmin = np.nanmin(data)
            if vmax is None:
                vmax = np.nanmax(data)

            if vsym:
                mx = max(abs(vmin), abs(vmax))
                vmin = -mx
                vmax = mx

        # Populate with data
        for i in range(min(N, rows * cols)):
            self.set_image(data[i], i // cols, i % cols,
                           vmin=vmin, vmax=vmax, vsym=vsym)
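
The geometry above packs rows x cols tiles with a border of border_width pixels around and between them, so the canvas measures b + (h + b) * rows by b + (w + b) * cols. A quick, hedged check of that formula with made-up numbers:

import numpy as np

rows, cols, border = 3, 4, 1
tile_shape = (28, 28)                  # hypothetical tile size
fullsize = (border + (tile_shape[0] + border) * rows,
            border + (tile_shape[1] + border) * cols)
canvas = np.ones(fullsize + (3,), dtype=np.float64)
print(fullsize)                        # -> (88, 117)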
Beispiel #50
0
def hdfgroup2signaldict(group, lazy=False):
    global current_file_version
    global default_version
    if current_file_version < LooseVersion("1.2"):
        metadata = "mapped_parameters"
        original_metadata = "original_parameters"
    else:
        metadata = "metadata"
        original_metadata = "original_metadata"

    exp = {'metadata': hdfgroup2dict(
        group[metadata], lazy=lazy),
        'original_metadata': hdfgroup2dict(
            group[original_metadata], lazy=lazy),
        'attributes': {}
    }

    data = group['data']
    if lazy:
        data = da.from_array(data, chunks=data.chunks)
        exp['attributes']['_lazy'] = True
    else:
        data = np.asanyarray(data)
    exp['data'] = data
    axes = []
    for i in range(len(exp['data'].shape)):
        try:
            axes.append(dict(group['axis-%i' % i].attrs))
            axis = axes[-1]
            for key, item in axis.items():
                if isinstance(item, np.bool_):
                    axis[key] = bool(item)
                else:
                    axis[key] = ensure_unicode(item)
        except KeyError:
            break
    if len(axes) != len(exp['data'].shape):  # broke from the previous loop
        try:
            axes = [i for k, i in sorted(iter(hdfgroup2dict(
                group['_list_' + str(len(exp['data'].shape)) + '_axes'],
                lazy=lazy).items()))]
        except KeyError:
            raise IOError(not_valid_format)
    exp['axes'] = axes
    if 'learning_results' in group.keys():
        exp['attributes']['learning_results'] = \
            hdfgroup2dict(
                group['learning_results'],
                lazy=lazy)
    if 'peak_learning_results' in group.keys():
        exp['attributes']['peak_learning_results'] = \
            hdfgroup2dict(
                group['peak_learning_results'],
                lazy=lazy)

    # If the title was not defined on writing the Experiment is
    # then called __unnamed__. The next "if" simply sets the title
    # back to the empty string
    if "General" in exp["metadata"] and "title" in exp["metadata"]["General"]:
        if '__unnamed__' == exp['metadata']['General']['title']:
            exp['metadata']["General"]['title'] = ''

    if current_file_version < LooseVersion("1.1"):
        # Load the decomposition results written with the old name,
        # mva_results
        if 'mva_results' in group.keys():
            exp['attributes']['learning_results'] = hdfgroup2dict(
                group['mva_results'], lazy=lazy)
        if 'peak_mva_results' in group.keys():
            exp['attributes']['peak_learning_results'] = hdfgroup2dict(
                group['peak_mva_results'], lazy=lazy)
        # Replace the old signal and name keys with their current names
        if 'signal' in exp['metadata']:
            if "Signal" not in exp["metadata"]:
                exp["metadata"]["Signal"] = {}
            exp['metadata']["Signal"]['signal_type'] = \
                exp['metadata']['signal']
            del exp['metadata']['signal']

        if 'name' in exp['metadata']:
            if "General" not in exp["metadata"]:
                exp["metadata"]["General"] = {}
            exp['metadata']['General']['title'] = \
                exp['metadata']['name']
            del exp['metadata']['name']

    if current_file_version < LooseVersion("1.2"):
        if '_internal_parameters' in exp['metadata']:
            exp['metadata']['_HyperSpy'] = \
                exp['metadata']['_internal_parameters']
            del exp['metadata']['_internal_parameters']
            if 'stacking_history' in exp['metadata']['_HyperSpy']:
                exp['metadata']['_HyperSpy']["Stacking_history"] = \
                    exp['metadata']['_HyperSpy']['stacking_history']
                del exp['metadata']['_HyperSpy']["stacking_history"]
            if 'folding' in exp['metadata']['_HyperSpy']:
                exp['metadata']['_HyperSpy']["Folding"] = \
                    exp['metadata']['_HyperSpy']['folding']
                del exp['metadata']['_HyperSpy']["folding"]
        if 'Variance_estimation' in exp['metadata']:
            if "Noise_properties" not in exp["metadata"]:
                exp["metadata"]["Noise_properties"] = {}
            exp['metadata']['Noise_properties']["Variance_linear_model"] = \
                exp['metadata']['Variance_estimation']
            del exp['metadata']['Variance_estimation']
        if "TEM" in exp["metadata"]:
            if "Acquisition_instrument" not in exp["metadata"]:
                exp["metadata"]["Acquisition_instrument"] = {}
            exp["metadata"]["Acquisition_instrument"]["TEM"] = \
                exp["metadata"]["TEM"]
            del exp["metadata"]["TEM"]
            tem = exp["metadata"]["Acquisition_instrument"]["TEM"]
            if "EELS" in tem:
                if "dwell_time" in tem:
                    tem["EELS"]["dwell_time"] = tem["dwell_time"]
                    del tem["dwell_time"]
                if "dwell_time_units" in tem:
                    tem["EELS"]["dwell_time_units"] = tem["dwell_time_units"]
                    del tem["dwell_time_units"]
                if "exposure" in tem:
                    tem["EELS"]["exposure"] = tem["exposure"]
                    del tem["exposure"]
                if "exposure_units" in tem:
                    tem["EELS"]["exposure_units"] = tem["exposure_units"]
                    del tem["exposure_units"]
                if "Detector" not in tem:
                    tem["Detector"] = {}
                tem["Detector"] = tem["EELS"]
                del tem["EELS"]
            if "EDS" in tem:
                if "Detector" not in tem:
                    tem["Detector"] = {}
                if "EDS" not in tem["Detector"]:
                    tem["Detector"]["EDS"] = {}
                tem["Detector"]["EDS"] = tem["EDS"]
                del tem["EDS"]
            del tem
        if "SEM" in exp["metadata"]:
            if "Acquisition_instrument" not in exp["metadata"]:
                exp["metadata"]["Acquisition_instrument"] = {}
            exp["metadata"]["Acquisition_instrument"]["SEM"] = \
                exp["metadata"]["SEM"]
            del exp["metadata"]["SEM"]
            sem = exp["metadata"]["Acquisition_instrument"]["SEM"]
            if "EDS" in sem:
                if "Detector" not in sem:
                    sem["Detector"] = {}
                if "EDS" not in sem["Detector"]:
                    sem["Detector"]["EDS"] = {}
                sem["Detector"]["EDS"] = sem["EDS"]
                del sem["EDS"]
            del sem

        if "Sample" in exp["metadata"] and "Xray_lines" in exp[
                "metadata"]["Sample"]:
            exp["metadata"]["Sample"]["xray_lines"] = exp[
                "metadata"]["Sample"]["Xray_lines"]
            del exp["metadata"]["Sample"]["Xray_lines"]

        for key in ["title", "date", "time", "original_filename"]:
            if key in exp["metadata"]:
                if "General" not in exp["metadata"]:
                    exp["metadata"]["General"] = {}
                exp["metadata"]["General"][key] = exp["metadata"][key]
                del exp["metadata"][key]
        for key in ["record_by", "signal_origin", "signal_type"]:
            if key in exp["metadata"]:
                if "Signal" not in exp["metadata"]:
                    exp["metadata"]["Signal"] = {}
                exp["metadata"]["Signal"][key] = exp["metadata"][key]
                del exp["metadata"][key]

    if current_file_version < LooseVersion("3.0"):
        if "Acquisition_instrument" in exp["metadata"]:
            # Move tilt_stage to Stage.tilt_alpha
            # Move exposure time to Detector.Camera.exposure_time
            if "TEM" in exp["metadata"]["Acquisition_instrument"]:
                tem = exp["metadata"]["Acquisition_instrument"]["TEM"]
                exposure = None
                if "tilt_stage" in tem:
                    tem["Stage"] = {"tilt_alpha": tem["tilt_stage"]}
                    del tem["tilt_stage"]
                if "exposure" in tem:
                    exposure = "exposure"
                # Digital_micrograph plugin was parsing to 'exposure_time'
                # instead of 'exposure': need this to be compatible with
                # previous behaviour
                if "exposure_time" in tem:
                    exposure = "exposure_time"
                if exposure is not None:
                    if "Detector" not in tem:
                        tem["Detector"] = {"Camera": {
                            "exposure": tem[exposure]}}
                    tem["Detector"]["Camera"] = {"exposure": tem[exposure]}
                    del tem[exposure]
            # Move tilt_stage to Stage.tilt_alpha
            if "SEM" in exp["metadata"]["Acquisition_instrument"]:
                sem = exp["metadata"]["Acquisition_instrument"]["SEM"]
                if "tilt_stage" in sem:
                    sem["Stage"] = {"tilt_alpha": sem["tilt_stage"]}
                    del sem["tilt_stage"]

    return exp
Beispiel #51
0
def daycare(t1, t2, t3, n_dcc=29, n_ind=53, n_strains=33, freq_strains_commun=None,
            n_obs=36, time_end=10., batch_size=1, random_state=None):
    r"""Generate cross-sectional data from a stochastic variant of the SIS-model.

    This function simulates the transmission dynamics of bacterial infections in daycare centers
    (DCC) as described in Nummelin et al. [2013]. The observation model is however simplified to
    an equal number of sampled individuals among the daycare centers.

    The model is defined as a continuous-time Markov process with transition probabilities:

    Pr(I_{is}(t+dt)=1 | I_{is}(t)=0) = t1 * E_s(I(t)) + t2 * P_s, if \sum_{j=1}^N_s I_{ij}(t)=0
    Pr(I_{is}(t+dt)=1 | I_{is}(t)=0) = t3 * (t1 * E_s(I(t)) + t2 * P_s), otherwise
    Pr(I_{is}(t+dt)=0 | I_{is}(t)=1) = \gamma

    where:
    I_{is}(t) is the status of carriage of strain s for individual i.
    E_s(I(t)) is the probability of sampling the strain s
    t1 is the rate of transmission from other children at the DCC (\beta in paper).
    t2 is the rate of transmission from the community outside the DCC (\Lambda in paper).
    t3 scales the rate of an infected child being infected with another strain (\theta in paper).
    \gamma is the relative probability of healing from a strain.

    As in the paper, \gamma=1, and the other inferred parameters are relative to it.

    The system is solved using the Direct method [Gillespie, 1977].

    References
    ----------
    Numminen, E., Cheng, L., Gyllenberg, M. and Corander, J. (2013) Estimating the transmission
        dynamics of Streptococcus pneumoniae from strain prevalence data, Biometrics, 69, 748-757.
    Gillespie, D. T. (1977) Exact stochastic simulation of coupled chemical reactions.
        The Journal of Physical Chemistry 81 (25), 2340–2361.

    Parameters
    ----------
    t1 : float or np.array
        Rate of transmission from other individuals at the DCC.
    t2 : float or np.array
        Rate of transmission from the community outside the DCC.
    t3 : float or np.array
        Scaling of co-infection for individuals infected with another strain.
    n_dcc : int, optional
        Number of daycare centers.
    n_ind : int, optional
        Number of individuals in a DCC (same for all).
    n_strains : int, optional
        Number of bacterial strains considered.
    freq_strains_commun : np.array of shape (n_strains,), optional
        Prevalence of each strain in the community outside the DCC. Defaults to 0.1.
    n_obs : int, optional
        Number of individuals sampled from each DCC (same for all).
    time_end : float, optional
        The simulation is run with the Direct method until the time of every DCC in the batch exceeds this.
    batch_size : int, optional
    random_state : np.random.RandomState, optional

    Returns
    -------
    state_obs : np.array
        Observations in shape (batch_size, n_dcc, n_obs, n_strains).

    """
    random_state = random_state or np.random

    t1 = np.asanyarray(t1).reshape((-1, 1, 1, 1))
    t2 = np.asanyarray(t2).reshape((-1, 1, 1, 1))
    t3 = np.asanyarray(t3).reshape((-1, 1, 1, 1))

    if freq_strains_commun is None:
        freq_strains_commun = np.full(n_strains, 0.1)

    prob_commun = t2 * freq_strains_commun

    # the state (infection status) is a 4D tensor for computational performance
    state = np.zeros((batch_size, n_dcc, n_ind, n_strains), dtype=bool)

    # time for each DCC in the batch
    time = np.zeros((batch_size, n_dcc))

    n_factor = 1. / (n_ind - 1)
    gamma = 1.  # relative, see paper
    ind_b_dcc = [np.repeat(np.arange(batch_size), n_dcc), np.tile(np.arange(n_dcc), batch_size)]

    while np.any(time < time_end):
        with np.errstate(divide='ignore', invalid='ignore'):
            # probability of sampling a strain; in paper: E_s(I(t))
            prob_strain_adjust = np.nan_to_num(state / np.sum(state, axis=3, keepdims=True))
            prob_strain = np.sum(prob_strain_adjust, axis=2, keepdims=True)

        # Which individuals are already infected:
        intrainfect_rate = t1 * (np.tile(prob_strain, (1, 1, n_ind, 1)) -
                                 prob_strain_adjust) * n_factor + 1e-9

        # init prob to get infected, same for all
        hazards = intrainfect_rate + prob_commun  # shape (batch_size, n_dcc, n_ind, n_strains)

        # co-infection, depends on the individual's state
        # hazards = np.tile(hazards, (1, 1, n_ind, 1))
        any_infection = np.any(state, axis=3, keepdims=True)
        hazards = np.where(any_infection, t3 * hazards, hazards)

        # (relative) probability to be cured
        hazards[state] = gamma

        # normalize to probabilities
        inv_sum_hazards = 1. / np.sum(hazards, axis=(2, 3), keepdims=True)
        probs = hazards * inv_sum_hazards

        # times until next transition (for each DCC in the batch)
        delta_t = random_state.exponential(inv_sum_hazards[:, :, 0, 0])
        time = time + delta_t

        # choose transition
        probs = probs.reshape((batch_size, n_dcc, -1))
        cumprobs = np.cumsum(probs[:, :, :-1], axis=2)
        x = random_state.uniform(size=(batch_size, n_dcc, 1))
        ind_transit = np.sum(x >= cumprobs, axis=2)

        # update state, need to find the correct indices first
        ind_transit = ind_b_dcc + list(np.unravel_index(ind_transit.ravel(), (n_ind, n_strains)))
        state[ind_transit] = np.logical_not(state[ind_transit])

    # observation model: simply take the first n_obs individuals
    state_obs = state[:, :, :n_obs, :]

    return state_obs
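
A hedged usage sketch for the simulator above; the parameter values and time_end are illustrative only (gamma is fixed to 1 inside the function, as noted in the docstring):

import numpy as np

rng = np.random.RandomState(0)
obs = daycare(t1=3.6, t2=0.6, t3=0.1, time_end=5., batch_size=2, random_state=rng)
print(obs.shape)   # -> (2, 29, 36, 33): (batch, n_dcc, n_obs, n_strains)
print(obs.mean())  # fraction of sampled (individual, strain) slots that are carriers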
Beispiel #52
0
def get_Lstar2(pos, date, alpha = 90.,
                  params = None, coord_system='GSM',
                  Bfield = 'Lgm_B_OP77',
                  internal_model = 'Lgm_B_IGRF',
                  LstarThresh = 10.0,  # beyond this Lsimple, don't compute Lstar; not used in get_Lstar2
                  extended_out = False,
                  LstarQuality = 3, 
                  FootpointHeight=100., 
                  Colorize=False, 
                  cverbosity=0, QinDenton=False):    
## void Lgm_ComputeLstarVersusPA( long int Date, double UTC, Lgm_Vector *u, int nAlpha, double *Alpha, int Quality, int Colorize, Lgm_MagEphemInfo *MagEphemInfo ) {

    # setup a datamodel object to hold the answer
    ans = Lstar_Data()

    # change datetime to Lgm Datelong and UTC
    try:
        datelong = Lgm_CTrans.dateToDateLong(date)
        utc = Lgm_CTrans.dateToFPHours(date)
    except AttributeError:
        raise(TypeError("Date must be a datetime object"))
    else:
        ans['Epoch'] = datamodel.dmarray([date])

    # pitch angles to calculate
    if isinstance(alpha, numbers.Real):
        Alpha = numpy.asanyarray([alpha], dtype=float)
    else:
        Alpha = numpy.asanyarray(alpha, dtype=float)

    # required setup
    MagEphemInfo = Lgm_MagEphemInfo.Lgm_MagEphemInfo(len(Alpha), cverbosity)

    # setup a shortcut to MagModelInfo
    mmi = MagEphemInfo.LstarInfo.contents.mInfo.contents
    Lgm_Set_Coord_Transforms( datelong, utc, mmi.c) # don't think mmi.c needs a pointer()

    # setup a shortcut to LstarInfo
    MagEphemInfo.LstarInfo.contents.VerbosityLevel = cverbosity
    MagEphemInfo.LstarQuality = LstarQuality
    MagEphemInfo.LstarInfo.contents.SaveShellLines = False
    MagEphemInfo.LstarInfo.contents.FindShellPmin = extended_out
    MagEphemInfo.LstarInfo.contents.LSimpleMax = 10.0
    mmi.VerbosityLevel = 0
    mmi.Lgm_LossConeHeight = FootpointHeight

    #MagEphemInfo->LstarInfo->mInfo->Bfield        = Lgm_B_T89;
#    mmi.Bfield = Lgm_Wrap.__getattribute__(Bfield)
    Lgm_Wrap.__getattribute__('Lgm_Set_'+Bfield)(MagEphemInfo.LstarInfo.contents.mInfo)
    Lgm_Wrap.__getattribute__('Lgm_Set_'+internal_model+'_InternalModel')(MagEphemInfo.LstarInfo.contents.mInfo)    
    
    MagEphemInfo.nAlpha = len(Alpha)
    #if len(Alpha) > 1 and Bfield == 'Lgm_B_TS04':
    #    raise(NotImplementedError('TS04 is not thread safe!! Can only do 1 PA at a time'))

    for i in range(len(Alpha)):
        MagEphemInfo.Alpha[i] = Alpha[i]
        
    # convert to **GSM**
    if coord_system == 'GSM':
        try:
            Pgsm = Lgm_Vector.Lgm_Vector(*pos)
        except TypeError:
            raise(TypeError("Position must be listlike" ) )
        ans['position']['GSM'] = datamodel.dmarray(pos, attrs={'units':'Re'})
    elif coord_system == 'SM':
        try:
            Psm = Lgm_Vector.Lgm_Vector(*pos)
        except TypeError:
            raise(TypeError("Position must be listlike" ) )
        Pgsm = Lgm_Vector.Lgm_Vector()
        Lgm_Convert_Coords( pointer(Psm), pointer(Pgsm), SM_TO_GSM, mmi.c )
        ans['position']['SM'] = datamodel.dmarray(pos, attrs={'units':'Re'})
        ans['position']['GSM'] = datamodel.dmarray(Pgsm.tolist(), attrs={'units':'Re'})
    else:
        raise(NotImplementedError("Only GSM or SM input currently supported"))
    
## void Lgm_ComputeLstarVersusPA( long int Date, double UTC, Lgm_Vector *u, int nAlpha, 
##                                 double *Alpha, int Quality, int Colorize, Lgm_MagEphemInfo *MagEphemInfo ) {



    if QinDenton:# and Bfield == 'Lgm_B_TS04': # these are the params we will use.
        # Grab the QinDenton data
        # Lgm_get_QinDenton_at_JD( JD, &p, 1 );
        # JD = Lgm_Date_to_JD( Date, UTC, mInfo->c );
        JD = Lgm_Wrap.Lgm_Date_to_JD(datelong, utc, pointer(mmi.c))
        qd_one = Lgm_Wrap.Lgm_QinDentonOne()
        Lgm_Wrap.Lgm_get_QinDenton_at_JD( JD, pointer(qd_one), cverbosity)
        Lgm_Wrap.Lgm_set_QinDenton(pointer(qd_one), pointer(mmi.c))
        
        ans['params'] = dm.SpaceData()
        for att in dir(qd_one):
            if att[0] != '_':
                ans['params'][att] = getattr(qd_one, att)
    else:
        # save params
        ans['params'] = params
        if params is None:
            params = {}
        # step through the params dict and populate MagEphemInfo
        for key in params:
            if key == 'W':
                double6 = c_double*6
                W = double6(*params[key])
                MagEphemInfo.LstarInfo.contents.mInfo.contents.__setattr__(key, W)
            else:
                MagEphemInfo.LstarInfo.contents.mInfo.contents.__setattr__(key, params[key])
    
    Lgm_ComputeLstarVersusPA( ctypes.c_long(datelong), ctypes.c_double(utc), ctypes.pointer(Pgsm), 
                             ctypes.c_int(len(Alpha)), 
                             np.require(Alpha, requirements=['C']).ctypes.data_as(ctypes.POINTER(ctypes.c_double)), 
                             ctypes.c_int(LstarQuality), ctypes.c_int(Colorize), 
                             ctypes.pointer(MagEphemInfo) )

    for ii, pa in enumerate(Alpha):
        if int(pa) == pa:
            pa = int(pa)
        ans[pa] = dm.SpaceData()
        ans[pa]['LHilton'] = MagEphemInfo.LHilton[ii]
        ans[pa]['LMcIlwain'] = MagEphemInfo.LMcIlwain[ii]
        ans[pa]['Lstar'] = MagEphemInfo.Lstar[ii]

        # Note: these arrays are not owned by Python, so there is no easy way to free the memory...
        if extended_out:
            ans[pa]['Bmin'] = dm.dmarray(np.zeros([MagEphemInfo.nShellPoints[ii], 3]))
            ans[pa]['I'] = dm.dmarray(np.zeros([MagEphemInfo.nShellPoints[ii]]))
            ans[pa]['Pmin'] = dm.dmarray(np.zeros([MagEphemInfo.nShellPoints[ii], 3]))
            ans[pa]['Bmin'][:, 0] = [val.x for val in MagEphemInfo.Shell_Bmin[ii][0:MagEphemInfo.nShellPoints[ii]]] 
            ans[pa]['Bmin'][:, 1] = [val.y for val in MagEphemInfo.Shell_Bmin[ii][0:MagEphemInfo.nShellPoints[ii]]] 
            ans[pa]['Bmin'][:, 2] = [val.z for val in MagEphemInfo.Shell_Bmin[ii][0:MagEphemInfo.nShellPoints[ii]]] 
            ans[pa]['I'][:] = [val for val in MagEphemInfo.ShellI[ii][0:MagEphemInfo.nShellPoints[ii]]]
            ans[pa]['Pmin'][:, 0] = [val.x for val in MagEphemInfo.Shell_Pmin[ii][0:MagEphemInfo.nShellPoints[ii]]] 
            ans[pa]['Pmin'][:, 1] = [val.y for val in MagEphemInfo.Shell_Pmin[ii][0:MagEphemInfo.nShellPoints[ii]]] 
            ans[pa]['Pmin'][:, 2] = [val.z for val in MagEphemInfo.Shell_Pmin[ii][0:MagEphemInfo.nShellPoints[ii]]] 
        
    return ans
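
A hedged usage sketch for get_Lstar2, assuming the Lgm bindings imported by the surrounding module are available; the position, date, and pitch angle are illustrative, and the returned keys are those filled in by the loop above:

import datetime

pos = [-4.2, 1.0, 1.0]                        # GSM position in Re (illustrative)
date = datetime.datetime(2010, 12, 1, 0, 0, 0)
ans = get_Lstar2(pos, date, alpha=90., Bfield='Lgm_B_OP77', QinDenton=True)
print(ans[90]['Lstar'], ans[90]['LMcIlwain'], ans[90]['LHilton'])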
Beispiel #53
0
    def _colorplot2d(self) -> None:
        assert self.data is not None
        xname = self.data.axes()[0]
        yname = self.data.axes()[1]
        xvals = np.asanyarray(self.data.data_vals(xname))
        yvals = np.asanyarray(self.data.data_vals(yname))
        depnames = self.data.dependents()
        depvals = [self.data.data_vals(d) for d in depnames]

        if self.complexRepresentation is ComplexRepresentation.real:
            nAxes = len(depnames)
        else:
            nAxes = 0
            for d in depnames:
                if self.dataIsComplex(d):
                    nAxes += 2
                else:
                    nAxes += 1
        axes = self._makeAxes(nAxes)

        iax = 0
        for zname, zvals in zip(depnames, depvals):

            # otherwise we sometimes raise ComplexWarning. This is basically just
            # cosmetic.
            if isinstance(zvals, np.ma.MaskedArray):
                zvals = zvals.filled(np.nan)

            if self.complexRepresentation is ComplexRepresentation.real \
                    or not self.dataIsComplex(zname):
                colorplot2d(axes[iax],
                            xvals,
                            yvals,
                            np.asanyarray(zvals).real,
                            self.plotType,
                            axLabels=(self.data.label(xname),
                                      self.data.label(yname),
                                      self.data.label(zname)))
                iax += 1

            elif self.complexRepresentation is ComplexRepresentation.realAndImag:
                colorplot2d(axes[iax],
                            xvals,
                            yvals,
                            np.asanyarray(zvals).real,
                            self.plotType,
                            axLabels=(self.data.label(xname),
                                      self.data.label(yname),
                                      f"Re( {self.data.label(zname)} )"))
                colorplot2d(axes[iax + 1],
                            xvals,
                            yvals,
                            np.asanyarray(zvals).imag,
                            self.plotType,
                            axLabels=(self.data.label(xname),
                                      self.data.label(yname),
                                      f"Im( {self.data.label(zname)} )"))
                iax += 2

            elif self.complexRepresentation is ComplexRepresentation.magAndPhase:
                colorplot2d(axes[iax],
                            xvals,
                            yvals,
                            np.abs(np.asanyarray(zvals)),
                            self.plotType,
                            axLabels=(self.data.label(xname),
                                      self.data.label(yname),
                                      f"Abs( {self.data.label(zname)} )"))
                colorplot2d(axes[iax + 1],
                            xvals,
                            yvals,
                            np.angle(np.asanyarray(zvals)),
                            self.plotType,
                            axLabels=(self.data.label(xname),
                                      self.data.label(yname),
                                      f"Arg( {self.data.label(zname)} )"),
                            norm=SymmetricNorm(),
                            cmap=symmetric_cmap)
                iax += 2
Beispiel #54
0
def train_continuous_mnist(args, model, device, train_loader, test_loader):
    ava_test = []
    weight_lst = utils.weight_lst(model)
    w_mat_lst, m_mat_lst, a_mat_lst, b_mat_lst, avg_psi_mat_lst, e_a_mat_lst, e_b_mat_lst = \
        utils.init_param(weight_lst, args.s_init, device, True, args.alpha)
    for task in range(len(test_loader)):
        for epoch in range(1, args.epochs + 1):
            for batch_idx, (data, target) in enumerate(train_loader[0]):
                model.train()
                data, target = data.to(device), target.to(device)
                data = data.view(-1, 784)
                for mc_iter in range(args.train_mc_iters):
                    # Phi ~ MN(0,I,I)
                    phi_mat_lst = utils.gen_phi(w_mat_lst, device)
                    # W = M +B*Phi*A^t
                    utils.randomize_weights(weight_lst, w_mat_lst, m_mat_lst,
                                            a_mat_lst, b_mat_lst, phi_mat_lst)
                    output = model(data)
                    criterion = nn.CrossEntropyLoss()
                    loss = args.batch_size * criterion(output, target)
                    utils.zero_grad(weight_lst)
                    loss.backward()
                    grad_mat_lst = utils.weight_grad(weight_lst, device)
                    utils.aggregate_grads(args, avg_psi_mat_lst, grad_mat_lst)
                    utils.aggregate_e_a(args, e_a_mat_lst, grad_mat_lst,
                                        b_mat_lst, phi_mat_lst)
                    utils.aggregate_e_b(args, e_b_mat_lst, grad_mat_lst,
                                        a_mat_lst, phi_mat_lst)
                # M = M - B*B^t*avg_Phi*A*A^t
                utils.update_m(m_mat_lst, a_mat_lst, b_mat_lst,
                               avg_psi_mat_lst, args.eta)
                utils.update_a_b(a_mat_lst, b_mat_lst, e_a_mat_lst,
                                 e_b_mat_lst, device, args.use_gsvd)
                utils.zero_matrix(avg_psi_mat_lst, e_a_mat_lst, e_b_mat_lst)
            model.eval()
            with torch.no_grad():
                correct = 0
                for data, target in test_loader[task]:
                    data, target = data.to(device), target.to(device)
                    data = data.view(-1, 784)
                    for mc_iter in range(args.train_mc_iters):
                        phi_mat_lst = utils.gen_phi(w_mat_lst, device)
                        utils.randomize_weights(weight_lst, w_mat_lst,
                                                m_mat_lst, a_mat_lst,
                                                b_mat_lst, phi_mat_lst)
                        output = model(data)
                        pred = output.argmax(
                            dim=1, keepdim=True
                        )  # get the index of the max log-probability
                        correct += pred.eq(target.view_as(pred)).sum().item()
                test_acc = 100. * correct / (len(test_loader[task].dataset) *
                                             args.train_mc_iters)
            print(
                '\nTask num {}, Epoch num {} Test Accuracy: {:.2f}%\n'.format(
                    task, epoch, test_acc))
        test_acc_lst = []
        for i in range(task + 1):
            model.eval()
            with torch.no_grad():
                correct = 0
                for data, target in test_loader[i]:
                    data, target = data.to(device), target.to(device)
                    data = data.view(-1, 784)
                    for mc_iter in range(args.train_mc_iters):
                        phi_mat_lst = utils.gen_phi(w_mat_lst, device)
                        utils.randomize_weights(weight_lst, w_mat_lst,
                                                m_mat_lst, a_mat_lst,
                                                b_mat_lst, phi_mat_lst)
                        output = model(data)
                        pred = output.argmax(
                            dim=1, keepdim=True
                        )  # get the index of the max log-probability
                        correct += pred.eq(target.view_as(pred)).sum().item()
                test_acc = 100. * correct / (len(test_loader[i].dataset) *
                                             args.train_mc_iters)
                test_acc_lst.append(test_acc)
            print('\nTraining task num: {} Test Accuracy of task {}: {:.2f}%\n'.
                  format(task, i, test_acc))
        print(test_acc_lst)
        ava_test.append(np.average(np.asanyarray(test_acc_lst)))
    return ava_test
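
The update loop above samples weights through the reparameterization noted in its comments: Phi ~ MN(0, I, I) and W = M + B*Phi*A^T, i.e. a matrix-normal draw with row covariance B B^T and column covariance A A^T. A minimal, hedged sketch of that single step (shapes and values are hypothetical):

import torch

out_dim, in_dim = 4, 3
M = torch.zeros(out_dim, in_dim)      # mean matrix
B = 0.1 * torch.eye(out_dim)          # row-covariance factor (B @ B.T)
A = 0.1 * torch.eye(in_dim)           # column-covariance factor (A @ A.T)
Phi = torch.randn(out_dim, in_dim)    # Phi ~ MN(0, I, I)
W = M + B @ Phi @ A.T
print(W.shape)                        # -> torch.Size([4, 3])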
Beispiel #55
0
    def _plot1dSinglepanel(self) -> None:
        assert self.data is not None
        xname = self.data.axes()[0]
        xvals = np.asanyarray(self.data.data_vals(xname))
        depnames = self.data.dependents()
        depvals = [self.data.data_vals(d) for d in depnames]

        # count the number of panels we need.
        if self.complexRepresentation is ComplexRepresentation.magAndPhase or\
         self.complexRepresentation is ComplexRepresentation.realAndImag:
            nAxes = 2
        else:
            nAxes = 1
        axes = self._makeAxes(nAxes)

        if len(depvals) > 1:
            ylbl = self.data.label(depnames[0])
            phlbl: Optional[str] = f"Arg({depnames[0]})"
        else:
            ylbl = None
            phlbl = None

        for yname, yvals in zip(depnames, depvals):
            # otherwise we sometimes raise ComplexWarning. This is basically just
            # cosmetic.
            if isinstance(yvals, np.ma.MaskedArray):
                yvals = yvals.filled(np.nan)

            if self.dataIsComplex(yname):
                if self.complexRepresentation is ComplexRepresentation.realAndImag:
                    plot1dTrace(axes[0],
                                xvals,
                                np.asanyarray(yvals).real,
                                axLabels=(self.data.label(xname), ylbl),
                                curveLabel=f"Re({self.data.label(yname)})",
                                addLegend=(yname == depnames[-1]))
                    plot1dTrace(axes[1],
                                xvals,
                                np.asanyarray(yvals).imag,
                                axLabels=(self.data.label(xname), ylbl),
                                curveLabel=f"Im({yname})",
                                addLegend=(yname == depnames[-1]))
                elif self.complexRepresentation is ComplexRepresentation.magAndPhase:
                    plot1dTrace(axes[0],
                                xvals,
                                np.real(np.abs(yvals)),
                                axLabels=(self.data.label(xname), ylbl),
                                curveLabel=f"Abs({self.data.label(yname)})",
                                addLegend=(yname == depnames[-1]))
                    plot1dTrace(axes[1],
                                xvals,
                                np.angle(yvals),
                                axLabels=(self.data.label(xname), phlbl),
                                curveLabel=f"Arg({yname})",
                                addLegend=(yname == depnames[-1]))
                elif self.complexRepresentation is ComplexRepresentation.real:
                    plot1dTrace(axes[0],
                                xvals,
                                np.asanyarray(yvals).real,
                                axLabels=(self.data.label(xname), ylbl),
                                curveLabel=f"Re({self.data.label(yname)})",
                                addLegend=(yname == depnames[-1]))
            else:
                plot1dTrace(axes[0],
                            xvals,
                            np.asanyarray(yvals),
                            axLabels=(self.data.label(xname), ylbl),
                            curveLabel=self.data.label(yname),
                            addLegend=(yname == depnames[-1]))
Beispiel #56
0
    def __init__(self,
                 endog,
                 exog=None,
                 order=(1, 0),
                 trend='c',
                 error_cov_type='unstructured',
                 measurement_error=False,
                 enforce_stationarity=True,
                 enforce_invertibility=True,
                 trend_offset=1,
                 **kwargs):

        # Model parameters
        self.error_cov_type = error_cov_type
        self.measurement_error = measurement_error
        self.enforce_stationarity = enforce_stationarity
        self.enforce_invertibility = enforce_invertibility

        # Save the given orders
        self.order = order

        # Model orders
        self.k_ar = int(order[0])
        self.k_ma = int(order[1])

        # Check for valid model
        if error_cov_type not in ['diagonal', 'unstructured']:
            raise ValueError('Invalid error covariance matrix type'
                             ' specification.')
        if self.k_ar == 0 and self.k_ma == 0:
            raise ValueError('Invalid VARMAX(p,q) specification; at least one'
                             ' p,q must be greater than zero.')

        # Warn for VARMA model
        if self.k_ar > 0 and self.k_ma > 0:
            warn(
                'Estimation of VARMA(p,q) models is not generically robust,'
                ' due especially to identification issues.', EstimationWarning)

        # Trend
        self.trend = trend
        self.trend_offset = trend_offset
        self.polynomial_trend, self.k_trend = prepare_trend_spec(self.trend)
        self._trend_is_const = (self.polynomial_trend.size == 1
                                and self.polynomial_trend[0] == 1)

        # Exogenous data
        (self.k_exog, exog) = prepare_exog(exog)

        # Note: at some point in the future might add state regression, as in
        # SARIMAX.
        self.mle_regression = self.k_exog > 0

        # We need to have an array or pandas at this point
        if not _is_using_pandas(endog, None):
            endog = np.asanyarray(endog)

        # Model order
        # Used internally in various places
        _min_k_ar = max(self.k_ar, 1)
        self._k_order = _min_k_ar + self.k_ma

        # Number of states
        k_endog = endog.shape[1]
        k_posdef = k_endog
        k_states = k_endog * self._k_order

        # By default, initialize as stationary
        kwargs.setdefault('initialization', 'stationary')

        # By default, use LU decomposition
        kwargs.setdefault('inversion_method', INVERT_UNIVARIATE | SOLVE_LU)

        # Initialize the state space model
        super(VARMAX, self).__init__(endog,
                                     exog=exog,
                                     k_states=k_states,
                                     k_posdef=k_posdef,
                                     **kwargs)

        # Set as time-varying model if we have time-trend or exog
        if self.k_exog > 0 or (self.k_trend > 0 and not self._trend_is_const):
            self.ssm._time_invariant = False

        # Initialize the parameters
        self.parameters = OrderedDict()
        self.parameters['trend'] = self.k_endog * self.k_trend
        self.parameters['ar'] = self.k_endog**2 * self.k_ar
        self.parameters['ma'] = self.k_endog**2 * self.k_ma
        self.parameters['regression'] = self.k_endog * self.k_exog
        if self.error_cov_type == 'diagonal':
            self.parameters['state_cov'] = self.k_endog
        # These parameters fill in a lower-triangular matrix which is then
        # dotted with itself to get a positive definite matrix.
        elif self.error_cov_type == 'unstructured':
            self.parameters['state_cov'] = (int(self.k_endog *
                                                (self.k_endog + 1) / 2))
        self.parameters['obs_cov'] = self.k_endog * self.measurement_error
        self.k_params = sum(self.parameters.values())

        # Initialize trend data
        self._trend_data = prepare_trend_data(self.polynomial_trend,
                                              self.k_trend,
                                              self.nobs,
                                              offset=self.trend_offset)

        # Initialize known elements of the state space matrices

        # If we have exog effects, then the state intercept needs to be
        # time-varying
        if (self.k_trend > 0 and not self._trend_is_const) or self.k_exog > 0:
            self.ssm['state_intercept'] = np.zeros((self.k_states, self.nobs))
            # self.ssm['obs_intercept'] = np.zeros((self.k_endog, self.nobs))

        # The design matrix is just an identity for the first k_endog states
        idx = np.diag_indices(self.k_endog)
        self.ssm[('design', ) + idx] = 1

        # The transition matrix is described in four blocks, where the upper
        # left block is in companion form with the autoregressive coefficient
        # matrices (so it is shaped k_endog * k_ar x k_endog * k_ar) ...
        if self.k_ar > 0:
            idx = np.diag_indices((self.k_ar - 1) * self.k_endog)
            idx = idx[0] + self.k_endog, idx[1]
            self.ssm[('transition', ) + idx] = 1
        # ... and the lower right block is in companion form with zeros as the
        # coefficient matrices (it is shaped k_endog * k_ma x k_endog * k_ma).
        idx = np.diag_indices((self.k_ma - 1) * self.k_endog)
        idx = (idx[0] + (_min_k_ar + 1) * self.k_endog,
               idx[1] + _min_k_ar * self.k_endog)
        self.ssm[('transition', ) + idx] = 1

        # The selection matrix is described in two blocks, where the upper
        # block selects the all k_posdef errors in the first k_endog rows
        # (the upper block is shaped k_endog * k_ar x k) and the lower block
        # also selects all k_posdef errors in the first k_endog rows (the lower
        # block is shaped k_endog * k_ma x k).
        idx = np.diag_indices(self.k_endog)
        self.ssm[('selection', ) + idx] = 1
        idx = idx[0] + _min_k_ar * self.k_endog, idx[1]
        if self.k_ma > 0:
            self.ssm[('selection', ) + idx] = 1

        # Cache some indices
        if self._trend_is_const and self.k_exog == 0:
            self._idx_state_intercept = np.s_['state_intercept', :k_endog, :]
        elif self.k_trend > 0 or self.k_exog > 0:
            self._idx_state_intercept = np.s_['state_intercept', :k_endog, :-1]
        if self.k_ar > 0:
            self._idx_transition = np.s_['transition', :k_endog, :]
        else:
            self._idx_transition = np.s_['transition', :k_endog, k_endog:]
        if self.error_cov_type == 'diagonal':
            self._idx_state_cov = (('state_cov', ) +
                                   np.diag_indices(self.k_endog))
        elif self.error_cov_type == 'unstructured':
            self._idx_lower_state_cov = np.tril_indices(self.k_endog)
        if self.measurement_error:
            self._idx_obs_cov = ('obs_cov', ) + np.diag_indices(self.k_endog)

        # Cache some slices
        def _slice(key, offset):
            length = self.parameters[key]
            param_slice = np.s_[offset:offset + length]
            offset += length
            return param_slice, offset

        offset = 0
        self._params_trend, offset = _slice('trend', offset)
        self._params_ar, offset = _slice('ar', offset)
        self._params_ma, offset = _slice('ma', offset)
        self._params_regression, offset = _slice('regression', offset)
        self._params_state_cov, offset = _slice('state_cov', offset)
        self._params_obs_cov, offset = _slice('obs_cov', offset)

        # Update _init_keys attached by super
        self._init_keys += [
            'order', 'trend', 'error_cov_type', 'measurement_error',
            'enforce_stationarity', 'enforce_invertibility'
        ] + list(kwargs.keys())
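
To make the transition-matrix comments above concrete, here is a hedged sketch of the companion-form upper-left block for a hypothetical VAR(2) with k_endog = 2; A1 and A2 are made-up coefficient matrices, and the shifted identity is placed exactly as the diag_indices code above does:

import numpy as np

k_endog, k_ar = 2, 2
A1 = np.array([[0.5, 0.1],
               [0.0, 0.4]])
A2 = np.array([[0.2, 0.0],
               [0.1, 0.1]])

T = np.zeros((k_endog * k_ar, k_endog * k_ar))
T[:k_endog, :k_endog] = A1            # first block row holds the AR matrices
T[:k_endog, k_endog:] = A2
idx = np.diag_indices((k_ar - 1) * k_endog)
T[idx[0] + k_endog, idx[1]] = 1       # shifted identity, as in the code above
print(T)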
Beispiel #57
0
def is_integer_type(x):
    "Checks whether the array is of an integral type."
    return issubclass(np.asanyarray(x).dtype.type, np.integer)
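
A quick, hedged illustration of the check above:

import numpy as np

print(is_integer_type([1, 2, 3]))     # True
print(is_integer_type([1.0, 2.0]))    # False
print(is_integer_type(np.arange(3)))  # True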
Beispiel #58
0
    def _plot1dSeparatePanels(self) -> None:
        assert self.data is not None
        xname = self.data.axes()[0]
        xvals = np.asanyarray(self.data.data_vals(xname))
        depnames = self.data.dependents()
        depvals = [self.data.data_vals(d) for d in depnames]

        if self.complexRepresentation is ComplexRepresentation.real:
            nAxes = len(depnames)
        else:
            nAxes = 0
            for d in depnames:
                if self.dataIsComplex(d):
                    nAxes += 2
                else:
                    nAxes += 1
        axes = self._makeAxes(nAxes)
        hasLegend = [False for ax in axes]

        iax = 0
        for yname, yvals in zip(depnames, depvals):

            # otherwise we sometimes raise ComplexWarning. This is basically just
            # cosmetic.
            if isinstance(yvals, np.ma.MaskedArray):
                yvals = yvals.filled(np.nan)

            if self.dataIsComplex(yname):
                if self.complexRepresentation is ComplexRepresentation.realAndImag:
                    plot1dTrace(axes[iax],
                                xvals,
                                np.real(yvals),
                                axLabels=(self.data.label(xname),
                                          f"Re({self.data.label(yname)})"))
                    plot1dTrace(axes[iax + 1],
                                xvals,
                                np.imag(yvals),
                                axLabels=(self.data.label(xname),
                                          f"Im({self.data.label(yname)})"))
                    iax += 2
                elif self.complexRepresentation is ComplexRepresentation.magAndPhase:
                    if self.dataIsComplex(yname):
                        plot1dTrace(
                            axes[iax],
                            xvals,
                            np.real(np.abs(yvals)),
                            axLabels=(self.data.label(xname),
                                      f"Abs({self.data.label(yname)})"))
                        plot1dTrace(axes[iax + 1],
                                    xvals,
                                    np.angle(yvals),
                                    axLabels=(self.data.label(xname),
                                              f"Arg({yname})"))
                        iax += 2
                elif self.complexRepresentation is ComplexRepresentation.real:
                    if self.dataIsComplex(yname):
                        plot1dTrace(
                            axes[iax],
                            xvals,
                            np.asanyarray(yvals).real,
                            axLabels=(self.data.label(xname),
                                      f"Re({self.data.label(yname)})"),
                        )
                        iax += 1
            else:
                plot1dTrace(axes[iax],
                            xvals,
                            np.asanyarray(yvals),
                            axLabels=(self.data.label(xname),
                                      self.data.label(yname)))
                iax += 1
Beispiel #59
0
msk = np.random.rand(len(df)) < 0.8
train = df[msk]
test = df[~msk]

# In[26]:

plt.scatter(train.ENGINESIZE, train.CO2EMISSIONS, color='blue')
plt.xlabel("Engine size")
plt.ylabel("Emission")
plt.show()

# In[27]:

from sklearn import linear_model
regr = linear_model.LinearRegression()
train_x = np.asanyarray(train[['ENGINESIZE']])
train_y = np.asanyarray(train[['CO2EMISSIONS']])
regr.fit(train_x, train_y)
# The coefficients
print('Coefficients: ', regr.coef_)
print('Intercept: ', regr.intercept_)

# In[28]:

plt.scatter(train.ENGINESIZE, train.CO2EMISSIONS, color='blue')
plt.plot(train_x, regr.coef_[0][0] * train_x + regr.intercept_[0], '-r')
plt.xlabel("Engine size")
plt.ylabel("Emission")
plt.show()

# In[29]:
Beispiel #60
0
def is_categorical_type(ary):
    "Checks whether the array is either integral or boolean."
    ary = np.asanyarray(ary)
    return is_integer_type(ary) or ary.dtype.kind == 'b'