Example #1
    def _set_startprob(self, startprob):

        if startprob is None:
            startprob = np.tile(1.0 / self.n_components, self.n_components)
        else:
            startprob = np.asarray(startprob, dtype=float)

            if not np.all(startprob <= 1.0):
                normalize(startprob)

            if len(startprob) != self.n_components:
                if len(startprob) == self.n_unique:
                    startprob_split = np.copy(startprob) / (1.0+self.n_tied)
                    startprob = np.zeros(self.n_components)
                    for u in range(self.n_unique):
                        for t in range(self.n_chain):
                            startprob[u*(self.n_chain)+t] = \
                                startprob_split[u].copy()
                else:
                    raise ValueError("cannot match shape of startprob")

        if not np.allclose(np.sum(startprob), 1.0):
            raise ValueError('startprob must sum to 1.0')

        self._log_startprob = np.log(np.asarray(startprob).copy())
Example #2
    def _make_grad_prod(ans, x):
        x = np.asarray(x)

        def gradient_product(g):
            return np.full(x.shape, g) * ans * (1 - ans)

        return gradient_product
Example #3
    def generate_random_problem(cls, k, l, n, sigma=1.):
        """Generate a random linear regression problem.

        Parameters
        ----------
        k : int, dimension of X
        l : int, dimension of Y
        n : int, number of observations
        sigma : float, expected norm of additive noise on Y

        Returns
        -------
        regression_problem : LinearRegressionProblem, class combining loss, data, solver
        """

        # independent gaussian vectors with approx unit norm
        X = 1 / np.sqrt(k) * np.random.standard_normal(size=(k, n))
        X -= np.mean(X, axis=1)[:, None]

        # unit norm transformation with uniform orientation
        W = np.asarray(
            [random_matrix.generate_random_unit_vector(dim=k) for _ in range(l)])\
            .T.squeeze()

        Y = np.dot(W.T, X)
        Y += np.sqrt(sigma / l) * np.random.standard_normal(size=Y.shape)

        return cls(X, Y)
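A hypothetical invocation of this classmethod, assuming the enclosing class is named LinearRegressionProblem as the docstring suggests:

    problem = LinearRegressionProblem.generate_random_problem(k=10, l=2, n=500)
    # X has shape (10, 500) and Y has shape (2, 500) before being wrapped by cls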
Example #4
    def run_algo(self, algo, **kwargs):
        # Get function and compute gradient
        self.g = self.least_squares
        self.grad = compute_grad(self.g)

        # choose algorithm
        self.algo = algo
        if self.algo == 'gradient_descent':
            self.alpha = 10**-3
            if 'alpha' in kwargs:
                self.alpha = kwargs['alpha']

        self.max_its = 10
        if 'max_its' in kwargs:
            self.max_its = kwargs['max_its']

        self.w_init = np.random.randn(2)
        if 'w_init' in kwargs:
            self.w_init = kwargs['w_init']
            self.w_init = np.asarray([float(s) for s in self.w_init])
            self.w_init.shape = (len(self.w_init), 1)

        # run algorithm of choice
        if self.algo == 'gradient_descent':
            self.w_hist = []
            self.gradient_descent()
        if self.algo == 'newtons_method':
            self.hess = compute_hess(self.g)  # hessian of input function
            self.beta = 0
            if 'beta' in kwargs:
                self.beta = kwargs['beta']
            self.w_hist = []
            self.newtons_method()
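A hypothetical call sketch for the kwargs pattern above; the instance name demo is an assumption, and the two algorithm strings are the ones run_algo handles:

    demo.run_algo('gradient_descent', alpha=1e-2, max_its=50,
                  w_init=[1.0, -1.0])           # w_init is coerced to a (2, 1) float array
    demo.run_algo('newtons_method', beta=1e-5)  # beta regularizes the Newton step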
Example #5
File: data.py Project: mtyhon/rPSMF
def generate_normal_data(nonlinearity=None,
                         d=20,
                         T=500,
                         n_pred=250,
                         r=6,
                         var=0.1,
                         **kwargs):
    # the default of None cannot be used: nonlinearity is called at every step below
    if nonlinearity is None:
        raise ValueError("a nonlinearity function must be provided")
    C_true = np.random.randn(d, r)
    theta_true = np.asarray([[th * 1e-4] for th in range(1, r + 1)])

    x_true = {0: np.random.randn(r, 1)}

    y_true = {}
    y_obs = {}

    for t in range(1, T + n_pred + 1):
        x_true[t] = nonlinearity(theta_true, x_true[t - 1], t)
        y_true[t] = C_true @ x_true[t]
        y_obs[t] = C_true @ x_true[t] + np.sqrt(var) * np.random.randn(d, 1)

    y_train = {t: y_obs[t] for t in range(1, T + 1)}

    return dict(
        C_true=C_true,
        theta_true=theta_true,
        x_true=x_true,
        y_true=y_true,
        y_obs=y_obs,
        y_train=y_train,
    )
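A minimal usage sketch; the tanh state update passed below is an arbitrary illustrative choice, not the project's dynamics:

    data = generate_normal_data(
        nonlinearity=lambda theta, x, t: np.tanh(x),  # hypothetical dynamics
        d=10, T=100, n_pred=50, r=3)
    print(len(data["y_train"]))  # 100 training observations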
Example #6
def detector_response_dCS(frequencies,
                          chirpm,
                          symmratio,
                          spin1,
                          spin2,
                          Luminosity_Distance,
                          theta,
                          phi,
                          iota,
                          alpha_squared,
                          bppe,
                          NSflag,
                          cosmology=cosmology.Planck15):
    mass1 = utilities.calculate_mass1(chirpm, symmratio)
    mass2 = utilities.calculate_mass2(chirpm, symmratio)
    template = dcsimr_detector_frame(mass1=mass1,
                                     mass2=mass2,
                                     spin1=spin1,
                                     spin2=spin2,
                                     collision_time=0,
                                     collision_phase=0,
                                     Luminosity_Distance=Luminosity_Distance,
                                     phase_mod=alpha_squared,
                                     cosmo_model=cosmology,
                                     NSflag=NSflag)
    frequencies = np.asarray(frequencies)
    amp, phase, hreal = template.calculate_waveform_vector(frequencies)
    h_complex = amp * (np.exp(-1j * phase))

    Fplus = (1 / 2) * (1 + np.cos(theta)**2) * np.cos(2 * phi)
    Fcross = np.cos(theta) * np.sin(2 * phi)
    Q = (1 + np.cos(iota)**2) / 2 * Fplus + 1j * Fcross * np.cos(iota)
    template_detector_response = h_complex * Q
    return template_detector_response
Example #7
 def print_training_prediction(weights, train_inputs, train_targets):
     print("Training text                         Predicted text")
     logprobs = np.asarray(pred_fun(weights, train_inputs))
     for t in range(logprobs.shape[1]):
         training_text  = one_hot_to_string(train_targets[:,t,:])
         predicted_text = one_hot_to_string(logprobs[:,t,:])
         print(training_text.replace('\n', ' ') + "| " +
               predicted_text.replace('\n', ' '))
Example #8
        def multivariate_t_rvs(self, m, S, random_state = None):
            '''generate random variables of multivariate t distribution
            Parameters
            ----------
            m : array_like
                mean of random variable, length determines dimension of random variable
            S : array_like
                square array of covariance  matrix
            df : int or float
                degrees of freedom, taken from ``self.degree_freedom``
            n : int
                number of observations; the returned random array will be
                (n, len(m)); fixed to n = 1 here
            random_state : np.random.RandomState, optional
                source of randomness; a fresh RandomState is used if None
            Returns
            -------
            rvs : ndarray, (n, len(m))
                each row is an independent draw of a multivariate t distributed
                random variable
            '''
            if random_state is None:
                random_state = np.random.RandomState()
            m = np.asarray(m)
            d = self.n_features
            df = self.degree_freedom
            n = 1
            if df == np.inf:
                x = 1.
            else:
                x = random_state.chisquare(df, n) / df

            z = random_state.multivariate_normal(np.zeros(d), S, (n,))
            return m + z / np.sqrt(x)[:, None]
Example #9
 def tanh_feats(self,D):
     F = [np.ones((len(self.x)))]
     for deg in range(D):
         F.append(np.tanh(self.R[deg,0] + self.R[deg,1]*self.x))
     F = np.asarray(F)
     F.shape = (D+1,len(self.x))
     return F.T
Example #10
 def __new__(
     cls,
     array,
     name="unnamed",
     prior=None,
     constraint=None,
     step=0,
     std=None,
     m=None,
     v=None,
     vhat=None,
     fixed=False,
 ):
     obj = np.asarray(array).view(cls)  # asarray preserves the input's dtype
     obj.name = name
     if prior is not None:
         assert isinstance(prior, Prior)
     obj.prior = prior
     if constraint is not None:
         assert isinstance(constraint, (Constraint, ConstraintChain))
     obj.constraint = constraint
     obj.step = step
     obj.std = std
     obj.m = m
     obj.v = v
     obj.vhat = vhat
     obj.fixed = fixed
     return obj
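One caveat with this pattern: attributes set in __new__ are lost when NumPy creates views or ufunc results from the subclass. A minimal __array_finalize__ sketch (assuming the same attribute names as above) that propagates them:

 def __array_finalize__(self, obj):
     if obj is None:
         return  # called from __new__; attributes are set there
     for attr in ("name", "prior", "constraint", "step", "std",
                  "m", "v", "vhat", "fixed"):
         setattr(self, attr, getattr(obj, attr, None))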
Example #11
    def setup_from_params(self, parameters, start=2, imax=100,
                          minmax=(2e-07, 4.5e-05),
                          find_mode=True, stds=None):

        """
        Find bounds of integration for a given parameterization

        Attempts to set model-wide variables appropriate to the model
        given the parameters. Does a binary search over the kernel values
        to find the points whose value is between the minmax bounds.

        This function does not guarantee results when the starting point
        is not the mode of the kernel or if the function is not
        monotonically decreasing away from the mode.

        This function is only necessary when working without data, since
        otherwise the bounds of integration can be inferred from the data.

        Args:
            parameters (tuple, list, or np.array): parameter values to
                initialize the model
            start (int | float): if an int, start from parameters[start];
                if a float, start at that value; otherwise start at 0.
            imax(int): maximum number of steps before quitting search
            minmax(tuple): min, max values of kernel, by default searches
                for the range (2e-07, 4.5e-05)
            find_mode(bool): If True, searches for and begins from mode
                (default True)

        """
        assert isinstance(parameters, (tuple, list, np.ndarray))
        assert len(parameters) == len(self.kernel.pnames)

        support_fun = lambda x: self.kernel.kernel(x, parameters)

        if isinstance(start, int):
            start_value = parameters[start]
        elif isinstance(start, float):
            start_value = start
        else:
            start_value = 0.

        if find_mode is True:
            mode_fun = lambda x: -support_fun(x)
            mode = sp.optimize.minimize(mode_fun, start_value).x[0]

        else:
            mode = start_value

        self.i_min, self.i_max = \
            mathstats.find_support_bounds(support_fun,
                                          start=mode,
                                          which='both',
                                          minmax=minmax,
                                          imax=imax)

        self._itick_setup()

        self.params0 = copy.copy(np.asarray(parameters))
        self.params = parameters
Example #12
    def rvs(self, theta, n, random_state=None, return_histories=False):

        logging.info('Simulating %s epidemic evolutions for theta = %s', n,
                     theta)

        rng = check_random_state(random_state)

        all_x = []
        histories = []

        for i in range(n):
            if return_histories:
                _, (state,
                    history) = self._simulate_transmission(theta,
                                                           rng,
                                                           return_history=True)
                histories.append(history)
            else:
                _, state = self._simulate_transmission(theta,
                                                       rng,
                                                       return_history=False)

            x = self._calculate_observables(state)

            all_x.append(x)

        all_x = np.asarray(all_x)

        if return_histories:
            return all_x, histories
        return all_x
Example #13
def calculate_activation_contours(f,
                                  mn=0.05,
                                  mx=0.3,
                                  N=5,
                                  contour_lims=[-0.5, 1],
                                  contour_eps=1e-2,
                                  contour_tolerance=1.):
    """
    Compute contours of f by euler continuation from starting points
    linearly spaced along the x-axis between mn and mx.
    """
    starting_points = [
        np.asarray([x_val, 0]) for x_val in np.linspace(mn, mx, num=N)
    ]

    activation_contours = [
        trace_contour(starting_point,
                      f,
                      lims=contour_lims,
                      eps=contour_eps,
                      tolerance=contour_tolerance)
        for starting_point in starting_points
    ]

    contour_vals = [f(starting_point) for starting_point in starting_points]

    return activation_contours, contour_vals
Example #14
    def initialize_general_network_weights(self, layer_sizes, num_kernels,
                                           scale):
        # container for entire weight tensor
        weights = []
        kernel_weights = []

        # create a 3x3 initial weight matrix for each of the desired
        # number of kernels
        for k in range(num_kernels):
            # make weight matrix
            weight = scale * np.random.randn(3, 3)
            kernel_weights.append(weight)
        kernel_weights = np.asarray(kernel_weights)

        # loop over desired layer sizes and create appropriately sized initial
        # weight matrix for each layer
        for k in range(len(layer_sizes) - 1):
            # get layer sizes for current weight matrix
            U_k = layer_sizes[k]
            U_k_plus_1 = layer_sizes[k + 1]

            # make weight matrix
            weight = scale * np.random.randn(U_k + 1, U_k_plus_1)
            weights.append(weight)

        # re-express weights so that w_init[0] = omega_inner contains all
        # internal weight matrices, and w_init[1] = w contains weights of
        # final linear combination in predict function
        w_init = [weights[:-1], kernel_weights, weights[-1]]
        return w_init
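A hypothetical call, assuming the method lives on a model object named model and that layer_sizes includes the input and output dimensions:

    w_init = model.initialize_general_network_weights(
        layer_sizes=[10, 20, 20, 1], num_kernels=8, scale=0.1)
    inner, kernels, final = w_init
    print(len(inner), kernels.shape, final.shape)  # 2 (8, 3, 3) (21, 1)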
Example #15
def _centered(arr, newshape):
    """Return the center newshape portion of the array.

    This function is used by `fft_convolve` to remove
    the zero padded region of the convolution.

    Note: If the array shape is odd and the target is even,
    the center of `arr` is shifted to the center-right
    pixel position.
    This is slightly different from the scipy implementation,
    which uses the center-left pixel for the array center.
    The reason for the difference is that we have adopted
    the `np.fft.fftshift` convention, so that we can convert
    back and forth between FFT standard order (zero frequency
    and position in the bottom left) and having the zero
    position in the center.
    """
    newshape = np.asarray(newshape)
    currshape = np.array(arr.shape)

    if not np.all(newshape <= currshape):
        msg = "arr must be larger than newshape in both dimensions, received {0}, and {1}"
        raise ValueError(msg.format(arr.shape, newshape))

    startind = (currshape - newshape + 1) // 2
    endind = startind + newshape
    myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]

    return arr[tuple(myslice)]
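A small sanity check of the index arithmetic above, worked out by hand:

    import numpy as np

    arr = np.arange(5)            # odd length
    print(_centered(arr, (4,)))   # [1 2 3 4]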
Example #16
def _pad(arr, newshape, axes=None):
    """Pad an array to fit into newshape

    Pad `arr` with zeros to fit into newshape,
    which uses the `np.fft.fftshift` convention of moving
    the center pixel of `arr` (if `arr.shape` is odd) to
    the center-right pixel in an even shaped `newshape`.
    """
    if axes is None:
        newshape = np.asarray(newshape)
        currshape = np.array(arr.shape)
        dS = newshape - currshape
        startind = (dS + 1) // 2
        endind = dS - startind
        pad_width = list(zip(startind, endind))
    else:
        # only pad the axes that will be transformed
        pad_width = [(0, 0) for axis in arr.shape]
        try:
            len(axes)
        except TypeError:
            axes = [axes]
        for a, axis in enumerate(axes):
            dS = newshape[a] - arr.shape[axis]
            startind = (dS + 1) // 2
            endind = dS - startind
            pad_width[axis] = (startind, endind)
    return np.pad(arr, pad_width, mode="constant")
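A quick round-trip sketch: padding with _pad and then cropping with _centered from the previous example recovers the original array:

    import numpy as np

    a = np.array([1., 2., 3.])
    p = _pad(a, (6,))           # array([0., 0., 1., 2., 3., 0.])
    print(_centered(p, (3,)))   # [1. 2. 3.]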
Example #17
    def mle_batch(self, data, batch, k):
        """
        Calculates LID values of data w.r.t batch
        Args:
            data: samples to calculate LIDs of
            batch: samples to calculate LIDs against
            k: the number of nearest neighbors to consider

        Returns: the calculated LID values

        """
        k = min(k, len(data) - 1)
        f = lambda v: -k / np.sum(np.log(v / v[-1]))

        gamma = self.classifier.kernel.gamma
        if gamma is None:
            gamma = 1.0 / self.training_data_ndarray.shape[1]

        if batch is None:
            # K = cdist(data, data)
            K = rbf_kernel(data, Y=data, gamma=gamma)
            K = np.reciprocal(K)
            # get the closest k neighbours
            a = np.apply_along_axis(np.sort, axis=1, arr=K)[:, 1:k + 1]
        else:
            batch = np.asarray(batch, dtype=np.float32)
            # K = cdist(data, batch)
            K = rbf_kernel(data, Y=batch, gamma=gamma)
            K = np.reciprocal(K)
            # get the closest k neighbours
            a = np.apply_along_axis(np.sort, axis=1, arr=K)[:, 0:k]

        a = np.apply_along_axis(f, axis=1, arr=a)
        return np.nan_to_num(a)
Example #18
 def poly_feats(self,D):
     F = []
     for deg in range(D+1):
         F.append(self.x**deg)
     F = np.asarray(F)
     F.shape = (D+1,len(self.x))
     return F.T
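For reference, assuming an instance obj with a 1-D obj.x, the returned matrix matches NumPy's increasing-power Vandermonde matrix:

    np.allclose(obj.poly_feats(D), np.vander(obj.x, N=D + 1, increasing=True))  # True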
Example #19
def compose(T, R, Z, S=None):
    ''' Compose translations, rotations, zooms, [shears]  to affine

    Parameters
    ----------
    T : array-like shape (N,)
        Translations, where N is usually 3 (3D case)
    R : array-like shape (N,N)
        Rotation matrix where N is usually 3 (3D case)
    Z : array-like shape (N,)
        Zooms, where N is usually 3 (3D case)
    S : array-like, shape (P,), optional
       Shear vector, such that shears fill upper triangle above
       diagonal to form shear matrix.  P is the (N-2)th Triangular
       number, which happens to be 3 for a 4x4 affine (3D case)

    Returns
    -------
    A : array, shape (N+1, N+1)
        Affine transformation matrix where N usually == 3
        (3D case)

    Examples
    --------
    >>> T = [20, 30, 40] # translations
    >>> R = [[0, -1, 0], [1, 0, 0], [0, 0, 1]] # rotation matrix
    >>> Z = [2.0, 3.0, 4.0] # zooms
    >>> A = compose(T, R, Z)
    >>> A
    array([[ 0., -3.,  0., 20.],
           [ 2.,  0.,  0., 30.],
           [ 0.,  0.,  4., 40.],
           [ 0.,  0.,  0.,  1.]])
    >>> S = np.zeros(3)
    >>> B = compose(T, R, Z, S)
    >>> np.all(A == B)
    True

    A null set

    >>> compose(np.zeros(3), np.eye(3), np.ones(3), np.zeros(3))
    array([[1., 0., 0., 0.],
           [0., 1., 0., 0.],
           [0., 0., 1., 0.],
           [0., 0., 0., 1.]])
    '''
    n = len(T)
    R = np.asarray(R)
    if R.shape != (n, n):
        raise ValueError('Expecting shape (%d,%d) for rotations' % (n, n))
    A = np.eye(n + 1)
    if S is not None:
        Smat = striu2mat(S)
        ZS = np.dot(np.diag(Z), Smat)
    else:
        ZS = np.diag(Z)
    A[:n, :n] = np.dot(R, ZS)
    A[:n, n] = T[:]
    return A
Example #20
def random_eval_experiment():
    '''
    Experiment illustrating how quickly global random evaluation fails as a
    method of optimization. The output is the minimum value attained by
    randomly sampling the cube [-1,1] x [-1,1] x ... x [-1,1] and evaluating
    a simple quadratic 100, 1000, or 10000 times. The dimension is increased
    from 1 to 100 and the minimum is plotted for each dimension.
    '''
    # define symmetric quadratic N-dimensional
    g = lambda w: np.dot(w.T, w)

    # loop over dimensions, sample points, evaluate
    mean_evals = []
    big_dim = 100
    num_pts = 10000
    pt_stops = [100, 1000, 10000]
    for dim in range(big_dim):
        dim_eval = []
        m_eval = []
        for pt in range(num_pts):
            # generate random point using uniform
            r = 2 * np.random.rand(dim + 1) - 1
            e = g(r)
            dim_eval.append(e)

            # record the minimum over the points sampled so far
            if (pt + 1) in pt_stops:
                m_eval.append(np.min(dim_eval))
        mean_evals.append(m_eval)

    # convert to array for easy access
    mean_evals_global = np.asarray(mean_evals)

    fig = plt.figure(figsize=(6, 3))

    # create subplot with 3 panels, plot input function in center plot
    gs = gridspec.GridSpec(1, 1, width_ratios=[1])
    fig.subplots_adjust(wspace=0.5, hspace=0.01)

    # plot input function
    ax = plt.subplot(gs[0])

    for k in range(len(pt_stops)):
        mean_evals = mean_evals_global[:, k]

        # scatter plot mean value
        ax.plot(np.arange(big_dim) + 1, mean_evals)

        # clean up plot - label axes, etc.,
        ax.set_xlabel('dimension of input')
        ax.set_ylabel('function value')

    # draw legend
    t = [str(p) for p in pt_stops]
    ax.legend(t, bbox_to_anchor=(1, 0.5))

    # draw horizontal axis
    ax.plot(np.arange(big_dim) + 1,
            np.arange(big_dim) * 0,
            linewidth=1,
            linestyle='--',
            color='k')
    plt.show()
Example #21
    def predict_percentile(
            self,
            df: DataFrame,
            *,
            ancillary_df: Optional[DataFrame] = None,
            p: float = 0.5,
            conditional_after: Optional[ndarray] = None) -> DataFrame:
        """
        Returns the median lifetimes for the individuals, by default. If the survival curve of an
        individual does not cross ``p``, then the result is infinity.
        http://stats.stackexchange.com/questions/102986/percentile-loss-functions

        Parameters
        ----------
        df: numpy array or DataFrame
            a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
            can be in any order. If a numpy array, columns must be in the
            same order as the training data.
        ancillary_df: numpy array or DataFrame, optional
            a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
            can be in any order. If a numpy array, columns must be in the
            same order as the training data.
        p: float, optional (default=0.5)
            the percentile, must be between 0 and 1.
        conditional_after: iterable, optional
            Must be equal in size to df.shape[0] (denoted `n` above). An iterable (array, list, series) of possibly non-zero values that represent how long the
            subject has already lived for. Ex: if :math:`T` is the unknown event time, then this represents
            :math:`T | T > s`. This is useful for knowing the *remaining* hazard/survival of censored subjects.
            The new timeline is the remaining duration of the subject, i.e. normalized back to starting at 0.


        Returns
        -------
        percentiles: DataFrame

        See Also
        --------
        predict_median

        """
        exp_mu_, sigma_ = self._prep_inputs_for_prediction_and_return_scores(
            df, ancillary_df)

        if conditional_after is None:
            return pd.DataFrame(
                exp_mu_ *
                np.exp(np.sqrt(2) * sigma_ * erfinv(2 * (1 - p) - 1)),
                index=_get_index(df))
        else:
            conditional_after = np.asarray(conditional_after)
            Z = (np.log(conditional_after) - np.log(exp_mu_)) / sigma_
            S = norm.sf(Z)

            return pd.DataFrame(
                exp_mu_ *
                np.exp(np.sqrt(2) * sigma_ * erfinv(2 * (1 - p * S) - 1)) -
                conditional_after,
                index=_get_index(df),
            )
Example #22
 def _get_weights(tt):
     tt = np.asarray(tt)
     W = np.zeros((tt.size, tt.size))
     h = np.diff(tt)
     for i in range(len(tt)):
         W[i, :i] += .5 * h[:i]
         W[i, 1:i + 1] += .5 * h[:i]
     return W
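Row i of W accumulates trapezoid weights over tt[0]..tt[i], so W @ f gives cumulative trapezoidal integrals of f. A quick check with the identity function, for which the trapezoid rule is exact:

    import numpy as np

    tt = np.linspace(0.0, 1.0, 101)
    W = _get_weights(tt)
    approx = W @ tt                            # integral of t from 0 to tt[i]
    print(np.max(np.abs(approx - tt**2 / 2)))  # ~0 up to float rounding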
Example #23
    def __init__(self, sigma2s, wts=None):
        """
        Mixture of isotropic Gaussian kernels:
          sum wts[i] * exp(- ||x - y||^2 / (2 * sigma2s[i]))

        sigma2s: a list/array of squared bandwidths
        wts: a list/array of weights. Defaults to equal weights summing to 1.
        """
        self.sigma2s = sigma2s = np.asarray(sigma2s)
        assert len(sigma2s) > 0

        if wts is None:
            self.wts = wts = np.full(len(sigma2s), 1/len(sigma2s))
        else:
            self.wts = wts = np.asarray(wts)
            assert len(wts) == len(sigma2s)
            assert all(w >= 0 for w in wts)
Example #24
 def poly_feats(self,D):
     F = []
     for i in range(D+1):
         for j in range(D+1-i):
             f = (self.x[:,0]**i)*(self.x[:,1]**j)  
             F.append(f)
     F = np.asarray(F)
     return F.T
Example #25
def circulant_2d_vector_to_circulant_2d_matrix(circulant_2d_vector):

    circulant_2d_matrix = np.asarray([
        np.roll(circulant_2d_vector, ii)
        for ii in range(len(circulant_2d_vector))
    ])

    return circulant_2d_matrix
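A small worked example of the rolls (each row is the input shifted right by one more step):

    import numpy as np

    v = np.array([1, 2, 3, 4])
    print(circulant_2d_vector_to_circulant_2d_matrix(v))
    # [[1 2 3 4]
    #  [4 1 2 3]
    #  [3 4 1 2]
    #  [2 3 4 1]]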
Example #26
 def print_training_prediction(weights):
     print("Training text                         Predicted text")
     logprobs = np.asarray(lstm_predict(weights, train_inputs))
     for t in range(logprobs.shape[1]):
         training_text  = one_hot_to_string(train_inputs[:,t,:])
         predicted_text = one_hot_to_string(logprobs[:,t,:])
         print(training_text.replace('\n', ' ') + "|" +
               predicted_text.replace('\n', ' '))
Example #27
def unflatten(flat, model):
    """Wrapper for :func:`_unflatten`.
    """
    # pylint:disable=len-as-condition
    res, tail = _unflatten(np.asarray(flat), model)
    if len(tail) != 0:
        raise ValueError('Flattened iterable has more elements than the model.')
    return res
Example #28
 def compute_evaluation(self, tetvalue):
     if tetvalue.top == 'T':
         if len(self.children) == 0:
             return 1
         else:
             evaluations = [[] for _ in range(len(tetvalue.multisets))]
             for i in range(len(tetvalue.multisets)):
                 child = self.children[i]
                 for subvalue in tetvalue.multisets[i].elements:
                     evaluations[i].append(
                         (child.compute_evaluation(subvalue[0]),
                          subvalue[1]))
             return self.activation.forward(np.asarray(evaluations))
Example #29
 def trial_function(self, x_in, weights):
     """Compute the trail function c_t."""
     net = self.mlp.network_output(x_in, weights)
     y_const = np.asarray([x_in[0], 1.0])
     net_y = self.mlp.network_output(y_const, weights)
     dy_net = self.grad_network(y_const, weights)[1]
     return self.bc(x_in) + x_in[0] * (1.0 - x_in[0]) * x_in[1] * \
         (net - net_y - dy_net)
Example #30
 def _get_weights(tt):
     tt = np.asarray(tt)
     W = np.zeros((tt.size, tt.size))
     h = np.diff(tt)
     for i in range(len(tt)):
         W[i, :i] += .5 * h[:i]
         W[i, 1:i+1] += .5 * h[:i]
     return W
Example #31
    def set_loss_control_points(self):
        """Select a subset of control points used for training."""
        training_points = []
        training_indices = []
        points_and_indices = list(enumerate(self.control_points))
        shuffle(points_and_indices)
        for i in range(len(points_and_indices)):
            if (i + 1) % self.every == 0:
                training_points.append(points_and_indices[i][1])
                training_indices.append(points_and_indices[i][0])
        self.loss_control_points = np.asarray(training_points)
        self.loss_indices = np.asarray(training_indices)
        len_training = len(self.loss_control_points)
        len_all = len(self.control_points)
        print("Selecting subset for training: {} out of {} \
selected.".format(len_training, len_all))
Example #32
    def get_dx_g(self, x):
        atoms4 = self.atoms4
        atoms3 = self.atoms3
        p = self.p
        d = self.d

        combos = np.asarray([[0, 1, 2], [1, 2, 3], [0, 2, 3], [0, 1, 3]])
        output = np.zeros((d, p))
        for k in range(p):
            atom4 = atoms4[k, :]
            angles4 = []
            # get identities of triangles on boundary of tetrahedron
            # which atoms3 are in the atoms4...
            actived = [
                np.where([
                    set(item).issubset(atom4[combos[i, :]]) for item in atoms3
                ])[0][0] for i in range(4)
            ]
            actived = np.asarray(actived, dtype=int)
            naive = np.reshape(x, (int(x.shape[0] / 3), 3))[actived, :]
            for i in range(4):
                a = atoms3[actived[i]]
                b = atom4[np.in1d(atom4, atoms3[actived[i]])]
                for j in range(3):
                    angles4.append(naive[i, np.where(a == b[j])[0]])
            # the jth position in the ith row contains the gradient
            # corresponding to the jth position in the truncated atom4
            a4 = np.reshape(angles4, (4, 3))
            # fitin = g4(a4)[1]
            fitin = self.gradg4(a4)
            faceindex = np.zeros(4)
            for j in range(4):
                face = atom4[combos[j]]
                for i in range(4):
                    if collections.Counter(
                            atoms3[actived][i]) == collections.Counter(face):
                        faceindex[j] = i
            faceindex = np.asarray(faceindex, dtype=int)
            anglerowtooutput = actived[faceindex]
            for i in range(4):
                face = atom4[combos[i]]
                buffer = np.asarray(scipy.stats.rankdata(face) - 1, dtype=int)
                for j in range(3):
                    output[3 * anglerowtooutput[i] + buffer[j], k] = fitin[i,
                                                                           j]
        return output
Example #33
def contour_plot(ax, g, pts, wmax, num_contours, my_colors, pts_levels):
    #### define input space for function and evaluate ####
    w1 = np.linspace(-wmax, wmax, 100)
    w2 = np.linspace(-wmax, wmax, 100)
    w1_vals, w2_vals = np.meshgrid(w1, w2)
    w1_vals.shape = (len(w1)**2, 1)
    w2_vals.shape = (len(w2)**2, 1)
    h = np.concatenate((w1_vals, w2_vals), axis=1)
    func_vals = np.asarray([g(s) for s in h])
    w1_vals.shape = (len(w1), len(w1))
    w2_vals.shape = (len(w2), len(w2))
    func_vals.shape = (len(w1), len(w2))

    ### make contour right plot - as well as horizontal and vertical axes ###
    # set level ridges
    levelmin = min(func_vals.flatten())
    levelmax = max(func_vals.flatten())
    cutoff = 0.3
    cutoff = (levelmax - levelmin) * cutoff
    numper = 3
    levels1 = np.linspace(cutoff, levelmax, numper)
    num_contours -= numper

    ##### plot filled contours with generic contour lines #####
    # produce generic contours
    levels2 = np.linspace(levelmin, cutoff, min(num_contours, numper))
    levels = np.unique(np.append(levels1, levels2))
    num_contours -= numper
    while num_contours > 0:
        cutoff = levels[1]
        levels2 = np.linspace(levelmin, cutoff, min(num_contours, numper))
        levels = np.unique(np.append(levels2, levels))
        num_contours -= numper

    # plot the contours
    ax.contour(w1_vals, w2_vals, func_vals, levels=levels[1:], colors='k')
    ax.contourf(w1_vals, w2_vals, func_vals, levels=levels, cmap='Blues')

    ###### add contour curves based on input points #####
    # add to this list the contours passing through input points
    ax.contour(w1_vals,
               w2_vals,
               func_vals,
               levels=pts_levels,
               colors='k',
               linewidths=3)
    ax.contour(w1_vals,
               w2_vals,
               func_vals,
               levels=pts_levels,
               colors=my_colors,
               linewidths=2.5)

    ###### clean up plot ######
    ax.set_xlabel('$w_0$', fontsize=12)
    ax.set_ylabel('$w_1$', fontsize=12, rotation=0)
    ax.axhline(y=0, color='k', zorder=0, linewidth=0.5)
    ax.axvline(x=0, color='k', zorder=0, linewidth=0.5)
Example #34
def problem1(part):
    x_train_N = ag_np.asarray([-5.0, -2.50, 0.00, 2.50, 5.0])
    y_train_N = ag_np.asarray([-4.91, -2.48, 0.05, 2.61, 5.09])

    n_samples_list = [1, 10, 100, 1000]

    # mean of w
    m_tilda_list = ag_np.linspace(-3.0, 5.0, 20)
    # log stddev of w
    s_tilda = ag_np.log(0.1)

    # mean of b
    m_bar = 0.0
    # log stddev of b
    s_bar = ag_np.log(0.1)

    loss_lists_for_all_of_the_sample_sizes = []
    for n_samples in n_samples_list:

        loss_list_for_one_of_the_sample_sizes = []

        for m_tilda in m_tilda_list:

            sum_loss_for_n_samples = 0
            for n in range(0, n_samples):

                if part == 'a':
                    loss = approximate_loss(m_tilda, m_bar, s_tilda, s_bar,
                                            x_train_N, y_train_N)
                if part == 'c':
                    loss = approximate_loss_grad(m_tilda, m_bar, s_tilda,
                                                 s_bar, x_train_N, y_train_N)

                sum_loss_for_n_samples += loss

            sum_loss_for_n_samples_and_m_tilda = sum_loss_for_n_samples / n_samples

            loss_list_for_one_of_the_sample_sizes.append(
                sum_loss_for_n_samples_and_m_tilda)

        loss_lists_for_all_of_the_sample_sizes.append(
            loss_list_for_one_of_the_sample_sizes)

    plot_prior_data(m_tilda_list, n_samples_list,
                    loss_lists_for_all_of_the_sample_sizes)
Example #35
    def _set_startprob_prior(self, startprob_prior):
        if startprob_prior is None or startprob_prior == 1.0:
            startprob_prior = np.zeros(self.n_components)
        else:
            startprob_prior = np.asarray(startprob_prior, dtype=float)

            if len(startprob_prior) != self.n_components:
                if len(startprob_prior) == self.n_unique:
                    startprob_prior_split = np.copy(startprob_prior) / \
                        (1.0 + self.n_tied)
                    startprob_prior = np.zeros(self.n_components)
                    for u in range(self.n_unique):
                        for t in range(self.n_chain):
                            startprob_prior[u*(self.n_chain)+t] = \
                                startprob_prior_split[u].copy()
                else:
                    raise ValueError("cannot match shape of startprob")

        self.startprob_prior = np.asarray(startprob_prior).copy()
Example #36
 def _set_mu_prior(self, mu_prior):
     if mu_prior is None:
         self._mu_prior_ = np.zeros((self.n_components, self.n_features))
     else:
         mu_prior = np.asarray(mu_prior)
         # check the size before reshaping, so the error branch is reachable
         if mu_prior.size != self.n_unique * self.n_features:
             raise ValueError("cannot match shape of mu_prior")
         self._mu_prior_ = mu_prior.reshape(
             self.n_unique, self.n_features).copy()
Example #37
def gmm_objective(alphas, means, icf, x, wishart_gamma, wishart_m):
    def inner_term(ix, ik):
        xcentered = x[ix, :] - means[ik, :]
        Lxcentered = Qtimesx(Qdiags[ik, :], Ls[ik, :, :], xcentered)
        sqsum_Lxcentered = sqsum(Lxcentered)
        return alphas[ik] + sum_qs[ik] - 0.5 * sqsum_Lxcentered

    n = x.shape[0]
    d = x.shape[1]
    k = alphas.size
    Qdiags = np.asarray([np.exp(icf[ik, :d]) for ik in range(k)])
    sum_qs = np.asarray([np.sum(icf[ik, :d]) for ik in range(k)])
    Ls = np.asarray([constructL(d, curr_icf) for curr_icf in icf])
    slse = 0
    for ix in range(n):
        lse = np.asarray([inner_term(ix, ik) for ik in range(k)])
        slse = slse + logsumexp(lse)

    CONSTANT = -n * d * 0.5 * np.log(2 * np.pi)
    return CONSTANT + slse - n * logsumexp(alphas) \
        + log_wishart_prior(d, wishart_gamma, wishart_m, sum_qs, Qdiags, icf)
Example #38
    def _set_transmat(self, transmat_val):
        if transmat_val is None:
            transmat = np.tile(1.0 / self.n_components,
                               (self.n_components, self.n_components))
        else:
            transmat_val[np.isnan(transmat_val)] = 0.0
            normalize(transmat_val, axis=1)

            if (np.asarray(transmat_val).shape == (self.n_components,
                                                   self.n_components)):
                transmat = np.copy(transmat_val)
            elif transmat_val.shape[0] == self.n_unique:
                transmat = self._ntied_transmat(transmat_val)
            else:
                raise ValueError("cannot match shape of transmat")

        if not np.allclose(np.sum(transmat, axis=1), 1.0):
            raise ValueError('Rows of transmat must sum to 1.0')
        self._log_transmat = np.log(np.asarray(transmat).copy())
        underflow_idx = np.isnan(self._log_transmat)
        self._log_transmat[underflow_idx] = NEGINF
Example #39
def gmm_objective_split_inner(alphas, means, icf, x):
    def inner_term(ik):
        sum_qs = icf[ik, :d].sum()
        xcentered = x - means[ik, :]
        Qdiag = np.exp(icf[ik, :d])
        L = constructL(d, icf[ik, :])
        Lxcentered = Qtimesx(Qdiag, L, xcentered)
        return alphas[ik] + sum_qs - 0.5 * sqsum(Lxcentered)

    d = x.size
    k = alphas.size
    lse = np.asarray([inner_term(ik) for ik in range(k)])
    return logsumexp(lse)
Example #40
    def _setup_times(self, tt,
                     h=None, tt_aug=None):
        """
        Handles storing of training times and augmenting the time vector.
        """
        # make sure all the times are arrays
        tt = [np.asarray(item) for item in tt]

        ttc, data_inds = handle_time_inds(tt, h)
        self.x_train_ = tt
        self.ttc = ttc
        self.data_inds = data_inds
        self.is_tt_aug = True

        # store the dimension variable
        _, K, R, D = self.dim
        self.dim = Dimensions(self.ttc.size, K, R, D)
        self.N_data = tuple(item.size for item in self.x_train_)
Example #41
 def _set_precision_prior(self, precision_prior):
     if precision_prior is None:
         self._precision_prior_ = np.zeros(
             (self.n_components, self.n_features, self.n_features))
     else:
         precision_prior = np.asarray(precision_prior)
         if len(precision_prior) == 1:
             self._precision_prior_ = np.tile(
                 precision_prior,
                 (self.n_components, self.n_features, self.n_features))
         elif precision_prior.size == (self.n_unique * self.n_features *
                                       self.n_features):
             precision_prior = precision_prior.reshape(
                 self.n_unique, self.n_features, self.n_features)
             self._precision_prior_ = np.zeros(
                 (self.n_components, self.n_features, self.n_features))
             for u in range(self.n_unique):
                 for t in range(self.n_chain):
                     self._precision_prior_[u * self.n_chain + t] = \
                         precision_prior[u].copy()
         else:
             raise ValueError("cannot match shape of precision_prior")
Example #42
 def _set_var_prior(self, var_prior):
     var_prior = np.asarray(var_prior)
     if self.n_features == 1:
         self._set_precision_prior(1.0 / var_prior)
     else:
         self._set_precision_prior(np.linalg.inv(var_prior))
Example #43
def _read_kurucz_spec(f):
    """
    Read Kurucz spectra that have been precomputed

    Args:
        f (string) : path to the file to be read
        
    Returns:
        new_vel (real array) : velocity axis in km/s
        wavelength (real array) : wavelength axis in cm
        stokes (real array) : spectrum for each velocity bin
    """
    f = open(f, "rb")
    res = f.read()
    
    n_chunk = struct.unpack('i',res[0:4])
    
    freq = []
    stokes = []
    cont = []
    
    left = 4
    
    for i in range(n_chunk[0]):
        
        right = left + 4
        n = struct.unpack('i',res[left:right])

        left = right
        right = left + 4
        nmus = struct.unpack('i',res[left:right])


        left = right
        right = left + 8*n[0]
        t1 = np.asarray(struct.unpack('d'*n[0],res[left:right]))
        freq.append(t1)        
                
        left = right
        right = left + 8*n[0]*nmus[0]

        t2 = np.asarray(struct.unpack('d'*n[0]*nmus[0],res[left:right])).reshape((n[0],nmus[0]))
        stokes.append(t2)

        left = right
        right = left + 8*n[0]*nmus[0]

        t2 = np.asarray(struct.unpack('d'*n[0]*nmus[0],res[left:right])).reshape((n[0],nmus[0]))
        cont.append(t2)
        
        left = right
        
    freq = np.concatenate(freq)
    stokes = np.concatenate(stokes)
    cont = np.concatenate(cont)

    ind = np.argsort(freq)
    freq = freq[ind]
    stokes = stokes[ind]
    cont = cont[ind]
    wavelength = const.c.to('cm/s').value / freq
    mean_wavelength = np.mean(wavelength)

    vel = (wavelength - mean_wavelength) / mean_wavelength * const.c.to('km/s').value

    nl, nmus = stokes.shape

    # Reinterpolate on an equidistant velocity axis
    new_vel = np.linspace(np.min(vel), np.max(vel), nl)
    for i in range(nmus):
        interpolator = scipy.interpolate.interp1d(vel, stokes[:,i], kind='linear')
        stokes[:,i] = interpolator(new_vel)

    return new_vel, wavelength, stokes
Example #44
 def scalar_subtract_and_exp(a,scalar):
     return np.asarray([np.exp(a[i] - scalar) for i in range(a.size)])
Example #45
 def scalar_multiply(a,scalar):
     return np.asarray([(a[i] * scalar) for i in range(a.size)])
Example #46
 def cwise_multiply(a,b):
     return np.asarray([(a[i] * b[i]) for i in range(a.size)])
Example #47
 def _set_var_prior(self, var_prior):
     var_prior = np.asarray(var_prior)
     self._set_precision_prior(1.0 / var_prior)