Example #1
def test_multivariate_normal_logpdf_batches_and_states_masked(D=10):
    # Test broadcasting over B batches, N datapoints, and K parameters with masks
    B = 3
    N = 100
    K = 5
    x = npr.randn(B, N, D)
    mask = npr.rand(B, N, D) < .5
    mu = npr.randn(K, D)
    L = npr.randn(K, D, D)
    Sigma = np.matmul(L, np.swapaxes(L, -1, -2))

    ll1 = multivariate_normal_logpdf(x[:, :, None, :],
                                     mu,
                                     Sigma,
                                     mask=mask[:, :, None, :])
    assert ll1.shape == (B, N, K)

    ll2 = np.empty((B, N, K))
    for b in range(B):
        for n in range(N):
            m = mask[b, n]
            if m.sum() == 0:
                ll2[b, n] = 0
            else:
                for k in range(K):
                    ll2[b, n, k] = mvn.logpdf(x[b, n][m], mu[k][m],
                                              Sigma[k][np.ix_(m, m)])

    assert np.allclose(ll1, ll2)
Example #2
    def convolve_with_basis(self, signal):
        """
        Convolve each column of the event count matrix with this basis
        :param S:     signal: an array-like data, each series is (1, T) shape
        :return: TxB of inputs convolved with bases
        """
        (T,_) = signal.shape
        (R,B) = self.basis.shape


        # Initialize array for filtered stimulus
        F = np.empty((T,B))

        # Compute convolutions fo each basis vector, one at a time
        for b in np.arange(B):
            F[:,b] = sig.fftconvolve(signal,
                                       np.reshape(self.basis[:,b],(R,1)),
                                       'full')[:T,:]

        # Check for positivity
        if np.amin(self.basis) >= 0 and np.amin(signal) >= 0:
            np.clip(F, 0, np.inf, out=F)
            assert np.amin(F) >= 0, "convolution should be >= 0"

        return F
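The method only touches self.basis, so it can be exercised with a stub object. A minimal sketch, assuming the method is reachable as a plain function and that sig is scipy.signal; the stub basis and random signal below are illustrative, not part of the original:

import types
import numpy as np
import scipy.signal as sig

# Stub object carrying the one attribute the method reads (R=5, B=2, made up)
stub = types.SimpleNamespace(basis=np.ones((5, 2)) / 5.0)
signal = np.random.rand(100, 1)  # T = 100

F = convolve_with_basis(stub, signal)
print(F.shape)  # (100, 2)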
Example #3
    def _ntied_transmat_prior(self, transmat_val):  # TODO: document choices
        transmat = np.empty((0, self.n_components))
        for r in range(self.n_unique):
            row = np.empty((self.n_chain, 0))
            for c in range(self.n_unique):
                if r == c:
                    subm = np.array(
                        sp.diags([transmat_val[r, c], 1.0], [0, 1],
                                 shape=(self.n_chain, self.n_chain)).todense())
                else:
                    lower_left = np.zeros((self.n_chain, self.n_chain))
                    lower_left[self.n_tied, 0] = 1.0
                    subm = np.kron(transmat_val[r, c], lower_left)
                row = np.hstack((row, subm))
            transmat = np.vstack((transmat, row))
        return transmat
Example #4
def load_cg_merged(filename):
    # Open in text mode with newline='' as the csv module expects in Python 3
    with open(filename, "r", newline="") as csvfile:
        lines = [line for line in csv.reader(csvfile)]

    # number of samples
    n_instances = len(lines) - 1

    # number of features
    n_features = len(lines[0]) - 1

    # extract feature names
    feature_name = lines[0][1:]

    # extract case ids
    case_id = []

    # extract dataset
    data = np.empty((n_instances, n_features), dtype=object)
    for i in range(n_instances):
        case_id.append(lines[i + 1][0])
        data[i, :] = lines[i + 1][1:]

    return (feature_name, case_id, data)
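A quick round-trip sketch: write a tiny CSV in the expected layout (a header row, then one case id plus feature values per row) and load it back. The file name and contents are hypothetical:

import csv

with open("example.csv", "w", newline="") as f:
    csv.writer(f).writerows([
        ["id", "f1", "f2"],
        ["case1", "0.1", "0.2"],
        ["case2", "0.3", "0.4"],
    ])

feature_name, case_id, data = load_cg_merged("example.csv")
print(feature_name, case_id, data.shape)  # ['f1', 'f2'] ['case1', 'case2'] (2, 2)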
Example #6
def level_curve_question_8(f, x0, y0, oversampling=1, delta=0.1, N=N, eps=eps):
    if oversampling == 1:
        return level_curve_question_7(f, x0, y0)
    else:  # reuse the skeleton of the level_curve function
        res = np.empty((2, oversampling * N), dtype=float)
        c = f(x0, y0)
        # compute the endpoints of the first segment
        a = np.array([x0, y0])
        res[:, 0] = a
        gradient = grad(f)(x0, y0)
        # compute the next point
        u = mat_rot.dot(gradient)
        norme_u = np.linalg.norm(u)
        nouveau_point = np.array([x0, y0]) + (delta / norme_u) * u
        x, y = Newton(fonction_level_curve(f, x0, y0, c, delta),
                      nouveau_point[0], nouveau_point[1])
        b = np.array([x, y])
        res[:, 1 * oversampling] = b
        x0, y0 = x, y

        # puis on "remplit" entre a et b
        res[:, 1:oversampling] = interpolation(f, a, b, oversampling)

        # compute a third point
        gradient = grad(f)(x0, y0)
        u = mat_rot.dot(gradient)
        norme_u = np.linalg.norm(u)
        nouveau_point = np.array([x0, y0]) + (delta / norme_u) * u
        x, y = Newton(fonction_level_curve(f, x0, y0, c, delta),
                      nouveau_point[0], nouveau_point[1])
        res[:, 2 * oversampling] = np.array([x, y])
        x0, y0 = x, y

        # fill in between b and this new point
        res[:, 1 * oversampling + 1:2 * oversampling] = interpolation(
            f, b, np.array([x, y]), oversampling)

        # then compute the following points
        for i in range(3, N):
            gradient = grad(f)(x0, y0)
            # move to a new point "in the direction of grad(f)"
            u = mat_rot.dot(gradient)
            norme_u = np.linalg.norm(u)
            nouveau_point = np.array([x0, y0]) + (delta / norme_u) * u
            # find the new coordinates, on the circle centered at (x0, y0) with radius delta
            x, y = Newton(fonction_level_curve(f, x0, y0, c, delta),
                          nouveau_point[0], nouveau_point[1])
            res[:, i * oversampling] = np.array([x, y])
            # fill res between this new point and the one found at the previous iteration
            res[:,
                (i - 1) * oversampling + 1:i * oversampling] = interpolation(
                    f, res[:, (i - 1) * oversampling], np.array([x, y]),
                    oversampling)
            # self-intersection test
            if closed_segment_intersect(a, b, res[:, (i - 1) * oversampling],
                                        np.array([x, y])):
                print(f"Auto-intersection après {i} points.")
                return res[:, :i * oversampling]
            x0, y0 = x, y
        return res
Example #7
from typing import Union


def gamma(t: np.ndarray, P1: Union[tuple, np.ndarray], P2: Union[tuple, np.ndarray],
          u1: Union[tuple, np.ndarray], u2: Union[tuple, np.ndarray]) -> np.ndarray:
    """
    This function generates a polynomial interpolation between two points in a 2D plane.

    :param t: The interpolated curve's normalized parameter
    :param P1: The first point tuple
    :param P2: The second point tuple
    :param u1: The derivative vector at P1
    :param u2: The derivative vector at P2

    :returns: The interpolated points' array
    """

    #   1. Unpacking the variables for easier processing
    u11, u12 = u1
    u21, u22 = u2
    x1, y1 = P1
    x2, y2 = P2

    #   2. Calculating the determinant of the (u1, u2) couple
    denom = (u12 * u21) - (u11 * u22)

    #   3.1. Using the second-degree polynomial interpolation method when possible
    if not np.isclose(denom, 0):
        #   3.1.1. Calculating the values of k1 and k2
        k1 = 2 * (((y2 - y1) * u21) - ((x2 - x1) * u22)) / denom
        k2 = 2 * (((x2 - x1) * u12) - ((y2 - y1) * u11)) / denom

        #   3.1.2. Calculating the explicit values of the a, b, c, d, e, f parameters
        a = x1
        b = k1 * u11
        c = k2 * u21 + x1 - x2
        d = y1
        e = k1 * u12
        f = k2 * u22 + y1 - y2

        #   3.1.3. Calculating the values of the x, y variables
        x = a + b * t + c * t**2
        y = d + e * t + f * t**2

    #   3.2. Switching to a linear interpolation method (ignoring the derivative vectors) if u1 and u2 are parallel vectors
    else:
        #   3.2.1. Calculating the explicit values of the a, b, c, d parameters
        a = x1
        b = x2 - x1
        c = y1
        d = y2 - y1

        #   3.2.2. Calculating the values of the x, y variables
        x = a + b * t
        y = c + d * t

    #   4. Concatenating the results into a single coordinates array
    points_interpolated = np.empty((2, t.shape[0]))
    points_interpolated[0, :] = x
    points_interpolated[1, :] = y

    return points_interpolated
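A minimal usage sketch for gamma: interpolate between two hypothetical points with non-parallel tangents, which exercises the quadratic branch. All values are illustrative:

import numpy as np

t = np.linspace(0.0, 1.0, 50)
pts = gamma(t, P1=(0.0, 0.0), P2=(1.0, 1.0), u1=(1.0, 0.0), u2=(0.0, 1.0))
assert pts.shape == (2, 50)
# the curve passes through both endpoints
assert np.allclose(pts[:, 0], [0.0, 0.0]) and np.allclose(pts[:, -1], [1.0, 1.0])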
Example #8
  def test_stress(self):
    for structure in ('fcc', 'bcc', 'hcp', 'diamond', 'sc'):
      for repeat in ((1, 1, 1), (1, 2, 3)):
        for a in [3.0, 4.0]:
          atoms = bulk('Cu', structure, a=a).repeat(repeat)
          atoms.rattle()
          atoms.set_calculator(EMT())

          # Numerically calculate the ase stress
          d = 1e-9  # a delta strain

          ase_stress = np.empty((3, 3)).flatten()
          cell0 = atoms.cell

          # Use a finite difference approach that is centered.
          for i in [0, 4, 8, 5, 2, 1]:
            strain_tensor = np.zeros((3, 3))
            strain_tensor = strain_tensor.flatten()
            strain_tensor[i] = d
            strain_tensor = strain_tensor.reshape((3, 3))
            strain_tensor += strain_tensor.T
            strain_tensor /= 2
            strain_tensor += np.eye(3, 3)

            cell = np.dot(strain_tensor, cell0.T).T
            positions = np.dot(strain_tensor, atoms.positions.T).T
            atoms.cell = cell
            atoms.positions = positions
            ep = atoms.get_potential_energy()

            strain_tensor = np.zeros((3, 3))
            strain_tensor = strain_tensor.flatten()
            strain_tensor[i] = -d
            strain_tensor = strain_tensor.reshape((3, 3))
            strain_tensor += strain_tensor.T
            strain_tensor /= 2
            strain_tensor += np.eye(3, 3)

            cell = np.dot(strain_tensor, cell0.T).T
            positions = np.dot(strain_tensor, atoms.positions.T).T
            atoms.cell = cell
            atoms.positions = positions
            em = atoms.get_potential_energy()

            ase_stress[i] = (ep - em) / (2 * d) / atoms.get_volume()

          ase_stress = np.take(ase_stress.reshape((3, 3)), [0, 4, 8, 5, 2, 1])

          ag_stress = stress(parameters, atoms.positions, atoms.numbers, atoms.cell)

          # I picked the 0.03 tolerance here. I thought it should be closer, but
          # it is a simple numerical difference I am using for the derivative,
          # and I am not sure it is totally correct.
          self.assertTrue(np.all(np.abs(ase_stress - ag_stress) <= 0.03),
                          f'''
ase: {ase_stress}
ag : {ag_stress}
diff: {ase_stress - ag_stress}
''')
Example #9
def cpeps(peps, config):
    shape = peps.shape
    cpeps = np.empty(shape, dtype=object)
    peps_config = np.reshape(config, peps.shape)
    for i in range(shape[0]):
        for j in range(shape[1]):
            cpeps[i, j] = peps[i, j][peps_config[i, j], :, :, :, :]
    return cpeps
Example #10
    def mcmc(self,
             N=1000,
             burn=0,
             single=False,
             ptype="corr",
             s=1.,
             update_hess=False,
             new=False,
             use_tqdm=True):
        """

        :param N:
        :param burn:
        :param single:
        :param ptype:
        :param s:
        :param update_hess:
        :param new:
        :return:
        """
        if new:
            self.n_accepted = np.zeros(self.n_params, dtype=int)
            self.errors = []

        # build the chain
        new_chain = np.empty((N, self.n_params + 1))
        # single or joint sampler
        if single:
            sample_fun = self._single_sample
        else:
            sample_fun = self._joint_sample
        # burn in if it's a new chain
        if self._chain.sum() == 0.:
            for _ in range(burn):
                try:
                    sample_fun(is_burn=True, ptype=ptype, s=s)
                except Exception:
                    pass
        # sample
        if use_tqdm:
            range_fun = lambda n: tqdm(range(n))
        else:
            range_fun = lambda n: range(n)

        for i in range_fun(N):
            try:
                sample_fun(ptype=ptype, s=s)
            except Exception:
                self.errors.append(i)

            new_chain[i, 0] = self.last_log_p
            new_chain[i, 1:] = self.params

        # either save the chain or append new_chain to the existing chain
        if self._chain.sum() == 0. or new:
            self._chain = new_chain
        else:
            self._chain = np.vstack((self._chain, new_chain))

        print(self.a_rates)
Example #11
def maxFlatPoly(tau, P):
    a = np.empty(P + 1)
    i = np.arange(P + 1)
    for k in range(P + 1):
        a[k] = (-1)**k * binom(P, k) * np.prod(
            (2 * tau + i) / (2 * tau + k + i))
    a /= a.sum()

    return a
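A short sketch of calling maxFlatPoly, assuming binom is scipy.special.binom (the snippet does not show its import); the tau and P values are illustrative:

import numpy as np
from scipy.special import binom

a = maxFlatPoly(0.3, 3)
print(a, a.sum())  # four coefficients, normalized so they sum to 1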
Example #12
def ba_objective(cams, X, w, obs, feats):
    p = obs.shape[0]
    reproj_err = np.empty((p, 2))
    for i in range(p):
        reproj_err[i] = compute_reproj_err(cams[obs[i, 0]], X[obs[i, 1]],
                                           w[i], feats[i])

    w_err = 1. - np.square(w)

    return (reproj_err, w_err)
Example #13
        def animate(*args, **kwargs):
            global i
            ax0.clear()
            ax1.clear()

            ax0.set_xlim(-2, 4)
            ax0.set_ylim(-1, 5)
            ax1.set_xlim(0, len(hist_nloglik))
            ax1.set_ylim(min(hist_nloglik), max(hist_nloglik))

            N = 64
            X = np.linspace(-2, 4, N)
            Y = np.linspace(-1, 5, N)
            X, Y = np.meshgrid(X, Y)

            # Pack X and Y into a single 3-dimensional array
            pos = np.empty(X.shape + (2,))
            pos[:, :, 0] = X
            pos[:, :, 1] = Y
            rv = multivariate_normal(true_mean, true_cov)
            Z = rv.pdf(pos)
            levelsf = MaxNLocator(nbins=8).tick_values(Z.min(), Z.max())
            ax0.contourf(X, Y, Z, levels=levelsf, antialiased=True)

            X = np.linspace(hist_vi_mean[i][0] - 5 * np.exp(hist_vi_logvars[i][0]),
                            hist_vi_mean[i][0] + 5 * np.exp(hist_vi_logvars[i][0]), 32)
            Y = np.linspace(hist_vi_mean[i][1] - 5 * np.exp(hist_vi_logvars[i][1]),
                            hist_vi_mean[i][1] + 5 * np.exp(hist_vi_logvars[i][1]), 32)
            X, Y = np.meshgrid(X, Y)

            # Pack X and Y into a single 3-dimensional array
            pos = np.empty(X.shape + (2,))
            pos[:, :, 0] = X
            pos[:, :, 1] = Y
            rv = multivariate_normal(hist_vi_mean[i], np.diag(np.exp(hist_vi_logvars[i])))
            Z = rv.pdf(pos)
            levelsf = MaxNLocator(nbins=10).tick_values(Z.min(), Z.max())
            ax0.contour(X, Y, Z, levels=levelsf, cmap='YlOrRd')
            ax1.plot(hist_nloglik[:i])
            # ax0.legend()

            i += step
            i = min(i, len(hist_nloglik) - 1)
            return ax0, ax1
Example #14
def atoms_mapfn(a):
    d = positions0 + displacement - positions0[a]
    i = indices[(d**2).sum(1) < (cutoff_radius**2 + skin)]
    if n1 == 0 and n2 == 0 and n3 == 0:
        i = i[i > a]
    neighbors[a] = np.concatenate((neighbors[a], i))
    disp = np.empty((len(i), 3), int)
    disp[:] = (n1, n2, n3)
    disp += offsets[i] - offsets[a]
    displacements[a] = np.concatenate((displacements[a], disp))
Example #15
    def __init__(self,
                 potential_energy,
                 kinetic_energy,
                 kinetic_energy_distribution,
                 random=None,
                 diagnostic_mode=False):
        if random is not None:
            self.random = random
        else:
            self.random = np.random.RandomState(0)
        self.D = np.max(kinetic_energy_distribution(1).shape)
        self.potential_energy = potential_energy
        self.kinetic_energy = kinetic_energy
        self.total_energy = lambda position, momentum: potential_energy(
            position) + kinetic_energy(momentum)

        self.sample_momentum = lambda n: kinetic_energy_distribution(
            n).reshape((1, self.D))
        self.grad_potential_energy = grad(potential_energy)

        self.params = {
            'step_size': 0.1,
            'leapfrog_steps': 10,
            'total_samples': 1000,
            'burn_in': 0.1,
            'thinning_factor': 1,
            'diagnostic_mode': diagnostic_mode
        }

        self.accepts = 0.
        self.iterations = 0.
        self.trace = np.empty((1, self.D))
        self.potential_energy_trace = np.empty((1, ))

        assert self.sample_momentum(1).shape == (1, self.D)
        assert isinstance(self.potential_energy(self.sample_momentum(1)),
                          float)
        assert isinstance(self.kinetic_energy(self.sample_momentum(1)), float)
        assert isinstance(
            self.total_energy(self.sample_momentum(1),
                              self.sample_momentum(1)), float)
        assert self.grad_potential_energy(
            self.sample_momentum(1)).shape == (1, self.D)
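The constructor can be smoke-tested with a standard-normal target. A sketch under two assumptions: the enclosing class (not shown in the snippet) is called HMCSampler here purely for illustration, and grad comes from autograd, as the call to grad(potential_energy) suggests:

import autograd.numpy as np

potential = lambda q: 0.5 * np.sum(q ** 2)  # N(0, I) negative log-density
kinetic = lambda p: 0.5 * np.sum(p ** 2)
draw_momentum = lambda n: np.random.randn(n, 2)  # implies D = 2

sampler = HMCSampler(potential, kinetic, draw_momentum)  # hypothetical class name
print(sampler.D)  # 2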
Example #17
def broadcast1024(*args):
    """Extend numpy.broadcast to accept 1024 inputs, rather than the default 32."""
    ngroups = int(np.ceil(len(args) / 32))
    if ngroups == 1:
        return np.broadcast(*args)
    else:
        return np.broadcast(*[
            np.empty(np.broadcast(*args[n * 32:(n + 1) * 32]).shape)
            for n in range(ngroups)
        ])
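A sketch that exercises the grouping path, assuming np.broadcast still caps out at 32 operands as the docstring states; the shapes are arbitrary:

import numpy as np

arrays = [np.empty(1) for _ in range(40)] + [np.empty(5)]
b = broadcast1024(*arrays)  # plain np.broadcast(*arrays) would raise
print(b.shape)  # (5,)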
Example #18
def triang_to_flat(L):
    D, _, M = L.shape
    N = M * (M + 1) // 2
    flat = np.empty((N, D))
    for d in range(D):
        count = 0
        for m in range(M):
            for mm in range(m + 1):
                flat[count, d] = L[d, m, mm]
                count = count + 1
    return flat
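A small shape check for triang_to_flat: with D=2 matrices of size M=3, each lower triangle holds M*(M+1)//2 = 6 entries. The input is illustrative:

import numpy as np

L = np.tril(np.arange(18, dtype=float).reshape(2, 3, 3))
flat = triang_to_flat(L)
print(flat.shape)  # (6, 2)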
Example #19
    def liquid_density_star(self, temperature_star, quadrupole_star, bond_length_star):
        """Computes the reduced liquid density of the two-center
        Lennard-Jones model for a given set of model parameters over
        a specified range of temperatures.

        Parameters
        ----------
        temperature_star: numpy.ndarray
            The reduced temperatures to evaluate the reduced density at.
        quadrupole_star: float
            The reduced quadrupole parameter.
        bond_length_star: float
            The reduced bond-length parameter

        Returns
        -------
        numpy.ndarray
            The reduced density.
        """

        _b_C1, _b_C2, _b_C3 = (
            self._b_C1,
            self._b_C2_L,
            self._b_C3_L,
        )

        t_c_star = self.critical_temperature_star(quadrupole_star, bond_length_star)
        rho_c_star = self.critical_density_star(quadrupole_star, bond_length_star)

        tau = t_c_star - temperature_star

        if np.all(tau > 0):

            coefficient_1 = self._correlation_function_1(
                quadrupole_star, bond_length_star, _b_C1
            )
            coefficient_2 = self._correlation_function_2(
                quadrupole_star, bond_length_star, _b_C2
            )
            coefficient_3 = self._correlation_function_3(
                quadrupole_star, bond_length_star, _b_C3
            )

            x_0 = 1.0 * rho_c_star
            x_1 = tau ** (1.0 / 3.0) * coefficient_1
            x_2 = tau * coefficient_2
            x_3 = tau ** (3.0 / 2.0) * coefficient_3

            rho_star = x_0 + x_1 + x_2 + x_3

        else:
            rho_star = np.full(temperature_star.shape, np.nan)

        return rho_star
Example #20
    def fisher_information_matrix(self, theta):
        n_params = len(np.atleast_1d(theta))
        fisher = np.empty(shape=(n_params, n_params))
        grad_mean = self.mean.gradient(*theta)
        mean = self.mean(*theta)

        for i in range(n_params):
            for j in range(i, n_params):
                fisher[i, j] = (grad_mean[i] * grad_mean[j] / mean).sum()
                fisher[j, i] = fisher[i, j]
        return len(self.data) * fisher / (1 - self.mean(*theta))
Example #21
def test_policy(p,
                n,
                visible=False,
                sd=0.03,
                bottom=False,
                nlm=None,
                Top=False):
    system = CartPole(visual=visible)
    #system.setState(np.array([0,0.01,-0.01,-0.01]))
    system.setState(
        np.array([
            random.normalvariate(0, sd),
            random.normalvariate(0, sd),
            random.normalvariate(0, sd),
            random.normalvariate(0, sd)
        ]))
    if nlm is not None and not Top:
        bottom = True
    if bottom:
        system.setState(np.array([0, 0, np.pi, 0]))
        #sd=1
        #system.setState(np.array([random.normalvariate(0, sd), random.normalvariate(0, sd), np.pi ,random.normalvariate(0, sd)]))
        #system.setState(np.array([-5.5,-3.36,-0.56,0.42]))

    state_history = np.empty((n, 4))
    #system.setState(np.array([0, 0, 0,5.1]))
    #system.setState(np.array([0, 0.2, 0.05, -0.2])*5)
    # system.setState(np.array([0, 0.2, 1, -4]) ) #get it to 1 rad at -4rads^1 and itll settle
    for i in range(n):
        x = system.getState()
        if x.ndim == 1:
            state_history[i, :] = x
        else:
            state_history[i, :] = x.reshape(4)

        if nlm is None:
            force = np.matmul(p, x)
        else:
            x_ext = nlm.transform_x(x)
            force = np.matmul(p, x_ext)
            #state_history[i,2]=force

        system.performAction(force)
        system.remap_angle()

    plt.plot(state_history[:, 2], label=r'$\theta$')
    plt.plot(state_history[:, 1], label='v')
    plt.plot(state_history[:, 3], label=r'$\dot{\theta}$')
    plt.plot(state_history[:, 0], label='x')
    plt.xlabel('Iteration')
    #plt.ylabel(r'$\theta$')
    plt.legend(loc='lower left')
    plt.title('P=' + str(p))
    #plt.ylim([-0.2,0.2])
    plt.show()
Example #22
def parse_display(spec):
    mean = (np.array(ast.literal_eval(read(spec['mean'])))
            if 'mean' in spec else np.array([0]))
    std = (np.array(ast.literal_eval(read(spec['std'])))
           if 'std' in spec else np.array([1]))
    resolution = (np.array(ast.literal_eval(read(spec['resolution'])))
                  if 'resolution' in spec else np.empty(0))

    display = Display(mean, std, resolution)

    return display
Example #23
    def train_lstm(self):
        num_epochs = 100
        # sanity check: check the shape of train input, train output, and test input
        # print('shape of train input: {0}, train output: {1}, test input: {2}'.format(self.train_input.shape, self.train_output.shape, self.test_input.shape))

        callbacks = [EarlyStopping(monitor='val_loss', patience=10)]
        iter_cnt = 0
        pred_train_output_mat = np.empty((0, self.len_train_output), float)
        pred_test_output_mat = np.empty((0, self.num_output), float)
        while iter_cnt < self.num_ensemble:
            self.history = self.model.fit(self.train_input, self.train_output, validation_split=0.15, shuffle=False,
                                          batch_size=1, epochs=num_epochs, callbacks=callbacks, verbose=0)

            # get the predicted train output
            pred_train_output = self.model.predict(self.train_input)
            # per-sequence matrix, kept separate from the ensemble accumulator above
            seq_train_output_mat = np.full((self.num_output, self.len_train_output), np.nan)
            for i in range(self.num_sequence):
                seq_pred_train_output = post_process_results(pred_train_output[i],
                                                             denom=self.train_denom_list[i],
                                                             ts_seasonality_in=self.ts_seasonality_in,
                                                             shift=i,
                                                             freq=self.freq).ravel()
                seq_train_output_mat[i % self.num_output, i: i + self.num_output] = seq_pred_train_output
            iter_pred_train_output = np.nanmean(seq_train_output_mat, axis=0)
            iter_train_smape, _ = smape(self.true_train_output, iter_pred_train_output)
            if iter_train_smape < 150:
                # get the predicted test output
                iter_pred_test_output = self.model.predict(self.test_input).ravel()
                iter_pred_test_output = post_process_results(iter_pred_test_output,
                                                             denom=self.test_denom,
                                                             ts_seasonality_in=self.ts_seasonality_in,
                                                             shift=self.len_train_output,
                                                             freq=self.freq).ravel()

                pred_train_output_mat = np.vstack((pred_train_output_mat, iter_pred_train_output))
                pred_test_output_mat = np.vstack((pred_test_output_mat, iter_pred_test_output))
                iter_cnt += 1

        self.pred_train_output = np.nanmean(pred_train_output_mat, axis=0)
        self.pred_test_output = np.nanmean(pred_test_output_mat, axis=0)
Example #24
def lds_simulate_loop(T, A, C, Q, R, mu0, Q0, ntrials):
    # write version that broadcasts over trials at some point

    d = A.shape[1]
    D = C.shape[0]

    x = np.empty((ntrials, T, d))
    y = np.empty((ntrials, T, D))

    L_R = np.linalg.cholesky(R)
    L_Q = np.linalg.cholesky(Q)

    for n in range(ntrials):
        x[n,0] = np.random.multivariate_normal(mu0, cov=Q0)
        y[n,0] = np.dot(C, x[n,0]) + np.dot(L_R, np.random.randn(D))

        for t in range(1, T):
            x[n,t] = np.dot(A[t-1], x[n,t-1]) + np.dot(L_Q[t-1], np.random.randn(d))
            y[n,t] = np.dot(C, x[n,t]) + np.dot(L_R, np.random.randn(D))

    return x, y
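A minimal simulation sketch; the model matrices below are stationary and made up, with A and Q stacked over time since the loop indexes A[t-1] and L_Q[t-1]:

import numpy as np

T, d, D, ntrials = 10, 2, 3, 2
A = np.tile(0.9 * np.eye(d), (T - 1, 1, 1))
Q = np.tile(0.1 * np.eye(d), (T - 1, 1, 1))
C = np.ones((D, d))
R = 0.1 * np.eye(D)

x, y = lds_simulate_loop(T, A, C, Q, R, np.zeros(d), np.eye(d), ntrials)
print(x.shape, y.shape)  # (2, 10, 2) (2, 10, 3)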
Example #25
def general_rank_2_QN(k, f, gradient, c, x_0, init_b0):

    # B_0 is our HESSIAN approximation (NOT hessian inverse). This is very critical!
    B_0 = init_b0
    counter = 0
    x_k = x_0
    B_k = B_0
    cond = True

    # Initialize the iterate history for plotting
    x_iterates = np.empty((k + 1, 2))
    x_iterates[0] = x_0

    def update(B_k, y_k, s_k):
        """Inner function which specifies the update rule."""
        return rank_2_B_update(B_k, y_k, s_k, s_k)


    while cond:

        # new iterates
        search_direction = np.linalg.solve(B_k, gradient(x_k))
        x_k_and_1  = x_k - search_direction #equiv to finding B^{-1} * grad. equiv again to solving B\delta = grad; for \delta
        # compute k+1 quantities
        y_k = gradient(x_k_and_1) - gradient(x_k)

        s_k = x_k_and_1 - x_k

        # Terminate if we have converged in finite steps
        if not np.any(s_k):
            # Fix rest of iterates to the converged value
            for j in range(counter, k+1):
                x_iterates[j] = x_k
            break


        # compute the next B_{k+1} iteration
        B_k_and_1 = update(B_k, y_k, s_k )

        # update the matrix:
        B_k = B_k_and_1
        x_k = x_k_and_1

        # logic for checking whether to terminate or not
        not_done = True
        counter += 1
        cond = counter < k and not_done
        x_iterates[counter] = x_k

    return x_k, x_iterates
Example #26
def epeps(pepsa, pepsb):
    shape = pepsa.shape
    epeps = np.empty(shape, dtype=object)
    for i in range(shape[0]):
        for j in range(shape[1]):
            epeps[i, j] = einsum("pludr,pLUDR->lLuUdDrR", pepsa[i, j],
                                 pepsb[i, j])
            eshape = epeps[i, j].shape
            epeps[i, j] = np.reshape(
                epeps[i, j], (eshape[0] * eshape[1], eshape[2] * eshape[3],
                              eshape[4] * eshape[5], eshape[6] * eshape[7]))
    return epeps
Example #27
def gd(n, f, dfdx, x_start, GD_alpha):
    # Initialize xs
    xs = np.empty((n + 1, 2))
    xs[0] = x_start
    for i in range(n):
        # Get gradient at the current location (df/dx or grad(f))
        gs = dfdx(xs[i])
        # Compute search direction and magnitude (dx)
        #  with dx = -alpha * grad but no line searching
        xs[i + 1] = xs[i] - np.dot(GD_alpha, gs)

    return xs
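A sketch of gd on a quadratic bowl f(x) = x·x, whose gradient is 2x; the step size and start point are arbitrary, and since f itself is never evaluated in the body, None can stand in for it:

import numpy as np

dfdx = lambda x: 2 * x
xs = gd(50, None, dfdx, np.array([1.0, -1.0]), 0.1)
print(xs[-1])  # close to [0, 0]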
Example #28
    def __init__(self, X_L, y_L, X_H, y_H):
        self.D = X_H.shape[1]
        self.X_L = X_L
        self.y_L = y_L
        self.X_H = X_H
        self.y_H = y_H
        self.L = np.empty([0,0])

        self.hyp = self.init_params()
        print("Total number of parameters: %d" % (self.hyp.shape[0]))

        self.jitter = 1e-8
Example #29
def gradient_descent(m, lambda_flows, grad_energy_bound, samples):
    '''
        Gradient descent for finding parameters. This may not work anymore since switching over
        to the Adam optimizer.
    '''
    energy_hist = np.empty(m)
    joint_hist = np.empty(m)
    flow_hist = np.empty(m)
    lambda_hist = np.empty((m, *lambda_flows.shape))
    samples_flowed = samples
    for i in tqdm(range(m)):
        beta = min(1, 0.01 + i / 10000)
        samples_flowed = flow_samples(lambda_flows, samples, h)

        gradient = grad_energy_bound(lambda_flows, samples, h, beta)
        lambda_flows -= step_size * gradient
        #lambda_flows = autograd.misc.optimizers.adam(grad_energy_bound, lambda_flows)

        # Debug
        energy_hist[i] = energy_bound(lambda_flows, samples, h)
        joint_hist[i] = get_joint_exp(lambda_flows, samples, h)
        flow_hist[i] = get_flow_exp(lambda_flows, samples, h)
        lambda_hist[i] = lambda_flows

        # Plot
        if i % 20 == 0:
            # zero-pad the frame index so saved filenames sort correctly
            frame = str(i).zfill(int(np.log10(m)) + 1)

            ax = setup_plot(u_func)
            ax.scatter(samples_flowed[:, 0], samples_flowed[:, 1], alpha=.5)
            plt.savefig("./plots/{}{}.png".format(zeros, i))
            plt.close()
Example #31
    def _ntied_transmat(self, transmat_val):  # TODO: document choices

        #                        +-----------------+
        #                        |a|1|0|0|0|0|0|0|0|
        #                        +-----------------+
        #                        |0|a|1|0|0|0|0|0|0|
        #                        +-----------------+
        #   +---+---+---+        |0|0|a|b|0|0|c|0|0|
        #   | a | b | c |        +-----------------+
        #   +-----------+        |0|0|0|e|1|0|0|0|0|
        #   | d | e | f | +----> +-----------------+
        #   +-----------+        |0|0|0|0|e|1|0|0|0|
        #   | g | h | i |        +-----------------+
        #   +---+---+---+        |d|0|0|0|0|e|f|0|0|
        #                        +-----------------+
        #                        |0|0|0|0|0|0|i|1|0|
        #                        +-----------------+
        #                        |0|0|0|0|0|0|0|i|1|
        #                        +-----------------+
        #                        |g|0|0|h|0|0|0|0|i|
        #                        +-----------------+
        # for a model with n_unique = 3 and n_tied = 2

        transmat = np.empty((0, self.n_components))
        for r in range(self.n_unique):
            row = np.empty((self.n_chain, 0))
            for c in range(self.n_unique):
                if r == c:
                    subm = np.array(
                        sp.diags([transmat_val[r, c], 1 - transmat_val[r, c]],
                                 [0, 1],
                                 shape=(self.n_chain, self.n_chain)).todense())
                else:
                    lower_left = np.zeros((self.n_chain, self.n_chain))
                    lower_left[self.n_tied, 0] = 1.0
                    subm = np.kron(transmat_val[r, c], lower_left)
                row = np.hstack((row, subm))
            transmat = np.vstack((transmat, row))
        return transmat
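The expansion in the diagram can be checked directly. A sketch that treats the method as a module-level function and feeds it a stub with the attributes it reads; all values are illustrative, with n_components = n_unique * n_chain and n_chain = n_tied + 1, matching the n_unique = 3, n_tied = 2 figure:

import types
import numpy as np
import scipy.sparse as sp

stub = types.SimpleNamespace(n_unique=3, n_tied=2, n_chain=3, n_components=9)
tv = np.full((3, 3), 1.0 / 3.0)
expanded = _ntied_transmat(stub, tv)
print(expanded.shape)  # (9, 9)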
Example #32
def alleged_BFGS(n, f, dfdx, x_start, TEMP_B0, BFGS_alpha):
    # Initialize delta_xq and gamma
    delta_xq = np.zeros((2, 1))
    gamma = np.zeros((2, 1))
    part1 = np.zeros((2, 2))
    part2 = np.zeros((2, 2))
    part3 = np.zeros((2, 2))
    part4 = np.zeros((2, 2))
    part5 = np.zeros((2, 2))
    part6 = np.zeros((2, 1))
    part7 = np.zeros((1, 1))
    part8 = np.zeros((2, 2))
    part9 = np.zeros((2, 2))
    # Initialize xq
    xq = np.empty((n + 1, 2)) 
    xq[0] = x_start
    # Initialize gradient storage
    g = np.zeros((n + 1, 2))
    g[0] = dfdx(xq[0])
    # Initialize hessian storage
    h = np.zeros((n + 1, 2, 2))
    h[0] = TEMP_B0
    for i in range(n):

        # Compute search direction and magnitude (dx)
        #  with dx = -alpha * inv(h) * grad
        search_dirn = np.linalg.solve(h[i], g[i])
        delta_xq = -np.dot(BFGS_alpha[i], search_dirn)
        # delta_xq = - np.linalg.solve(h[i], g[i])

        xq[i + 1] = xq[i] + delta_xq

        # Get gradient update for next step
        g[i + 1] = dfdx(xq[i + 1])

        # Get hessian update for next step
        gamma = g[i + 1] - g[i]
        part1 = np.outer(gamma, gamma)
        part2 = np.outer(gamma, delta_xq)
        part3 = np.dot(np.linalg.pinv(part2), part1)

        part4 = np.outer(delta_xq, delta_xq)
        part5 = np.dot(h[i], part4)
        part6 = np.dot(part5, h[i])
        part7 = np.dot(delta_xq, h[i])
        part8 = np.dot(part7, delta_xq)
        part9 = np.dot(part6, 1 / part8)

        h[i + 1] = h[i] + part3 - part9

    return xq
Example #33
    def grassman_cost_function(self, W):
        new_X = np.dot(self.db['Dloader'].X, W)
        σ = self.db['Dloader'].σ
        γ = self.db['compute_γ']()

        #	compute gaussian kernel
        bs = new_X.shape[0]
        K = np.empty((0, bs))
        for i in range(bs):
            Δx = new_X[i, :] - new_X
            exp_val = -np.sum(Δx * Δx, axis=1) / (2 * σ * σ)
            K = np.vstack((K, np.exp(exp_val)))

        return -np.sum(γ * K)
Example #34
    def __init__(self, order, precision, reg=5e-3, process_noise=.01, loss_threshold=.001, color=-1):
        super(Bezier, self).__init__()

        self.bernstein = get_bernstein(precision=precision, order=order)

        self.controls = np.empty((order, 2))

        self.cloud = None
        self.reg = reg

        self.loss_threshold = loss_threshold
        self.filter = KalmanFilter(dimension=order, process_noise=process_noise)

        self.color = COLORS[color]
Example #35
def test_multivariate_normal_logpdf_unique_params(D=10):
    # Test broadcasting over datapoints and corresponding parameters
    leading_ndim = npr.randint(1, 4)
    shp = npr.randint(1, 10, size=leading_ndim)
    x = npr.randn(*shp, D)
    mu = npr.randn(*shp, D)
    L = npr.randn(*shp, D, D)
    Sigma = np.matmul(L, np.swapaxes(L, -1, -2))

    ll1 = multivariate_normal_logpdf(x, mu, Sigma)
    ll2 = np.empty(shp)
    for inds in product(*[np.arange(s) for s in shp]):
        ll2[inds] = mvn.logpdf(x[inds], mu[inds], Sigma[inds])
    assert np.allclose(ll1, ll2)
Example #36
def run_dataset(prob_label):
    """Run the experiment"""
    list_ss = get_sample_source_list(prob_label)
    dimensions = [ss.dim() for ss in list_ss]

    # ///////  submit jobs //////////
    # create folder name string
    home = os.path.expanduser("~")
    foldername = os.path.join(home, "freqopttest_slurm", 'e%d'%ex)
    logger.info("Setting engine folder to %s" % foldername)

    # create parameter instance that is needed for any batch computation engine
    logger.info("Creating batch parameter instance")
    batch_parameters = BatchClusterParameters(
        foldername=foldername, job_name_base="e%d_"%ex, parameter_prefix="")

    # Use the following line if Slurm queue is not used.
    #engine = SerialComputationEngine()
    engine = SlurmComputationEngine(batch_parameters)
    n_methods = len(method_job_funcs)
    # repetitions x len(dimensions) x #methods
    aggregators = np.empty((reps, len(dimensions), n_methods), dtype=object)
    for r in range(reps):
        for di, d in enumerate(dimensions):
            for mi, f in enumerate(method_job_funcs):
                # name used to save the result
                func_name = f.__name__
                fname = '%s-%s-J%d_r%d_n%d_d%d_a%.3f_trp%.2f.p' \
                    %(prob_label, func_name, J, r, sample_size, d, alpha, tr_proportion)
                if not is_rerun and glo.ex_file_exists(ex, prob_label, fname):
                    logger.info('%s exists. Load and return.'%fname)
                    test_result = glo.ex_load_result(ex, prob_label, fname)

                    sra = SingleResultAggregator()
                    sra.submit_result(SingleResult(test_result))
                    aggregators[r, di, mi] = sra
                else:
                    # result not exists or rerun
                    job = Ex2Job(SingleResultAggregator(), list_ss[di],
                            prob_label, r, f)
                    agg = engine.submit_job(job)
                    aggregators[r, di, mi] = agg

    # let the engine finish its business
    logger.info("Wait for all call in engine")
    engine.wait_for_all()

    # ////// collect the results ///////////
    logger.info("Collecting results")
    test_results = np.empty((reps, len(dimensions), n_methods), dtype=object)
    for r in range(reps):
        for di, d in enumerate(dimensions):
            for mi, f in enumerate(method_job_funcs):
                logger.info("Collecting result (%s, r=%d, d=%d)" % (f.__name__, r, d))
                # let the aggregator finalize things
                aggregators[r, di, mi].finalize()

                # aggregators[i].get_final_result() returns a SingleResult instance,
                # which we need to extract the actual result
                test_result = aggregators[r, di, mi].get_final_result().result
                test_results[r, di, mi] = test_result

    func_names = [f.__name__ for f in method_job_funcs]
    func2labels = exglobal.get_func2label_map()
    method_labels = [func2labels[f] for f in func_names if f in func2labels]
    # save results 
    results = {'test_results': test_results, 'dimensions': dimensions, 
            'alpha': alpha, 'J': J, 'list_sample_source': list_ss, 
            'tr_proportion': tr_proportion, 'method_job_funcs': method_job_funcs, 
            'prob_label': prob_label, 'sample_size': sample_size, 
            'method_labels': method_labels}
    
    # class name 
    fname = 'ex2-%s-me%d_J%d_rs%d_n%d_dmi%d_dma%d_a%.3f_trp%.2f.p' \
        %(prob_label, n_methods, J, reps, sample_size, min(dimensions),
                max(dimensions), alpha, tr_proportion)
    glo.ex_save_result(ex, results, fname)
    logger.info('Saved aggregated results to %s'%fname)
Example #37
    def diagonal_covariance(self, test_points):
        ret = np.empty((test_points.shape[0], 1))
        for i in range(test_points.shape[0]):
            _, cov = self.no_obs_posterior(test_points[i, :][None, :])
            ret[i, 0] = cov
        return ret
Example #38
import numpy as np


def cross(a, b):
    # Cross product of two 3-vectors, written out component by component
    out = np.empty(3)
    out[0] = a[1]*b[2] - a[2]*b[1]
    out[1] = a[2]*b[0] - a[0]*b[2]
    out[2] = a[0]*b[1] - a[1]*b[0]
    return out
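A quick sanity check against numpy's built-in cross product; the vectors are arbitrary:

a, b = np.array([1.0, 2.0, 3.0]), np.array([4.0, 5.0, 6.0])
print(cross(a, b))     # [-3.  6. -3.]
print(np.cross(a, b))  # [-3.  6. -3.]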