def generate_pvi_vs_vi(mdp, init):
    print('\nRunning PVI vs VI')
    lr = 0.01

    # pvi
    core_init = random_parameterised_matrix(2, 2, 32, 4)
    core_init = approximate(init, core_init)
    params = utils.solve(parameterised_value_iteration(mdp, lr/len(core_init)), core_init)
    vs = np.vstack([np.max(build(c), axis=1) for c in params])
    m = vs.shape[0]
    plt.scatter(vs[0, 0], vs[0, 1], c='r', label='pvi')
    plt.scatter(vs[1:-1, 0], vs[1:-1, 1], c=range(m-2), cmap='autumn', s=10)
    plt.scatter(vs[-1, 0], vs[-1, 1], c='r', marker='x')

    # vi
    qs = utils.solve(value_iteration(mdp, lr), init)
    vs = np.vstack([np.max(q, axis=1) for q in qs])
    n = vs.shape[0]
    plt.scatter(vs[0, 0], vs[0, 1], c='g', label='vi')
    plt.scatter(vs[1:-1, 0], vs[1:-1, 1], c=range(n-2), cmap='spring', s=10)
    plt.scatter(vs[-1, 0], vs[-1, 1], c='g', marker='x')

    plt.title('VI: {}, PVI {}'.format(n, m))
    plt.legend()
    # plt.colorbar()

    plt.savefig('figs/vi-vs-pvi.png', dpi=300)
    plt.close()
def generate_mpvi_vs_mvi(mdp, init):
    print('\nRunning MPVI vs MVI')
    lr = 1e-2
    # mpvi
    core_init = random_parameterised_matrix(2, 2, 32, 8)
    core_init = approximate(init, core_init)
    c_init = (core_init, [np.zeros_like(c) for c in core_init])
    params = utils.solve(momentum_bundler(parameterised_value_iteration(mdp, lr), 0.9), c_init)
    vs = np.vstack([np.max(build(c[0]), axis=-1) for c in params])
    m = vs.shape[0]

    plt.scatter(vs[0, 0], vs[0, 1], c='r', label='mpvi')
    plt.scatter(vs[1:-1, 0], vs[1:-1, 1], c=range(m-2), cmap='autumn', s=10)
    plt.scatter(vs[-1, 0], vs[-1, 1], c='r', marker='x')

    # mvi
    init = (init, np.zeros_like(init))
    qs = utils.solve(momentum_bundler(value_iteration(mdp, lr), 0.9), init)
    vs = np.vstack([np.max(q[0], axis=-1) for q in qs])
    n = vs.shape[0]

    plt.scatter(vs[0, 0], vs[0, 1], c='g', label='mvi')
    plt.scatter(vs[1:-1, 0], vs[1:-1, 1], c=range(n-2), cmap='spring', s=10)
    plt.scatter(vs[-1, 0], vs[-1, 1], c='g', marker='x')


    plt.title('MVI: {}, MPVI {}'.format(n, m))
    plt.legend()

    plt.savefig('figs/mpvi-vs-mvi.png')
    plt.close()
Example #3
    def act(self, state):
        while self.T.num_leaves() < self.target_num_leaves:
            state_node = self.T.softmax_sample_leaves()
            state = state_node.state
            noise_state = state_node.noise_state
            noise_state, action = dampedSpringNoiseStep(noise_state)
            state_est = state + self.dynamics.predict(np.vstack((state, action)))
            reward_est = self.reward.predict(np.vstack((state_est, action)))
            value_est = self.value.predict(state_est)

            new_state_node = nodeData(
                action_inbound=action,
                state=state_est,
                noise_state=noise_state,
                value_est=value_est,
                step_reward=reward_est,
            )

            self.T.add_state(
                parent=state_node,
                child=new_state_node,
            )
        target_node = self.T.softmax_sample_leaf_states()
        next_node = self.T.step_toward_and_reroot(target_node)
        action = next_node.action
        return action
Example #4
def test_multiply():
    x = np.linspace(-2.5, 15, 5)[:, np.newaxis].astype(np.float32)
    y = randn(x.size)[:, np.newaxis].astype(np.float32)

    gk_x = GaussianKernel(0.1)

    x_e1 = FiniteVec.construct_RKHS_Elem(gk_x, x)
    x_e2 = FiniteVec.construct_RKHS_Elem(gk_x, y)
    x_fv = FiniteVec(gk_x,
                     np.vstack([x, y]),
                     prefactors=np.hstack([x_e1.prefactors] * 2),
                     points_per_split=x.size)

    oper_feat_vec = FiniteVec(gk_x, x)

    oper = FiniteOp(oper_feat_vec, oper_feat_vec, np.eye(len(x)))
    res_e1 = multiply(oper, x_e1)
    res_e2 = multiply(oper, x_e2)
    res_v = multiply(oper, x_fv)
    assert np.allclose(
        res_e1.prefactors,
        (oper.matr @ oper.inp_feat.inner(x_e1)
         ).flatten()), "Application of operator to RKHS element failed."
    assert np.allclose(
        res_v.inspace_points,
        np.vstack([res_e1.inspace_points, res_e2.inspace_points])
    ), "Application of operator to all vectors in RKHS vector failed at inspace points."
    assert np.allclose(
        res_v.prefactors, np.hstack([
            res_e1.prefactors, res_e2.prefactors
        ])), "Application of operator to all vectors in RKHS vector failed."
    assert np.allclose(
        multiply(oper, oper).matr, oper.inp_feat.inner(
            oper.outp_feat)), "Application of operator to operator failed."
Example #5
def make_n_step_data_from_DDQN(S, A, C, n, stdizer, params, key=randKey()):
    S_0_th = angle_normalize(S[0, :-n])
    S_0_thdot = S[1, :-n]

    S_n_th = angle_normalize(S[0, n:])
    S_n_thdot = S[1, n:]

    A_0 = A[:-n]
    A_n = A[n:]

    stdizer.observe_reward_vec(C)
    R = jnp.vstack(
        [stdizer.standardize_reward(C[m:-(n - m)]) for m in range(n)])

    episode = jnp.vstack([
        jnp.cos(S_0_th),
        jnp.sin(S_0_th),
        S_0_thdot,
        A_0,
        jnp.cos(S_n_th),
        jnp.sin(S_n_th),
        S_n_thdot,
        A_n,
        R,
    ])
    return episode
Example #6
def generate_nested_circles(key,
                            n_samples,
                            inner_radius=2,
                            outer_radius=4,
                            noise=0.15):

    k1, k2, k3, k4 = random.split(key, 4)

    # Generate the circles
    inner_t = random.uniform(k1, shape=(n_samples // 2, )) * 2 * jnp.pi
    inner_circle = inner_radius * jnp.vstack(
        [jnp.cos(inner_t), jnp.sin(inner_t)])

    outer_t = random.uniform(k2, shape=(n_samples // 2, )) * 2 * jnp.pi
    outer_circle = outer_radius * jnp.vstack(
        [jnp.cos(outer_t), jnp.sin(outer_t)])

    data = jnp.vstack([inner_circle.T, outer_circle.T])

    # Keep track of the labels
    y = jnp.hstack([jnp.zeros(n_samples // 2), jnp.ones(n_samples // 2)])

    # Shuffle the data
    idx = jnp.arange(n_samples)
    idx = random.permutation(k3, idx)
    data = data[idx]
    y = y[idx]

    data += random.normal(k4, data.shape) * noise
    return data, y
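
A minimal usage sketch (hedged; it assumes the snippet's own imports, jax.numpy as jnp and jax.random as random, are in scope). It just confirms the returned shapes: n_samples is split evenly between the two circles.

key = random.PRNGKey(0)
data, y = generate_nested_circles(key, n_samples=500)
assert data.shape == (500, 2) and y.shape == (500,)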
Example #7
def bo(key,
       x_obs,
       y_obs,
       obj,
       params_init,
       params_bounds,
       search_space,
       num_bo_rounds,
       acquisition_fn=thompson_sampling,
       batch_size=1,
       num_points=2000,
       num_steps=1000,
       method='tfp'):
  """Run seq-batch Bayesian Optimization (BO)."""
  gp_util = gp.GPUtils()
  additional_info_dict = {}
  for i in range(num_bo_rounds//batch_size):
    key_loop = jax.random.fold_in(key, i)
    gaussian_process = gp_util.fit_gp(
        x_obs, y_obs, params_init, params_bounds, steps=num_steps)

    x_new = acquisition_fn(key_loop,
                           x_obs,
                           y_obs,
                           gaussian_process,
                           gp_util,
                           search_space,
                           batch_size=batch_size,
                           num_points=num_points,
                           method=method)
    y_new, additional_info = obj(x_new)
    additional_info_dict[i] = additional_info
    x_obs = jnp.vstack((x_obs, x_new))
    y_obs = jnp.vstack((y_obs, y_new))
  return x_obs, y_obs, additional_info_dict
Example #8
        def _logG(Xp, X, parameter_dict, t):
            xp, x = Xp['x'], X['x']

            # compute importance weights (the IW parameters carry one more
            # index than everything else)
            mu_t = self.IW_parameters['mu'][t + 1]
            mu_tm1 = self.IW_parameters['mu'][t]
            cov_force_t = jnp.exp(self.IW_parameters['lcov_force'][t + 1])
            cov_force_tm1 = jnp.exp(self.IW_parameters['lcov_force'][t])

            IWs = (-self._IW_energy_fn(x, jnp.vstack((mu_t, cov_force_t)))
                   + self._IW_energy_fn(xp, jnp.vstack((mu_tm1, cov_force_tm1))))

            #compute M_kernel_logp: we don't need to recompute this
            k_t_logps = normalized_Gaussian_logp(x, X['forward_mu'],
                                                 X['forward_cov'])

            #compute l_kernel_logp
            Lparams = parameter_dict['L_parameters']
            potential_params = jnp.vstack(
                (Lparams['mu'][t], jnp.exp(Lparams['lcov_force'][t])))
            l_mu, l_cov = EL_mu_sigma(x, self._kernel_energy_fn,
                                      parameter_dict['ldt'], potential_params)
            l_tm1_logps = normalized_Gaussian_logp(xp, l_mu, l_cov)

            #finalize and return
            lws = IWs + l_tm1_logps - k_t_logps
            return lws
Example #9
def flux_through_all_loops(r_surf, ll, dl, I_arr):
    """
    returns the magnetic flux through all poloidal and toroidal loops
    of the surface array.
    Arguments:
    *r_surf*: (n, m, 3) array of cartesian coordinates on the surface.
    [:,0,:] must be the first poloidal loop, and [0,:,:] the first toroidal loop.
    *ll*: (n_coils, nsegments, 3) array of the positions of the coil line segments.
    *dl*: (n_coils, nsegments, 3) array of the segment vectors along the coil segments.
    *I_arr*: array of the currents in the coils.
    returns:
    *polint*: n-array of poloidal fluxes
    *torint*: m-array of toroidal fluxes
    """
    mapped_vector_potential = vmap(
        vmap(vector_potential, (0, None, None, None), 0),
        (1, None, None, None), 1)
    vec_surf = mapped_vector_potential(r_surf, ll, dl, I_arr)
    dl_pol = np.hstack((r_surf[:, 1:, :] - r_surf[:, :-1, :],
                        (r_surf[:, 0, :] - r_surf[:, -1, :])[:, None, :]))
    A_midpol = 0.5 * np.hstack(
        (vec_surf[:, 1:, :] + vec_surf[:, :-1, :],
         (vec_surf[:, 0, :] + vec_surf[:, -1, :])[:, None, :]))
    # inner product over the cartesian axis, then "integrate" poloidally:
    # the einsum contracts axes 1 and 2 of the product, keeping only axis 0.
    polint = np.einsum('ijk, ijk->i', dl_pol, A_midpol)

    dl_tor = np.vstack((r_surf[1:, :, :] - r_surf[:-1, :, :],
                        (r_surf[0, :, :] - r_surf[-1, :, :])[None, :, :]))
    A_midtor = 0.5 * np.vstack(
        (vec_surf[1:, :, :] + vec_surf[:-1, :, :],
         (vec_surf[0, :, :] + vec_surf[-1, :, :])[None, :, :]))
    torint = np.einsum('ijk, ijk->j', dl_tor, A_midtor)
    return polint, torint
Example #10
    def get_labels_and_idxs_bbox(self, outputs, targets, indices, num_boxes):
        """
        Build the regression targets and the loss mask for the bounding-box losses
        (the L1 regression loss and the GIoU loss). Targets dicts must contain the
        key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target
        boxes are expected in format (center_x, center_y, w, h), normalized by the
        image size.
        """
        bbx_idx = self._get_src_permutation_idx(indices)
        B, N, _ = outputs['pred_boxes'].shape
        target_boxes = jnp.concatenate(
            [jnp.zeros((B, N, 2)), jnp.ones((B, N, 2))], -1)  # [[0,0,1,1]]

        box_loss_mask = jnp.zeros(outputs['pred_boxes'].shape[:2])

        target_boxes_o = jnp.concatenate(
            [t["boxes"][np.array(i)] for t, (_, i) in zip(targets, indices)],
            axis=0)
        box_loss_indices = jnp.ones(len(target_boxes_o))

        target_boxes = self.scatter_nd(target_boxes,
                                       jnp.vstack(bbx_idx).astype(jnp.int32).T,
                                       target_boxes_o)
        box_loss_mask = self.scatter_nd(
            box_loss_mask,
            jnp.vstack(bbx_idx).astype(jnp.int32).T, box_loss_indices)

        return target_boxes, box_loss_mask
def generate_PG_vs_VI(mdp, init):
    print('\nRunning PG vs VI')
    lr = 0.001

    # PG
    logits = utils.solve(policy_gradient_iteration_logits(mdp, lr), init)
    vs = np.vstack([value_functional(mdp.P, mdp.r, softmax(logit), mdp.discount).T for logit in logits])
    n = vs.shape[0]
    plt.scatter(vs[0, 0], vs[0, 1], c='g', label='PG')
    plt.scatter(vs[1:-1, 0], vs[1:-1, 1], c=range(n-2), cmap='spring', s=10)
    plt.scatter(vs[-1, 0], vs[-1, 1], c='g', marker='x')

    # VI
    v = value_functional(mdp.P, mdp.r, softmax(init), mdp.discount)
    init = np.einsum('ijk,jl->jk', mdp.P, v)  # V->Q
    qs = utils.solve(value_iteration(mdp, lr), init)
    vs = np.vstack([np.max(q, axis=1) for q in qs])
    m = vs.shape[0]
    plt.scatter(vs[0, 0], vs[0, 1], c='r', label='VI')
    plt.scatter(vs[1:-1, 0], vs[1:-1, 1], c=range(m-2), cmap='autumn', s=10)
    plt.scatter(vs[-1, 0], vs[-1, 1], c='r', marker='x')
    plt.legend()
    plt.title('PG: {}, VI {}'.format(n, m))
    # plt.colorbar()

    plt.savefig('figs/pg-vs-vi.png')
    plt.close()
    def dynamics_fwd(cls, s, v_m):

        v_m = np.clip(v_m, -cls.v_mx, cls.v_mx)

        x, theta, x_dot, theta_dot = s

        # Compute force acting on the cart:
        F = ((cls.eta_g * cls.Kg * cls.eta_m * cls.Kt) / (cls.Rm * cls.r_mp) *
             (-cls.Kg * cls.Km * x_dot / cls.r_mp + cls.eta_m * v_m))

        # Compute acceleration:
        A11 = cls.mp + cls.Jeq
        A12 = cls.mp * cls.pl * np.cos(theta)
        A21 = cls.mp * cls.pl * np.cos(theta)
        A22 = cls.Jp + cls.mp * cls.pl**2

        b11 = F - cls.Beq * x_dot - cls.mp * cls.pl * np.sin(
            theta) * theta_dot**2
        b21 = 0. - cls.Bp * theta_dot - cls.mp * cls.pl * cls.g * np.sin(theta)

        A = np.vstack((np.hstack((A11, A12)), np.hstack((A21, A22))))

        b = np.vstack((b11, b21))

        Ainv = np.linalg.inv(A)
        s_ddot = np.dot(Ainv, b).squeeze()
        s_vel = np.hstack((x_dot, theta_dot)) + s_ddot * cls.dt
        s_pos = np.hstack((x, theta)) + s_vel * cls.dt
        s_next = np.hstack((s_pos, s_vel))
        return s_next
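
One numerical note on the solve above: inverting the 2x2 mass matrix works, but np.linalg.solve is the more stable equivalent. A self-contained sketch of that equivalence on a generic system:

import numpy as np

A = np.array([[2.0, 1.0], [1.0, 3.0]])
b = np.array([[1.0], [2.0]])
assert np.allclose(np.dot(np.linalg.inv(A), b), np.linalg.solve(A, b))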
def generate_PG_vs_PPG(mdp, init):
    print('\nRunning PG vs PPG')
    lr = 0.1

    # PPG
    core_init = random_parameterised_matrix(2, 2, 32, 8)
    core_init = approximate(init, core_init)
    all_params = utils.solve(parameterised_policy_gradient_iteration(mdp, lr/len(core_init)), core_init)
    vs = np.vstack([np.max(value_functional(mdp.P, mdp.r, softmax(build(params), axis=-1), mdp.discount), axis=1)
                    for params in all_params])
    m = vs.shape[0]
    plt.scatter(vs[0, 0], vs[0, 1], c='g', label='PPG')
    plt.scatter(vs[1:-1, 0], vs[1:-1, 1], c=range(m-2), cmap='spring', s=10)
    plt.scatter(vs[-1, 0], vs[-1, 1], c='g', marker='x')

    # PG
    logits = utils.solve(policy_gradient_iteration_logits(mdp, lr), init)
    vs = np.vstack([np.max(value_functional(mdp.P, mdp.r, softmax(logit, axis=-1), mdp.discount), axis=1) for logit in logits])
    n = vs.shape[0]
    plt.scatter(vs[0, 0], vs[0, 1], c='r', label='PG')
    plt.scatter(vs[1:-1, 0], vs[1:-1, 1], c=range(n-2), cmap='autumn', s=10)
    plt.scatter(vs[-1, 0], vs[-1, 1], c='r', marker='x')

    plt.title('PG: {}, PPG {}'.format(n, m))
    plt.legend()
    # plt.colorbar()

    plt.savefig('figs/pg-vs-ppg.png', dpi=300)
    plt.close()
def generate_vi_sgd_vs_mom(mdp, init, lr=0.01):
    print('\nRunning VI SGD vs Mom')

    # sgd
    qs = utils.solve(value_iteration(mdp, lr), init)
    qs = clipped_stack(qs, 1000)
    vs = np.vstack([np.max(q, axis=1) for q in qs])
    n = vs.shape[0]
    plt.scatter(vs[0, 0], vs[0, 1], c='m', label='gd')
    plt.scatter(vs[1:-1, 0], vs[1:-1, 1], c=range(n-2), cmap='spring', s=10)
    plt.scatter(vs[-1, 0], vs[-1, 1], c='m', marker='x')

    # momentum
    init = (init, np.zeros_like(init))
    qs = utils.solve(momentum_bundler(value_iteration(mdp, lr), 0.99), init)
    qs = clipped_stack(qs, 1000)
    vs = np.vstack([np.max(q[0], axis=1) for q in qs])
    m = vs.shape[0]
    plt.scatter(vs[0, 0], vs[0, 1], c='r', label='momentum')
    plt.scatter(vs[1:-1, 0], vs[1:-1, 1], c=range(m-2), cmap='autumn', s=10)
    plt.scatter(vs[-1, 0], vs[-1, 1], c='r', marker='x')

    plt.title('SGD: {}, Mom {}, Lr: {}'.format(n, m, lr))
    plt.legend()

    plt.savefig('figs/vi_sgd-vs-vi_mom_{}.png'.format(lr))
    plt.close()
Example #15
def generate_sample_grid(theta_mean, theta_std, n):
    """
    Create a meshgrid of n ** n_dim samples,
    tiling [theta_mean[i] - 5 * theta_std[i], theta_mean[i] + 5 * theta_std[i]]
    into n portions.
    Also returns the volume element.

    Parameters
    ----------
    theta_mean, theta_std : ndarray (n_dim)

    Returns
    -------
    theta_samples : ndarray (n ** n_dim, n_dim)

    vol_element: scalar
        Volume element

    """
    n_components = theta_mean.size
    xs = [
        np.linspace(
            theta_mean[i] - 5 * theta_std[i],
            theta_mean[i] + 5 * theta_std[i],
            n,
        )
        for i in range(n_components)
    ]
    mxs = np.meshgrid(*xs)
    orshape = mxs[0].shape
    mxsf = np.vstack([i.ravel() for i in mxs]).T
    dxs = np.vstack([np.diff(xs[i])[0] for i in range(n_components)])  # uniform spacing per dimension
    vol_element = np.prod(dxs)
    theta_samples = mxsf
    return theta_samples, vol_element
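
A usage sketch for the grid builder (hedged; plain numpy, with made-up means and stds): n = 10 points per dimension over 2 dimensions yields n ** n_dim = 100 samples.

import numpy as np

theta_mean = np.array([0.0, 1.0])
theta_std = np.array([1.0, 0.5])
theta_samples, vol_element = generate_sample_grid(theta_mean, theta_std, 10)
assert theta_samples.shape == (100, 2)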
def generate_mppg_vs_mpg(mdp, init):
    print('\nRunning MPG vs MPPG')
    lr = 0.001

    # MPPG
    core_init = random_parameterised_matrix(2, 2, 32, 8)
    core_init = approximate(init, core_init)
    core_init = (core_init, [np.zeros_like(c) for c in core_init])
    all_params = utils.solve(momentum_bundler(parameterised_policy_gradient_iteration(mdp, lr), 0.9), core_init)
    vs = np.vstack([np.max(value_functional(mdp.P, mdp.r, softmax(build(params), axis=-1), mdp.discount), axis=1)
                    for params, mom in all_params])
    m = vs.shape[0]
    plt.scatter(vs[0, 0], vs[0, 1], c='g', label='MPPG')
    plt.scatter(vs[1:-1, 0], vs[1:-1, 1], c=range(m-2), cmap='spring', s=10)
    plt.scatter(vs[-1, 0], vs[-1, 1], c='g', marker='x')

    # MPG
    init = (init, np.zeros_like(init))
    logits = utils.solve(momentum_bundler(policy_gradient_iteration_logits(mdp, lr), 0.9), init)
    vs = np.vstack([np.max(value_functional(mdp.P, mdp.r, softmax(logit, axis=-1), mdp.discount), axis=1) for logit, mom in logits])
    n = vs.shape[0]
    plt.scatter(vs[0, 0], vs[0, 1], c='r', label='MPG')
    plt.scatter(vs[1:-1, 0], vs[1:-1, 1], c=range(n-2), cmap='autumn', s=10)
    plt.scatter(vs[-1, 0], vs[-1, 1], c='r', marker='x')

    plt.title('MPG: {}, MPPG {}'.format(n, m))
    plt.legend()
    # plt.colorbar()

    plt.savefig('figs/mpg-vs-mppg.png', dpi=300)
    plt.close()
def fill_cov(S, dim):
    m, _ = S.shape
    S_eye = jnp.identity(dim - m)
    S_fill = jnp.zeros((m, dim - m))
    S_fill_left = jnp.vstack((S_fill, S_eye))
    S_final = jnp.vstack((S, S_fill.T))
    S_final = jnp.hstack((S_final, S_fill_left))
    return S_final
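
A small sketch of what fill_cov produces (assumes jax.numpy as jnp is in scope): the input covariance lands in the top-left block, an identity fills the added dimensions, and the off-diagonal blocks are zero.

S = jnp.array([[2.0, 0.5], [0.5, 1.0]])
S_big = fill_cov(S, 4)
assert S_big.shape == (4, 4)
assert jnp.allclose(S_big[:2, :2], S) and jnp.allclose(S_big[2:, 2:], jnp.eye(2))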
Example #18
def hits(adjMatrix, p: int = 100):
    """
    Calculate the hub and authority score in a net

    Parameters
    -------------------
    adjMatrix: a numpy array NxN.

    p: int. Default 100. The maximum number of iterations.

    Returns
    -------------------
    hub: dict. The hub score for each node in the net.
    authority: dict. The authority score for each node in the net.
    h: numpy array Nxp. (Optional). The hub score for each node in the net for each algorithm's step.
    a: numpy array Nxp. (Optional). The authority score for each node in the net for each algorithm's step.

    Example
    -------------------
    >>> import numpy as np
    >>> from influencer.lazy_centrality import hits

    Create an adjacency matrix with numpy

    >>> adjM = np.random.rand(10, 10)
    >>> adjM[adjM > 0.5] = 1
    >>> adjM[adjM <= 0.5] = 0
    >>> hub, aut, _, _ = hits(adjMatrix=adjM)
    """
    
    n = adjMatrix.shape[0]
    
    a = np.ones([1,n])
    h = np.ones([1,n])
    
    pa=a
    #ph=h
    
    authority = {}
    hub = {}
    
    for k in range(1,p):
        h1 = np.dot(adjMatrix, pa.T)/np.linalg.norm(np.dot(adjMatrix, pa.T))
        a1 = np.dot(adjMatrix.T, h1)/np.linalg.norm(np.dot(adjMatrix.T , h1))
    
        h = np.vstack((h,np.dot(adjMatrix, a[k-1,:].T)/np.linalg.norm(np.dot(adjMatrix, a[k-1,:].T))))
        a = np.vstack((a,np.dot(adjMatrix.T, h[k,:].T)/np.linalg.norm(np.dot(adjMatrix.T, h[k,:].T))))
    
        pa = a1.T
        #ph = h1.T
        
    for i in range(n):
        authority[str(i)] = a[-1,i]
        hub[str(i)] = h[-1,i]
    
    return hub, authority, h, a
Example #19
File: roc_auc.py Project: jtamanas/LBI
def distinguish_samples(a_samples, b_samples, classifier_params, logit_d):
    a_labels = np.zeros((a_samples.shape[0], 1))
    b_labels = np.ones((b_samples.shape[0], 1))
    samples = np.vstack([a_samples, b_samples])
    true_labels = np.vstack([a_labels, b_labels])

    parallel_logit_d = jax.vmap(logit_d, in_axes=(0, None, None))
    pred_labels = parallel_logit_d(classifier_params, samples, samples)
    pred_labels = jax.nn.sigmoid(pred_labels).mean(0)
    return true_labels, pred_labels
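
The returned (true_labels, pred_labels) pair is the usual input to an AUC routine. A hedged sketch with toy stand-in labels; scikit-learn is an assumption here, not part of the original snippet:

import numpy as np
from sklearn.metrics import roc_auc_score

true_labels = np.vstack([np.zeros((5, 1)), np.ones((5, 1))])  # stand-in labels
pred_labels = np.linspace(0.1, 0.9, 10)                       # stand-in scores
auc = roc_auc_score(true_labels.ravel(), pred_labels)         # 1.0 for this toy case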
Example #20
def generate_dataset(n_b, n_c, n_d):
    x_b = domain * 1.0
    u_b = u_fn(x_b)

    x_c = jnp.linspace(*domain[:, 0], n_c).reshape((-1, 1))

    x_d = jnp.linspace(*domain[:, 0], n_d).reshape((-1, 1))
    u_d = u_fn(x_d)

    dirichlet = dataset_Dirichlet(jnp.vstack([x_b, x_d]),
                                  jnp.vstack([u_b, u_d]))
    collocation = dataset_Collocation(jnp.vstack([x_b, x_c]))
    return dirichlet, collocation
Example #21
def create_spatiotemporal_grid(X, Y):
    """
    create a grid of data sized [T, R1, R2]
    note that this function removes full duplicates (i.e. where all dimensions match)
    TODO: generalise to >5D
    """
    if Y.ndim < 2:
        Y = Y[:, None]
    num_spatial_dims = X.shape[1] - 1
    if num_spatial_dims == 4:
        sort_ind = nnp.lexsort(
            (X[:, 4], X[:, 3], X[:, 2], X[:, 1], X[:, 0]))  # sort by 0, 1, 2, 3, 4
    elif num_spatial_dims == 3:
        sort_ind = nnp.lexsort(
            (X[:, 3], X[:, 2], X[:, 1], X[:, 0]))  # sort by 0, 1, 2, 3
    elif num_spatial_dims == 2:
        sort_ind = nnp.lexsort((X[:, 2], X[:, 1], X[:, 0]))  # sort by 0, 1, 2
    elif num_spatial_dims == 1:
        sort_ind = nnp.lexsort((X[:, 1], X[:, 0]))  # sort by 0, 1
    else:
        raise NotImplementedError
    X = X[sort_ind]
    Y = Y[sort_ind]
    unique_time = np.unique(X[:, 0])
    unique_space = nnp.unique(X[:, 1:], axis=0)
    N_t = unique_time.shape[0]
    N_r = unique_space.shape[0]
    if num_spatial_dims == 4:
        R = np.tile(unique_space, [N_t, 1, 1, 1, 1])
    elif num_spatial_dims == 3:
        R = np.tile(unique_space, [N_t, 1, 1, 1])
    elif num_spatial_dims == 2:
        R = np.tile(unique_space, [N_t, 1, 1])
    elif num_spatial_dims == 1:
        R = np.tile(unique_space, [N_t, 1])
    else:
        raise NotImplementedError
    R_flat = R.reshape(-1, num_spatial_dims)
    Y_dummy = np.nan * np.zeros([N_t * N_r, 1])
    time_duplicate = np.tile(unique_time, [N_r, 1]).T.flatten()
    X_dummy = np.block([time_duplicate[:, None], R_flat])
    X_all = np.vstack([X, X_dummy])
    Y_all = np.vstack([Y, Y_dummy])
    X_unique, ind = nnp.unique(X_all, axis=0, return_index=True)
    Y_unique = Y_all[ind]
    grid_shape = (unique_time.shape[0], ) + unique_space.shape
    R_grid = X_unique[:, 1:].reshape(grid_shape)
    Y_grid = Y_unique.reshape(grid_shape[:-1] + (1, ))
    return unique_time[:, None], R_grid, Y_grid
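
A usage sketch for the 1-D spatial case (hedged; it assumes the snippet's module aliases, np = jax.numpy and nnp = numpy, are in scope): 3 time steps x 2 spatial points give a [3, 2, 1] grid.

t = nnp.repeat(nnp.arange(3.0), 2)      # time column: 0,0,1,1,2,2
r = nnp.tile(nnp.array([0.0, 1.0]), 3)  # one spatial dimension
X = nnp.stack([t, r], axis=1)           # (6, 2): [time, space]
Y = nnp.arange(6.0)
t_grid, R_grid, Y_grid = create_spatiotemporal_grid(X, Y)
assert t_grid.shape == (3, 1) and R_grid.shape == (3, 2, 1) and Y_grid.shape == (3, 2, 1)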
Example #22
def realfftbasis(nx):
    """
    Basis of sines + cosines for an nx-point discrete Fourier transform (DFT).

    Ported from MATLAB code:
    https://github.com/leaduncker/SimpleEvidenceOpt/blob/master/util/realfftbasis.m
    
    """
    import numpy as np
    nn = nx

    ncos = np.ceil((nn + 1) / 2)
    nsin = np.floor((nn - 1) / 2)

    wvec = np.hstack(
        [np.arange(start=0., stop=ncos),
         np.arange(start=-nsin, stop=0.)])

    wcos = wvec[wvec >= 0]
    wsin = wvec[wvec < 0]

    x = np.arange(nx)

    t0 = np.cos(np.outer(wcos * 2 * np.pi / nn, x))
    t1 = np.sin(np.outer(wsin * 2 * np.pi / nn, x))

    B = np.vstack([t0, t1]) / np.sqrt(nn * 0.5)

    return B, wvec
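
A quick shape check (hedged sketch): for nx = 8 there are ncos = 5 cosine rows and nsin = 3 sine rows, so the basis is square.

B, wvec = realfftbasis(8)
assert B.shape == (8, 8) and wvec.shape == (8,)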
Example #23
def conway_graph(size) -> jraph.GraphsTuple:
    """Returns a graph representing the game field of conway's game of life."""
    # Creates nodes: each node represents a cell in the game.
    n_node = size**2
    nodes = np.zeros((n_node, 1))
    node_indices = jnp.arange(n_node)
    # Creates edges, senders and receivers:
    # the senders represent the connections to the 8 neighboring fields.
    n_edge = 8 * n_node
    edges = jnp.zeros((n_edge, 1))
    senders = jnp.vstack([
        node_indices - size - 1, node_indices - size, node_indices - size + 1,
        node_indices - 1, node_indices + 1, node_indices + size - 1,
        node_indices + size, node_indices + size + 1
    ])
    senders = senders.T.reshape(-1)
    senders = (senders + size**2) % size**2
    receivers = jnp.repeat(node_indices, 8)
    # Adds a glider to the game
    nodes[0, 0] = 1.0
    nodes[1, 0] = 1.0
    nodes[2, 0] = 1.0
    nodes[2 + size, 0] = 1.0
    nodes[1 + 2 * size, 0] = 1.0
    return jraph.GraphsTuple(n_node=jnp.array([n_node]),
                             n_edge=jnp.array([n_edge]),
                             nodes=jnp.asarray(nodes),
                             edges=edges,
                             globals=None,
                             senders=senders,
                             receivers=receivers)
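
A usage sketch (hedged; assumes jraph and the snippet's imports are available): a 5 x 5 field has 25 cells, each wired to its 8 wrapped neighbours.

g = conway_graph(5)
assert int(g.n_node[0]) == 25
assert g.senders.shape == (200,) and g.receivers.shape == (200,)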
Example #24
def run_mlstm1900_example():
    # Set up an example
    sequence = "MRKGEELFTGVVPILVELDGDVNGHKFSVRGEGEGDATNGKLTLKFICTTGKLPVPWPTLVTTLTYGVQCFARYPDHMKQHDFFKSAMPEGYVQERTISFKDDGTYKTRAEVKFEGDTLVNRIELKGIDFKEDGNILGHKLEYNFNSHNVYITADKQKNGIKANFKIRHNVEDGSVQLADHYQQNTPIGDGPVLLPDNHYLSTQSVLSKDPNEKRDHMVLLEFVTAAGITHGMDELYK"
    print("sequence length: ", len(sequence))

    sequence = aa_seq_to_int(sequence)[:-1]

    embeddings = np.load("embed_matrix:0.npy")
    x = np.vstack([embeddings[i] for i in sequence])
    print("embedding shape: ", x.shape)

    # x = sliding_window(sequence, size=10)
    params = dict()
    params["gh"] = np.load("rnn_mlstm_mlstm_gh:0.npy")
    params["gmh"] = np.load("rnn_mlstm_mlstm_gmh:0.npy")
    params["gmx"] = np.load("rnn_mlstm_mlstm_gmx:0.npy")
    params["gx"] = np.load("rnn_mlstm_mlstm_gx:0.npy")

    params["wh"] = np.load("rnn_mlstm_mlstm_wh:0.npy")
    params["wmh"] = np.load("rnn_mlstm_mlstm_wmh:0.npy")
    params["wmx"] = np.load("rnn_mlstm_mlstm_wmx:0.npy")
    params["wx"] = np.load("rnn_mlstm_mlstm_wx:0.npy")

    params["b"] = np.load("rnn_mlstm_mlstm_b:0.npy")

    # Pass through mLSTM1900
    out = mlstm1900(params, x)
    print("output: ", out)
    print("reps: ", out.mean(axis=0))
    print("output shape: ", out.shape)
    assert out.shape == (x.shape[0], 1900)
Example #25
def explicit_sigma(aug_y, t, args):
    y, y_adj, arg_adj = unpack(aug_y)
    gval = g(y, t, args)
    jac_g, jac_args = jacobian(g, argnums=(0, 2))(y, t, args)
    sigma_adj = jac_g * -y_adj
    sigma_theta = jac_args.T * -y_adj
    return np.vstack([np.diag(gval), sigma_adj, sigma_theta])
Example #26
        def _M(X, parameter_dict, t):
            """
            proposal for a singular particle x
            """
            #grab the parameters
            x = X['x']
            seed = X['seed']
            Dx = x.shape
            ldt = parameter_dict['ldt']

            Mparams = parameter_dict['M_parameters']
            potential_params = jnp.vstack(
                (Mparams['mu'][t], jnp.exp(Mparams['lcov_force'][t])))

            #split the random seed
            run_seed, x_seed = random.split(seed)

            #call EL
            mu, cov = EL_mu_sigma(x, self._kernel_energy_fn, ldt,
                                  potential_params)

            #ula move
            new_x = ULA_move(x, self._kernel_energy_fn, ldt, run_seed,
                             potential_params)

            return {
                'x': new_x,
                'seed': x_seed,
                'forward_mu': mu,
                'forward_cov': cov
            }
Example #27
    def test_scan_decorated(self):
        class SimpleScan(nn.Module):
            @partial(nn.scan,
                     variable_broadcast='params',
                     split_rngs={'params': False})
            @nn.compact
            def __call__(self, c, xs):
                return nn.LSTMCell(name="lstm_cell")(c, xs)

        key1, key2 = random.split(random.PRNGKey(0), 2)
        xs = random.uniform(key1, (3, 2))
        dummy_rng = random.PRNGKey(0)
        init_carry = nn.LSTMCell.initialize_carry(dummy_rng, xs.shape[:1],
                                                  xs.shape[-1])
        model = SimpleScan()
        init_variables = model.init(key2, init_carry, xs)
        # simulate scan in python for comparison:
        c = init_carry
        ys = []
        lstmcell_variables = freeze(
            {'params': init_variables['params']['lstm_cell']})
        for i in range(xs.shape[0]):
            c, y = nn.LSTMCell().apply(lstmcell_variables, c, xs[i])
            ys.append(y[None, ...])
        y1 = jnp.vstack(ys)

        c2, y2 = model.apply(init_variables, init_carry, xs)
        np.testing.assert_allclose(y1, y2, atol=1e-7)
        np.testing.assert_allclose(c[0], c2[0], atol=1e-7)
        np.testing.assert_allclose(c[1], c2[1], atol=1e-7)
Example #28
def controlled_gate(gate, c_qubit):
    input_qubits = [c_qubit] + gate.input_qubits
    if gate.func is None:
        U = gate.tensor
        sh, _ = U.shape
        I = np.eye(sh)
        zero0 = np.zeros((sh, sh))
        zero1 = np.zeros((sh, sh))
        A = np.hstack([I, zero0])
        B = np.hstack([zero1, U])
        gate = qtc.Gate(input_qubits, tensor=np.vstack([A, B]))
        return gate
    else:

        def func(params):
            U = gate.func(params)
            sh, _ = U.shape
            I = np.eye(sh)
            zero0 = np.zeros((sh, sh))
            zero1 = np.zeros((sh, sh))
            A = np.hstack([I, zero0])
            B = np.hstack([zero1, U])
            return np.vstack([A, B])

        func_jit = jit(func)
        res = qtc.Gate(input_qubits, params=gate.params, func=func_jit)
        return res
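
The block construction above is just [[I, 0], [0, U]]. A standalone numpy check (a sketch, independent of qtc): with U = Pauli-X it reproduces the CNOT matrix.

import numpy as np

U = np.array([[0., 1.], [1., 0.]])  # Pauli-X
I2 = np.eye(2)
Z = np.zeros((2, 2))
CU = np.vstack([np.hstack([I2, Z]), np.hstack([Z, U])])
CNOT = np.array([[1., 0., 0., 0.],
                 [0., 1., 0., 0.],
                 [0., 0., 0., 1.],
                 [0., 0., 1., 0.]])
assert np.allclose(CU, CNOT)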
  def get_2d_array(self):
    nb_full_blocks = int(self.nb_rows / self.nb_columns)
    block_list = []
    rng = self.key
    for _ in range(nb_full_blocks):
      rng, rng_input = jax.random.split(rng)
      unstructured_block = random.normal(rng_input,
                                         (self.nb_columns, self.nb_columns))
      q, _ = jnp.linalg.qr(unstructured_block)
      q = jnp.transpose(q)
      block_list.append(q)
    remaining_rows = self.nb_rows - nb_full_blocks * self.nb_columns
    if remaining_rows > 0:
      rng, rng_input = jax.random.split(rng)
      unstructured_block = random.normal(rng_input,
                                         (self.nb_columns, self.nb_columns))
      q, _ = jnp.linalg.qr(unstructured_block)
      q = jnp.transpose(q)
      block_list.append(q[0:remaining_rows])
    final_matrix = jnp.vstack(block_list)

    if self.scaling == 0:
      multiplier = jnp.linalg.norm(
          random.normal(self.key, (self.nb_rows, self.nb_columns)), axis=1)
    elif self.scaling == 1:
      multiplier = jnp.sqrt(float(self.nb_columns)) * jnp.ones((self.nb_rows))
    else:
      raise ValueError('Scaling must be one of {0, 1}. Was %s' % self.scaling)

    return jnp.matmul(jnp.diag(multiplier), final_matrix)
Example #30
def curve_loss(t_, params_):  # t_ should be an (m,) vector.
    params_ = jnp.vstack([w1, params_, w2])
    c = curve(t_, params_)
    loss_sum = 0
    for c_ in c:
        loss_sum += loss_fn(c_)
    return loss_sum / float(len(t_))
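
An equivalent vectorized form (a sketch under the same closure assumptions as the loop above: w1, w2, curve, and loss_fn in scope, loss_fn mapping one curve point to a scalar, and jax imported): replace the Python loop with jax.vmap.

def curve_loss_vmapped(t_, params_):
    params_ = jnp.vstack([w1, params_, w2])
    c = curve(t_, params_)
    return jnp.mean(jax.vmap(loss_fn)(c))  # mean of per-point losses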