Ejemplo n.º 1 (0)
    def _joblib_resample_A_given_W(self, data):
        """
        Resample the binary adjacency matrix A given the weights W. This
        must be immediately followed by an update of z | A, W.

        NOTE: despite the name, the joblib-parallel path over columns of A
        is currently disabled (retained below as a commented-out string);
        columns are resampled serially via ``map``.

        :param data: list of dataset objects; only the first is used. With
            no data, A is drawn from the network prior alone.
        :return: None; ``self.A`` is updated in place.
        """
        if len(data) == 0:
            # No evidence: sample A directly from the prior connection
            # probabilities P, respecting the structural mask.
            self.A = (np.random.rand(self.K, self.K) <
                      self.network.P) * self.Amask
            return

        # pull params from data
        data = data[0]  # only consider first data
        S = data.S          # event times
        N = data.N          # number of events
        # C-contiguous float64 copy: required by the Cython helper below.
        Ns = data.Ns.copy(order='C').astype('float64')
        C = data.C          # event process (neuron) assignments
        T = data.T          # observation window length
        Z = data.Z          # parent assignments
        dt_max = data.dt_max

        # pull params from model
        mu, tau = self.model.impulse_model.mu, self.model.impulse_model.tau
        min_supp, max_supp = self.model.impulse_model.support
        P = self.model.network.P
        W = self.model.W
        lambda0 = self.model.lambda0

        # precompute impulse responses
        # lambda_ir[n, k] is filled in place by the helper: the W-weighted
        # impulse response of neuron k evaluated at event n.
        lambda_ir = np.zeros((N, self.K))
        compute_weighted_impulses_at_events(S, C, Z, dt_max, min_supp,
                                            max_supp, W, mu, tau, lambda_ir)

        # We can naively parallelize over receiving neurons, k2

        def yieldArgs():
            # Argument tuples for ct_resample_column_of_A_fast, one per
            # receiving neuron k2. Order must match the helper's signature:
            # P_k, K, Ns, T, W_k, lambda0_k, A_mask, A_col, lambda_ir_k
            for k2 in range(self.K):
                # C-contiguous float64 copies of column k2 for the C helper.
                P_k = P[:, k2].copy(order='C').astype('float64')
                A_col = self.A[:, k2].copy(order='C').astype('float64')
                W_k = W[:, k2].copy(order='C').astype('float64')
                A_mask = self.Amask[:, k2].copy(order='C').astype('float64')
                lambda0_k = float(lambda0[k2])
                # Impulse responses restricted to events on neuron k2.
                lambda_ir_k = lambda_ir[C == k2, :].copy(
                    order='C').astype('float64')
                args = (P_k, self.K, Ns, T, W_k, lambda0_k, A_mask, A_col,
                        lambda_ir_k)
                yield args

        # Serial resampling of each column of A (see NOTE in the docstring).
        A_cols = list(map(ct_resample_column_of_A_fast, yieldArgs()))
        """
        A_cols = Parallel(n_jobs=-1, backend="multiprocessing")(
            delayed(ct_resample_column_of_A_fast)(_) for _ in yieldArgs())
        """

        # Columns come back as rows of A_cols; transpose and re-apply mask.
        self.A = np.array(A_cols).T * self.Amask
Ejemplo n.º 2 (0)
    def _compute_weighted_impulses_at_events(self, data):
        """Evaluate the W-weighted impulse responses at every event.

        :param data: dataset with events ``S``, processes ``C``, parents
            ``Z``, and event count ``N``.
        :return: an (N, K) array; entry [n, k] is filled in place by the
            Cython helper with the weighted impulse of neuron k at event n.
        """
        from pyhawkes.internals.continuous_time_helpers import \
            compute_weighted_impulses_at_events

        impulse = self.model.impulse_model
        lmbda_out = np.zeros((data.N, self.K))
        compute_weighted_impulses_at_events(
            data.S, data.C, data.Z, self.model.dt_max,
            self.W, impulse.mu, impulse.tau, lmbda_out)
        return lmbda_out
def _compute_weighted_impulses_at_events(data):
    """Evaluate the W-weighted impulse responses at every event.

    NOTE(review): relies on a module-level ``model`` global (not visible
    in this snippet) — confirm it is bound before calling.

    :param data: dataset with events ``S``, processes ``C``, parents ``Z``,
        and event count ``N``.
    :return: an (N, model.K) array filled in place by the Cython helper.
    """
    from pyhawkes.internals.continuous_time_helpers import \
        compute_weighted_impulses_at_events

    impulse = model.impulse_model
    out = np.zeros((data.N, model.K))
    compute_weighted_impulses_at_events(
        data.S, data.C, data.Z, model.dt_max,
        model.W, impulse.mu, impulse.tau, out)
    return out
Ejemplo n.º 4 (0)
def _compute_weighted_impulses_at_events(data):
    """Evaluate the W-weighted impulse responses at every event, using the
    impulse model's support bounds.

    NOTE(review): relies on a module-level ``model`` global (not visible
    in this snippet) — confirm it is bound before calling.

    :param data: dataset with events ``S``, processes ``C``, parents ``Z``,
        and event count ``N``.
    :return: an (N, model.K) array filled in place by the Cython helper.
    """
    from pyhawkes.internals.continuous_time_helpers import \
        compute_weighted_impulses_at_events

    impulse = model.impulse_model
    lo, hi = impulse.support
    out = np.zeros((data.N, model.K))
    compute_weighted_impulses_at_events(
        data.S, data.C, data.Z, model.dt_max, lo, hi,
        model.W, impulse.mu, impulse.tau, out)
    return out