Example #1
 def _impulse(self, dt, mu, tau):
     from pyhawkes.utils.utils import logit
     # map dt onto support
     x = self.translate_dt(dt)
     # get prob at scaled x
     Z = x * (1. - x) * np.sqrt(2 * np.pi / tau)
     return (1. / Z) * np.exp(-tau / 2. * (logit(x) - mu)**2)
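Example #1 evaluates a logit-normal density: the lag is first mapped onto (0, 1), and Z is the normalizing constant including the Jacobian of the logit transform. The standalone sketch below reproduces the computation under the assumption that pyhawkes' logit is the usual log-odds, logit(x) = log(x / (1 - x)); the helper names here are illustrative, not part of the library.

    import numpy as np

    def logit(x):
        # log-odds transform mapping (0, 1) onto the real line
        return np.log(x) - np.log(1.0 - x)

    def logit_normal_pdf(x, mu, tau):
        # logit-normal density on (0, 1) with location mu and precision tau;
        # x * (1 - x) is the Jacobian of the logit transform
        Z = x * (1.0 - x) * np.sqrt(2.0 * np.pi / tau)
        return np.exp(-tau / 2.0 * (logit(x) - mu) ** 2) / Z

    x = np.linspace(0.05, 0.95, 5)
    print(logit_normal_pdf(x, mu=0.0, tau=2.0))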
Example #2
 def impulse(self, dt, k1, k2):
     """
     Impulse response induced by an event on process k1 on
     the rate of process k2 at lag dt
     """
     from pyhawkes.utils.utils import logit
     mu, tau, dt_max = self.mu[k1,k2], self.tau[k1,k2], self.dt_max
     Z = dt * (dt_max - dt)/dt_max * np.sqrt(2*np.pi/tau)
     return 1./Z * np.exp(-tau/2. * (logit(dt/dt_max) - mu)**2)
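Because Z carries the factor dt * (dt_max - dt) / dt_max, the impulse response above is a proper density on (0, dt_max). A quick numerical check (a sketch, again assuming logit(x) = log(x / (1 - x))) shows it integrates to approximately one:

    import numpy as np

    def impulse(dt, mu, tau, dt_max):
        # logit-normal impulse response on (0, dt_max)
        x = dt / dt_max
        Z = dt * (dt_max - dt) / dt_max * np.sqrt(2 * np.pi / tau)
        return np.exp(-tau / 2. * (np.log(x / (1 - x)) - mu) ** 2) / Z

    dt_max = 10.0
    dt = np.linspace(1e-4, dt_max - 1e-4, 20000)
    h = dt[1] - dt[0]
    print(h * impulse(dt, mu=0.5, tau=3.0, dt_max=dt_max).sum())  # close to 1.0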
Example #4
 def impulse(self, dt, k1, k2, delay=0):
     """
     Impulse response induced by an event on process k1 on
     the rate of process k2 at lag dt
     """
     # delay added
     dt = dt - self.delay[k1][k2]
     from pyhawkes.utils.utils import logit
     mu, tau, dt_max = self.mu[k1, k2], self.tau[k1, k2], self.dt_max
     Z = dt * (dt_max - dt) / dt_max * np.sqrt(2 * np.pi / tau)
     temp = 1. / Z * np.exp(-tau / 2. * (logit(dt / dt_max) - mu)**2)
     # lags earlier than the delay get zero response (log-density would be -inf)
     temp[dt < 0] = 0
     return temp
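Example #4 shifts the lag by a learned per-pair delay and zeroes the response for lags that precede the delay. Below is a standalone sketch of that masking with scalar parameters and a hypothetical helper name, again assuming the log-odds logit:

    import numpy as np

    def delayed_impulse(dt, mu, tau, dt_max, delay):
        # shift lags by the delay; responses outside (0, dt_max) are zero
        dt = np.asarray(dt, dtype=float) - delay
        out = np.zeros_like(dt)
        ok = (dt > 0) & (dt < dt_max)
        x = dt[ok] / dt_max
        Z = dt[ok] * (dt_max - dt[ok]) / dt_max * np.sqrt(2 * np.pi / tau)
        out[ok] = np.exp(-tau / 2. * (np.log(x / (1 - x)) - mu) ** 2) / Z
        return out

    # the first lag falls before the delay, so its response is zero
    print(delayed_impulse([0.5, 1.5, 5.0], mu=0.0, tau=2.0, dt_max=10.0, delay=1.0))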
Example #5
    def meanfieldupdate_p(self, stepsize=1.0):
        """
        Update p given the network parameters and the current variational
        parameters of the weight distributions.
        :return:
        """
        logit_p = self.network.expected_log_p() - self.network.expected_log_notp()
        logit_p += self.network.kappa * self.network.expected_log_v() - gammaln(self.network.kappa)
        logit_p += gammaln(self.mf_kappa_1) - self.mf_kappa_1 * np.log(self.mf_v_1)
        logit_p += gammaln(self.kappa_0) - self.kappa_0 * np.log(self.nu_0)
        logit_p += self.mf_kappa_0 * np.log(self.mf_v_0) - gammaln(self.mf_kappa_0)

        # p_hat = logistic(logit_p)
        # self.mf_p = (1.0 - stepsize) * self.mf_p + stepsize * p_hat

        logit_p_hat = (1-stepsize) * logit(self.mf_p) + \
                       stepsize * logit_p
        # self.mf_p = logistic(logit_p_hat)
        self.mf_p = np.clip(logistic(logit_p_hat), 1e-8, 1-1e-8)
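The partial update is damped on the natural-parameter (logit) scale rather than on the probabilities themselves, and the result is clipped away from 0 and 1 for numerical stability. A minimal sketch of that step, using scipy.special.logit and expit in place of the module's logit and logistic helpers:

    import numpy as np
    from scipy.special import logit, expit

    def damped_update(mf_p, logit_p_new, stepsize):
        # blend the current and newly computed values in logit space,
        # then map back to a probability and keep it strictly inside (0, 1)
        logit_p_hat = (1 - stepsize) * logit(mf_p) + stepsize * logit_p_new
        return np.clip(expit(logit_p_hat), 1e-8, 1 - 1e-8)

    mf_p = np.array([0.2, 0.9])
    print(damped_update(mf_p, logit_p_new=np.array([2.0, -1.0]), stepsize=0.5))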
Example #7
    def resample(self, data=[]):
        """
        Resample the impulse response parameters (mu, tau, and delay).
        :param data: a list of data objects whose parent assignments form
                     TxKxKxB arrays: T time bins, K processes, K parent
                     processes, and B bases for each parent process.
        mu_0, lmbda_0, alpha_0, beta_0 = self.mu_0, self.lmbda_0, self.alpha_0, self.beta_0
        assert data is None or isinstance(data, list)

        # 0: count, # 1: Sum of scaled dt, #2: Sum of sq scaled dt
        ss = np.zeros((3, self.K, self.K))
        for d in data:
            ss += d.compute_imp_suff_stats()

        n = ss[0]
        xbar = np.nan_to_num(ss[1] / n)
        xvar = ss[2]

        alpha_post = alpha_0 + n / 2.
        # beta_post = beta_0 + 0.5 * xvar
        # beta_post += 0.5 * lmbda_0 * n / (lmbda_0 + n) * (xbar-mu_0)**2
        beta_post = beta_0 + 0.5 * xvar + 0.5 * lmbda_0 * n * (
            xbar - mu_0)**2 / (lmbda_0 + n)

        lmbda_post = lmbda_0 + n
        mu_post = (lmbda_0 * mu_0 + n * xbar) / (lmbda_0 + n)

        from pyhawkes.utils.utils import sample_nig
        self.mu, self.tau = \
            sample_nig(mu_post, lmbda_post, alpha_post, beta_post)

        # Z is parent
        # C is process
        # S is event time
        S = [x.S for x in self.model.data_list]
        C = [x.C for x in self.model.data_list]
        Z = [x.Z for x in self.model.data_list]
        N_pts = 50
        t = np.linspace(0, self.dt_max, N_pts)
        pt = np.zeros(N_pts)
        normal_sig = self.normal_sig
        normal_mu = self.normal_mu
        pt_prior = np.exp(-(t - normal_mu)**2 / (2 * normal_sig**2)) / np.sqrt(
            2 * np.pi * normal_sig**2)
        dt_max = self.dt_max
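        # For each (k1, k2) pair: score a grid of candidate delays against the
        # observed parent-child lags, weight by the Gaussian prior on the delay,
        # and draw a delay by inverse-CDF sampling from the cumulative scores.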
        for k1 in range(self.K):
            for k2 in range(self.K):
                tt = np.zeros(N_pts)
                gt = np.ones(N_pts)
                for i in range(len(S)):
                    s, c, z = S[i], C[i], Z[i]
                    cind, pind = (c == k2), (c[z] == k1)
                    igmask = (z != -1)  # no parent
                    inds = cind & pind & igmask
                    if ~np.all(~inds):
                        ds = s[inds] - s[z[inds]]
                        '''
                        if min(ds) < min(t):
                            print k1,k2
                            exit(0)
                        '''
                        ll = -utils.logit(
                            np.absolute(ds[None, :] - t[:, None]), dt_max)
                        # logit grows too fast
                        ll = np.minimum(ll, 100)
                        ll = np.maximum(ll, -100)
                        ll = (ll * self.tau[k1][k2] / 2 - self.mu[k1][k2])**2
                        tt = tt + np.sum(ll, 1)
                        gt[t > min(ds)] = 0.0
                tt = np.cumsum(pt_prior * np.exp(tt - np.max(tt)) * gt)
                if (tt == 0).all():
                    self.delay[k1][k2] = 0
                    #tt = np.cumsum(pt_prior)
                    #delay =  t[np.flatnonzero(tt > np.random.uniform(0,tt[-1]))[0]]
                    #self.delay[k1][k2] = delay
                else:
                    # exp grows too fast, normalize by e^-max(tt)
                    delay = -t[np.flatnonzero(
                        tt > np.random.uniform(0, tt[-1]))[0]]
                    self.delay[k1][k2] = delay

        assert np.isfinite(self.mu).all()
        assert np.isfinite(self.tau).all()
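The (mu, tau) draw is a standard conjugate update: tau is sampled from a Gamma posterior and mu from a Normal whose variance scales with 1 / (lmbda_post * tau). A sketch of the equivalent normal-gamma draw that sample_nig presumably performs (this standalone helper is an assumption, not the pyhawkes implementation):

    import numpy as np

    def sample_normal_gamma(mu_post, lmbda_post, alpha_post, beta_post):
        # tau ~ Gamma(alpha_post, rate=beta_post); numpy's gamma takes a scale
        tau = np.random.gamma(alpha_post, 1.0 / beta_post)
        # mu | tau ~ Normal(mu_post, 1 / (lmbda_post * tau))
        mu = mu_post + np.sqrt(1.0 / (lmbda_post * tau)) * np.random.randn(*np.shape(tau))
        return mu, tau

    mu, tau = sample_normal_gamma(mu_post=0.0, lmbda_post=2.0,
                                  alpha_post=3.0, beta_post=1.5)
    print(mu, tau)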