Code example #1
    def gradient_A_ij(self, Aij, args):
        """gradient of objective function w.r.t. aij:
            sum_n ( (1/S) * sum(lamda_gs(t_n, x_n, y_n) * (sum_g sum_tk<t kappa_fxn )) )  I(ij) -
            sum_n ( integral ) I(ij)
        """

        mu = args[0]['mu']
        bw = args[0]['bw']
        samples = args[0]['samples']

        Aij = np.reshape(Aij, (self.U, self.U))

        gradient_aij = np.zeros((self.U, self.U))

        community_points = dict(
            (key, deque(maxlen=10)) for key in range(0, self.M))

        for event in self.events:
            user = event[0]
            gradient = []
            for g in samples[user]:

                lamda = hawkes_process().hawkes_intensity(
                    mu[user], Aij[user], community_points[g], event[1],
                    event[2], event[3], bw[user])

                gradient.append(
                    self.grad_intensity_aij(lamda, Aij[user],
                                            community_points[g], event[1],
                                            event[2], event[3], bw[user]))
                if (len(community_points[g]) <= 10):
                    community_points[g].append(
                        (event[1], event[2], event[3], user))
                else:
                    community_points[g].popleft()
                    community_points[g].append(
                        (event[1], event[2], event[3], user))

            for k in range(0, len(gradient)):
                gradient_aij[user] = gradient_aij[user] + (
                    (1.0 / self.S) * gradient[k][0])
                # for (j,v) in gradient[k].iteritems():
                #     gradient_aij[(user,j)] = gradient_aij[(user,j)] + ((1 / self.S) * v)

        (K, Kij) = self.likelihood.integral_full(Aij, bw)

        # final_gradient = []
        # for i in range(0, self.U):
        #     for j in range(0,self.U):
        #         final_gradient.append(gradient_aij[(i,j)] - Kij[(i,j)])
        #
        # final_gradient = np.array(final_gradient)

        gradient_aij = gradient_aij - Kij
        gradient_aij = np.reshape(gradient_aij,
                                  (1, np.prod(gradient_aij.shape)))

        return gradient_aij
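
A small runnable check of the flatten/reshape contract used above (toy values, not project data): the optimizer hands the method a flat vector of length U*U, the method reshapes it to (U, U) on the way in, and the gradient is flattened back the same way on the way out. Both directions use NumPy's row-major layout, so the round trip is lossless.

import numpy as np

U = 3
A = np.arange(U * U, dtype=float).reshape(U, U)      # toy (U, U) matrix
flat = np.reshape(A, (1, np.prod(A.shape)))          # same flattening as the return value above
assert np.array_equal(np.reshape(flat, (U, U)), A)   # row-major round trip is lossless
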
Code example #2
File: transformation.py Project: gvinayak/colab
    def gradient_gamma(self, gamma, args):
        """gradient of objective function w.r.t. mu:
               sum_n ( (1/S) * sum( 1 / lamda_gs(t_n, x_n, y_n) ) ) - T*X*Y
                    """

        phi = args[0]['phi']
        aij = args[0]['aij']
        bw = args[0]['bw']
        samples = args[0]['samples']

        # print phi
        # print aij
        # print bw
        # print gamma

        gradient_user = {}
        for i in range(0, self.U):
            gradient_user[i] = []

        community_points = {}
        for community in range(0, self.M):
            community_points[community] = []

        n = 0
        for event in self.events:
            user = event[0]
            sampled_lamda = []
            for g in samples[user]:
                #g = simulate_data().sample_multinomial(phi[user])
                lamda = hawkes_process().hawkes_intensity(
                    np.exp(gamma[user]), aij[user], community_points[g],
                    self.events[n][1], self.events[n][2], self.events[n][3],
                    bw[user])
                if lamda != 0.0:
                    sampled_lamda.append(1.0 / lamda)
                community_points[g].append(
                    (self.events[n][1], self.events[n][2], self.events[n][3],
                     user))
            gradient_user[user].append((1.0 / self.S) * sum(sampled_lamda))
            n += 1

        gradient = []
        for i in range(0, self.U):
            # print "gradient of user : "******" is : "+str(sum(gradient_user[i]))
            # print sum(gradient_user[i]) + (self.T * self.X * self.Y)
            gradient.append((sum(gradient_user[i]) -
                             (self.T * self.X * self.Y)) * np.exp(gamma[i]))

        gradient = np.array(gradient)
        # gradient = gradient/sum(gradient)
        print(gradient)
        return gradient
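
The trailing `* np.exp(gamma[i])` factor above is the chain rule for the reparameterization mu = exp(gamma): the intensity is evaluated at np.exp(gamma[user]), so the gradient in mu gets multiplied by exp(gamma). A runnable illustration with a hypothetical toy objective standing in for the likelihood:

import numpy as np

# Toy objective in mu and its analytic derivative (hypothetical, for illustration only).
f = lambda mu: 3.0 * mu + mu ** 2
df_dmu = lambda mu: 3.0 + 2.0 * mu

gamma = 0.7
mu = np.exp(gamma)
eps = 1e-6
# Central difference of f(exp(gamma)) w.r.t. gamma ...
numeric = (f(np.exp(gamma + eps)) - f(np.exp(gamma - eps))) / (2 * eps)
# ... matches f'(mu) * exp(gamma), the same factor applied in gradient_gamma.
analytic = df_dmu(mu) * np.exp(gamma)
assert abs(numeric - analytic) < 1e-4
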
Code example #3
File: likelihood.py Project: gvinayak/colab
    def first_term(self, mu, samples, Aij, bw):

        # mu = args[0]['mu']
        # samples =  args[0]['samples']
        # Aij =  args[0]['Aij']
        # bw = args[0]['bw']

        Aij = np.reshape(Aij, (self.U, self.U))

        community_points = dict(
            (key, deque(maxlen=10)) for key in range(0, self.M))

        first_sum = []
        n = 0
        for event in self.events:
            user = event[0]
            # first summation
            sampled_lamda = []
            for g in samples[user]:
                # g = simulate_data().sample_multinomial(phi[user])
                if (mu[user] > 0):
                    lamda = hawkes_process().hawkes_intensity(
                        mu[user], Aij[user], community_points[g],
                        self.events[n][1], self.events[n][2],
                        self.events[n][3], bw[user])

                    log_lamda = np.log(max(0.0001, lamda))
                    # print "log lamda : ", log_lamda
                    sampled_lamda.append(log_lamda)
                    if (len(community_points[g]) <= 10):
                        community_points[g].append(
                            (self.events[n][1], self.events[n][2],
                             self.events[n][3], user))
                    else:
                        community_points[g].popleft()
                        community_points[g].append(
                            (self.events[n][1], self.events[n][2],
                             self.events[n][3], user))
                    # print " community_points : ", len(community_points[g])
            first_sum.append((1.0 / self.S) * sum(sampled_lamda))

            n += 1

        return sum(first_sum)
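
A note on the bounded history buffer used here and in the gradient methods: `deque(maxlen=10)` already evicts the oldest point automatically when a new one is appended at capacity, so the explicit `popleft()` branch guarded by `len(...) <= 10` can never run. A tiny runnable demonstration with toy values:

from collections import deque

buf = deque(maxlen=3)
for point in range(5):            # toy "events"
    buf.append(point)             # at capacity, the oldest entry is dropped automatically
assert list(buf) == [2, 3, 4]
assert len(buf) == 3              # length never exceeds maxlen, so the popleft() branch is dead code
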
Code example #4
    def gradient_mu(self, mu, args):
        """gradient of objective function w.r.t. mu:
           sum_n ( (1/S) * sum( 1 / lamda_gs(t_n, x_n, y_n) ) ) - T*X*Y
        """

        aij = args[0]['Aij']
        bw = args[0]['bw']
        samples = args[0]['samples']

        gradient_user = np.zeros((self.U, 1))
        # gradient_user = {}
        # for i in range(0, self.U):
        #     gradient_user[i] = []

        community_points = dict(
            (key, deque(maxlen=10)) for key in range(0, self.M))

        for event in self.events:
            user = event[0]
            s = 0.0
            for g in samples[user]:
                # g = simulate_data().sample_multinomial(phi[user])
                lamda = hawkes_process().hawkes_intensity(
                    mu[user], aij[user], community_points[g], event[1],
                    event[2], event[3], bw[user])
                s = s + (1.0 / max(0.0001, lamda))

                if (len(community_points[g]) <= 10):
                    community_points[g].append(
                        (event[1], event[2], event[3], user))
                else:
                    community_points[g].popleft()
                    community_points[g].append(
                        (event[1], event[2], event[3], user))

            gradient_user[user] = gradient_user[user] + ((1.0 / self.S) * s)

        gradient = np.zeros((self.U, 1))
        gradient = gradient + gradient_user - (self.T * self.X * self.Y)

        # print gradient.T
        return gradient.T[0, :]
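
A hypothetical helper (not from the project) that could be used to spot-check analytic gradients such as gradient_mu against their objective by central differences. Because the intensities are clamped with max(0.0001, ...), the agreement is only approximate near zero intensity.

import numpy as np

def numerical_gradient(func, x, eps=1e-5):
    """Central-difference gradient of func at x (func maps a 1-D array to a float)."""
    x = np.asarray(x, dtype=float)
    grad = np.zeros_like(x)
    for i in range(x.size):
        step = np.zeros_like(x)
        step[i] = eps
        grad[i] = (func(x + step) - func(x - step)) / (2.0 * eps)
    return grad
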
Code example #5
File: transformation.py Project: gvinayak/colab
    def obj_func(self, gamma, args):
        """Likelihood expression:
        sum_n ( (1/S) * sum( log (lamda_gs(t_n, x_n, y_n) ) ) ) +
        sum_n sum_m phi[i,g]( log theta_[m,c_n] + log pi[i_n,m] ) -
        TXY * \sum_i(mu_i) + sum_k A[i_k,i] sum_n ( np.exp(- nu(t_n - t_k) ) (erf((x[n],y[n] - x[k],y[k])/(sqrt(2*h)) - erf((x[n-1],y[n-1] - x[k],y[k])/(sqrt(2*h)) ) )
        - sum_i sum_g (q(g) log q(g))"""

        aij = args[0]['aij']
        phi = args[0]['phi']
        theta = args[0]['theta']
        bw = args[0]['bw']
        pi = args[0]['pi']
        samples = args[0]['samples']

        likelihood = 0
        first_sum = []
        second_sum = []
        community_points = {}

        for community in range(0, self.M):
            community_points[community] = []

        n = 0
        for event in self.events:
            user = event[0]
            # first summation
            sampled_lamda = []
            for g in samples[user]:
                #g = simulate_data().sample_multinomial(phi[user])
                community_points[g].append(
                    (self.events[n][1], self.events[n][2], self.events[n][3],
                     user))
                lamda = hawkes_process().hawkes_intensity(
                    np.exp(gamma[user]), aij[user], community_points[g],
                    self.events[n][1], self.events[n][2], self.events[n][3],
                    bw[user])
                if lamda != 0.0:
                    log_lamda = math.log(lamda)
                    sampled_lamda.append(log_lamda)

            first_sum.append(sum(sampled_lamda) / self.S)

            # second summation
            c_n = self.events[n][4]
            expectation_m = []
            for m in range(0, self.M):
                expectation_m.append(
                    phi[user, m] *
                    (math.log(theta[c_n, m]) + math.log(pi[user, m])))
            second_sum.append(sum(expectation_m))

            n += 1

        third_sum = self.integral(gamma, aij, bw)
        # print "integral : " + str(third_sum)

        fourth_sum = self.E_q_q_g(phi)
        # print "e_q_q_g : "+str(fourth_sum)

        likelihood = sum(first_sum) + sum(second_sum) - third_sum - fourth_sum
        # print "gamma : " + str(gamma)
        return likelihood
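
A minimal wiring sketch, under assumptions: `model` is an instance of the class these methods belong to, and `params` is the dict read from args[0] (keys aij, phi, theta, bw, pi, samples). Since obj_func returns a likelihood to be maximized, both it and gradient_gamma are negated before being handed to a standard minimizer.

import numpy as np
from scipy.optimize import minimize

def fit_gamma(model, gamma0, params):
    """Sketch: maximize obj_func over gamma by minimizing its negative (hypothetical wrapper)."""
    fun = lambda g: -model.obj_func(g, (params,))
    jac = lambda g: -np.asarray(model.gradient_gamma(g, (params,)), dtype=float)
    result = minimize(fun, np.asarray(gamma0, dtype=float), jac=jac, method='L-BFGS-B')
    return result.x          # estimated gamma; the baseline rate is mu = np.exp(gamma)
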
Code example #6
    def gradient_phi(self, phi, args):
        """gradient of objective function w.r.t. phi:
            sum_n ( (1/S) * sum( lamda_gs(t_n, x_n, y_n) *  ) ) +
            sum_n sum_m ( log theta_[m,c_n] + log pi[i_n,m] )
        """

        Aij = args[0]['Aij']
        mu = args[0]['mu']
        bw = args[0]['bw']
        pi = args[0]['pi']
        theta = args[0]['theta']
        samples = args[0]['samples']
        phi = np.reshape(phi, (self.U, self.M))
        theta = np.reshape(theta, (self.M, self.V))
        Aij = np.reshape(Aij, (self.U, self.U))

        # print phi

        n = 0
        gradient_phi = np.zeros((self.U, self.M))
        gradient_phi_theta_pi = np.zeros((self.U, self.M))
        user_community_points = {}

        community_points = dict(
            (key, deque(maxlen=10)) for key in range(0, self.M))

        for event in self.events:
            user = event[0]
            # first summation
            gradient = np.zeros((1, self.M))

            for g in samples[user]:
                if user in user_community_points:
                    if g in user_community_points[user]:
                        user_community_points[user][g] += 1
                    else:
                        user_community_points[user][g] = 1
                else:
                    user_community_points[user] = {}
                    user_community_points[user][g] = 1

                lamda = hawkes_process().hawkes_intensity(
                    mu[user], Aij[user], community_points[g],
                    self.events[n][1], self.events[n][2], self.events[n][3],
                    bw[user])
                grad_phi_q = user_community_points[user][g] / max(
                    0.0001, phi[user, g])
                if (len(community_points[g]) <= 10):
                    community_points[g].append(
                        (self.events[n][1], self.events[n][2],
                         self.events[n][3], user))
                else:
                    community_points[g].popleft()
                    community_points[g].append(
                        (self.events[n][1], self.events[n][2],
                         self.events[n][3], user))

                gradient[0, g] = gradient[0, g] + np.log(max(
                    0.0001, lamda)) * grad_phi_q

            gradient_phi[user] = gradient_phi[user] + (1.0 /
                                                       self.S) * gradient[0]
            # gradient_phi[(user,g)] = gradient_phi[(user,g)] + ((1.0 / self.S) * v)

            # second summation

            c_n = self.events[n][4]

            for m in range(0, self.M):
                gradient_phi_theta_pi[(user, m)] = gradient_phi_theta_pi[
                    (user, m)] + np.log(max(0.0001, theta[(m, c_n)])) + np.log(
                        max(0.0001, pi[user, m]))

            n += 1

        # third summation

        gradient_phi_eq = 1.0 + np.log(phi)

        # for i in range(0, gradient_phi.shape[0]):
        #     for m in range(0, gradient_phi.shape[1]):
        #         # print "gradient_phi[(i,m)] : " + str(gradient_phi[(i, m)])
        #         # print "gradient_phi_theta_pi[(i,m)] : " + str(gradient_phi_theta_pi[(i, m)])
        #         # print "gradient_phi_eq[(i,m)] : "+str(gradient_phi_eq[(i,m)])
        #
        #         gradient_phi[(i,m)] = gradient_phi[(i,m)] + gradient_phi_theta_pi[(i,m)] - gradient_phi_eq[(i,m)]

        gradient_phi = gradient_phi + gradient_phi_theta_pi - gradient_phi_eq

        gradient_phi = np.reshape(gradient_phi,
                                  (1, np.prod(gradient_phi.shape)))

        # print "grad phi"
        # print gradient_phi[0,:]
        return gradient_phi[0, :]
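
The `gradient_phi_eq = 1.0 + np.log(phi)` term above is the derivative of the phi * log(phi) entropy piece that the objective subtracts as E_q[log q]. A short runnable check of the identity d/dp (p * log p) = 1 + log p with a toy scalar:

import numpy as np

p = 0.3
eps = 1e-6
# Central difference of p * log p ...
numeric = ((p + eps) * np.log(p + eps) - (p - eps) * np.log(p - eps)) / (2 * eps)
# ... matches the analytic form used by gradient_phi_eq.
analytic = 1.0 + np.log(p)
assert abs(numeric - analytic) < 1e-6
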