import numpy as np

import getR  # project module providing the recursive R terms of the exponential kernel


def gradient(theta, data, modelType):
    '''
    Gradient of the negative log-likelihood of the bivariate Hawkes model with
    respect to theta, using the same parameterisations as likelihood().
    '''
    if modelType == '6':
        mu = np.array(theta[:2]).reshape(2, 1)
        alpha = np.array(theta[2:6]).reshape(2, 2)
    if modelType == '4':
        mu = np.array([theta[0], theta[0]]).reshape(2, 1)
        alpha = np.array([theta[2], theta[3], theta[3], theta[2]]).reshape(2, 2)
    if modelType == '2cross':
        mu = np.array([theta[0], theta[0]]).reshape(2, 1)
        alpha = np.array([0.0, theta[3], theta[3], 0.0]).reshape(2, 2)

    # Decay rates are fixed to 1 for both dimensions.
    beta = np.ones((2, 1))

    pos = data[data[:, 1] == 1.0, 0].reshape(-1, 1)
    neg = data[data[:, 1] == -1.0, 0].reshape(-1, 1)
    pos = pos.astype(float, copy=False)
    neg = neg.astype(float, copy=False)
    N = pos.shape[0]
    M = neg.shape[0]
    T = data[-1, 0]

    R11 = getR.getR11(N, beta[0, 0], pos)
    R12 = getR.getR12(N, M, beta[0, 0], pos, neg)
    R21 = getR.getR21(N, M, beta[1, 0], pos, neg)
    R22 = getR.getR22(M, beta[1, 0], neg)

    gmu1 = -T + np.sum(1 / (mu[0, 0] + alpha[0, 0] * R11[1:] + alpha[0, 1] * R12[1:]))
    gmu2 = -T + np.sum(1 / (mu[1, 0] + alpha[1, 0] * R21[1:] + alpha[1, 1] * R22[1:]))
    galpha11 = -np.sum(1 - np.exp(-beta[0, 0] * (T - pos))) / beta[0, 0] + \
        np.sum(R11[1:] / (mu[0, 0] + alpha[0, 0] * R11[1:] + alpha[0, 1] * R12[1:]))
    galpha12 = -np.sum(1 - np.exp(-beta[0, 0] * (T - neg))) / beta[0, 0] + \
        np.sum(R12[1:] / (mu[0, 0] + alpha[0, 0] * R11[1:] + alpha[0, 1] * R12[1:]))
    galpha21 = -np.sum(1 - np.exp(-beta[1, 0] * (T - pos))) / beta[1, 0] + \
        np.sum(R21[1:] / (mu[1, 0] + alpha[1, 0] * R21[1:] + alpha[1, 1] * R22[1:]))
    galpha22 = -np.sum(1 - np.exp(-beta[1, 0] * (T - neg))) / beta[1, 0] + \
        np.sum(R22[1:] / (mu[1, 0] + alpha[1, 0] * R21[1:] + alpha[1, 1] * R22[1:]))

    # Negated because the optimiser minimises the negative log-likelihood.
    return -np.array([gmu1, gmu2, galpha11, galpha12, galpha21, galpha22])
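# Sanity-check sketch (an assumption, not part of the original module): the analytic
# gradient above can be compared against a finite-difference approximation of
# likelihood() (defined below) with scipy.optimize.check_grad on a small synthetic
# event array. The toy data and starting point here are hypothetical.
def _check_gradient_example():
    from scipy.optimize import check_grad
    # Toy data: column 0 is the event time, column 1 the mark (+1.0 or -1.0).
    toy = np.array([[0.5, 1.0], [1.2, -1.0], [2.0, 1.0], [3.1, -1.0], [4.0, 1.0]])
    theta0 = np.array([0.5, 0.5, 0.2, 0.1, 0.1, 0.2])
    # Returns the 2-norm of the difference between analytic and numerical gradients;
    # a small value suggests the two are consistent.
    return check_grad(likelihood, gradient, theta0, toy, '6')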
def likelihood(theta, data, modelType):
    '''
    Calculate the negative log-likelihood of the bivariate Hawkes model given
    theta and data. data is the numpy decomposed representation of the data:
    column 0 holds event times, column 1 holds the mark (+1.0 or -1.0).
    '''
    if modelType == '6':
        mu = np.array(theta[:2]).reshape(2, 1)
        alpha = np.array(theta[2:6]).reshape(2, 2)
    if modelType == '4':
        mu = np.array([theta[0], theta[0]]).reshape(2, 1)
        alpha = np.array([theta[2], theta[3], theta[3], theta[2]]).reshape(2, 2)
    if modelType == '2cross':
        mu = np.array([theta[0], theta[0]]).reshape(2, 1)
        alpha = np.array([0.0, theta[3], theta[3], 0.0]).reshape(2, 2)

    # Fix the decay rates beta to 1 for both dimensions.
    beta = np.ones((2, 1))

    pos = data[data[:, 1] == 1.0, 0].reshape(-1, 1)
    neg = data[data[:, 1] == -1.0, 0].reshape(-1, 1)
    pos = pos.astype(float, copy=False)
    neg = neg.astype(float, copy=False)
    N = pos.shape[0]
    M = neg.shape[0]
    T = data[-1, 0]

    R11 = getR.getR11(N, beta[0, 0], pos)
    R12 = getR.getR12(N, M, beta[0, 0], pos, neg)
    R21 = getR.getR21(N, M, beta[1, 0], pos, neg)
    R22 = getR.getR22(M, beta[1, 0], neg)

    L1 = -mu[0, 0] * T - (alpha[0, 0] / beta[0, 0]) * np.sum(1 - np.exp(-beta[0, 0] * (T - pos))) - \
        (alpha[0, 1] / beta[0, 0]) * np.sum(1 - np.exp(-beta[0, 0] * (T - neg))) + \
        np.sum(np.log(mu[0, 0] + alpha[0, 0] * R11[1:] + alpha[0, 1] * R12[1:]))
    L2 = -mu[1, 0] * T - (alpha[1, 0] / beta[1, 0]) * np.sum(1 - np.exp(-beta[1, 0] * (T - pos))) - \
        (alpha[1, 1] / beta[1, 0]) * np.sum(1 - np.exp(-beta[1, 0] * (T - neg))) + \
        np.sum(np.log(mu[1, 0] + alpha[1, 0] * R21[1:] + alpha[1, 1] * R22[1:]))

    # Return the negative log-likelihood so it can be minimised directly.
    return -L1 - L2
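# Usage sketch (an assumption about how these functions are driven, not part of the
# original module): fit the six-parameter model by minimising likelihood() with its
# analytic gradient. The bounds keep mu and alpha non-negative; the starting point
# theta0 is arbitrary.
def _fit_example(data):
    from scipy.optimize import minimize
    theta0 = np.array([0.5, 0.5, 0.2, 0.1, 0.1, 0.2])
    bounds = [(1e-6, None)] * 6
    res = minimize(likelihood, theta0, args=(data, '6'), jac=gradient,
                   method='L-BFGS-B', bounds=bounds)
    # res.x holds the estimated (mu1, mu2, alpha11, alpha12, alpha21, alpha22).
    return res.x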
def compensator(theta, data):
    '''
    Calculate the compensator for a given bivariate Hawkes process sequence.
    Column 0 of data is assumed to hold inter-event durations (converted to
    absolute times below); column 1 holds the mark (+1.0 or -1.0).
    '''
    mu = np.array(theta[:2]).reshape(2, 1)
    alpha = np.array(theta[2:6]).reshape(2, 2)
    beta = np.ones((2, 1))

    # Convert inter-event durations in column 0 to absolute event times (in place).
    data[:, 0] = np.cumsum(data[:, 0])
    pos = data[data[:, 1] == 1.0, 0].reshape(-1, 1)
    neg = data[data[:, 1] == -1.0, 0].reshape(-1, 1)
    pos = pos.astype(float, copy=False)
    neg = neg.astype(float, copy=False)
    N = pos.shape[0]
    M = neg.shape[0]
    T = data[-1, 0]

    A11 = getR.getR11(N, beta[0, 0], pos)
    A12 = getR.getR12(N, M, beta[0, 0], pos, neg)
    A21 = getR.getR21(N, M, beta[1, 0], pos, neg)
    A22 = getR.getR22(M, beta[1, 0], neg)

    Lambda1 = np.zeros((N - 1, 1))
    Lambda2 = np.zeros((M - 1, 1))

    for i in range(1, N):
        # Lambda_i is the compensator increment between t(i-1) and t(i), so i runs
        # from 1 to N-1 and the value is stored at index i-1.
        storeIndex = i - 1
        dur = pos[i, 0] - pos[i - 1, 0]
        beta1 = beta[0, 0]
        mu1 = mu[0, 0]
        alpha11 = alpha[0, 0]
        alpha12 = alpha[0, 1]
        value = mu1 * dur
        # Contribution from earlier events of the same dimension.
        part1 = alpha11 / beta1 * ((1 + A11[i - 1, 0]) * (1 - np.exp(-beta1 * dur)))
        # Contribution from events of the other dimension falling inside the interval.
        temp = neg[(neg >= pos[i - 1, 0]) & (neg < pos[i, 0])]
        if temp.shape[0] > 0:
            temp = np.sum(1 - np.exp(-beta1 * (pos[i, 0] - temp)))
        else:
            temp = 0.0
        part2 = alpha12 / beta1 * ((1 - np.exp(-beta1 * dur)) * A12[i - 1, 0] + temp)
        value = value + part1 + part2
        Lambda1[storeIndex] = value

    for i in range(1, M):
        storeIndex = i - 1
        dur = neg[i, 0] - neg[i - 1, 0]
        beta2 = beta[1, 0]
        mu2 = mu[1, 0]
        alpha21 = alpha[1, 0]
        alpha22 = alpha[1, 1]
        value = mu2 * dur
        part1 = alpha22 / beta2 * ((1 + A22[i - 1, 0]) * (1 - np.exp(-beta2 * dur)))
        temp = pos[(pos >= neg[i - 1, 0]) & (pos < neg[i, 0])]
        if temp.shape[0] > 0:
            temp = np.sum(1 - np.exp(-beta2 * (neg[i, 0] - temp)))
        else:
            temp = 0.0
        part2 = alpha21 / beta2 * ((1 - np.exp(-beta2 * dur)) * A21[i - 1, 0] + temp)
        value = value + part1 + part2
        Lambda2[storeIndex] = value

    return Lambda1, Lambda2
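# Goodness-of-fit sketch (an assumption, not part of the original module): by the
# time-change theorem, if the fitted model is correct the compensator increments
# returned above should behave like i.i.d. unit-rate exponential variables, which
# can be checked with a Kolmogorov-Smirnov test against Exp(1).
def _residual_test_example(theta, data):
    from scipy.stats import kstest
    # theta is the full 6-parameter vector (mu1, mu2, alpha11, alpha12, alpha21, alpha22).
    # compensator() expects inter-event durations in column 0 and mutates data in
    # place, so pass a copy.
    Lambda1, Lambda2 = compensator(theta, data.copy())
    return kstest(Lambda1.ravel(), 'expon'), kstest(Lambda2.ravel(), 'expon')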