def _local_error(self, targetM, i):
    """Local metric-learning (LMNN-style) error at layer ``i``.

    Uses the per-layer representation ``self._x[:, i, :]`` and the
    Mahalanobis matrix ``targetM`` to build two symbolic loss terms:

    * ``pull_error`` -- sum of squared metric distances over target
      neighbor pairs (``self._neighborpairs``), pulling neighbors closer.
    * ``push_error`` -- unit-margin hinge over triplets ``self._set``
      (anchor, neighbor, impostor), masked to triplets whose anchor and
      impostor labels differ.

    Side effect: stores ``self.zerocount``, the number of triplets whose
    masked hinge term is exactly zero (margin already satisfied).

    Parameters
    ----------
    targetM : symbolic matrix
        Mahalanobis metric being learned for layer ``i``.
    i : int
        Layer index into ``self._x``.

    Returns
    -------
    (pull_error, push_error)
        Pair of symbolic scalars.
    """
    layer = self._x[:, i, :]

    # Pull term: squared Mahalanobis distance between each neighbor pair.
    diffv = layer[self._neighborpairs[:, 0]] - layer[self._neighborpairs[:, 1]]
    pull_error = linalg.trace(diffv.dot(targetM).dot(diffv.T))

    # Push term over triplets (anchor i, neighbor j, impostor l).
    ivectors = layer[self._set[:, 0]]
    jvectors = layer[self._set[:, 1]]
    lvectors = layer[self._set[:, 2]]
    diffij = ivectors - jvectors
    diffil = ivectors - lvectors
    lossij = diffij.dot(targetM).dot(diffij.T)
    lossil = diffil.dot(targetM).dot(diffil.T)
    # Only triplets whose anchor/impostor labels differ contribute.
    mask = T.neq(self._y[self._set[:, 0]], self._y[self._set[:, 2]])
    # Hoisted: the masked hinge feeds both the loss and zerocount.
    hinge = mask * T.maximum(lossij - lossil + 1, 0)
    push_error = linalg.trace(hinge)
    # Count of margin-satisfied triplets (zero hinge on the diagonal).
    self.zerocount = T.eq(linalg.diag(hinge), 0).sum()

    return pull_error, push_error
def wish_dist(Wi, Wj, k):
    """Distance between two covariance matrices under a Wishart model.

    Parameters
    ----------
    Wi, Wj : (3, 3) array_like, possibly complex
        Covariance matrices.
    k : int
        Distance definition to use:
        1 -- default Wishart distance
        2 -- symmetric Wishart distance
        3 -- Bartlett distance
        4 -- revised Wishart distance
        5 -- symmetric trace distance

    Returns
    -------
    float
        The requested distance.

    Raises
    ------
    ValueError
        If ``k`` is not one of 1..5.
    """
    # Sum of the covariance matrices (used by the Bartlett distance).
    Wij = Wi + Wj

    def _logdet(W):
        # Analytically reduced form of log(det(W)) (Rignot and Chellappa,
        # 1992); assumes only the (0, 2) off-diagonal element contributes
        # -- TODO confirm this matches the data's covariance structure.
        # Fixes vs. original: np.ln -> np.log, MATLAB-style 1-based call
        # syntax W(1, 1) -> 0-based indexing, and `^` (bitwise XOR) -> `**`.
        return np.log(W[0, 0] * W[1, 1] * W[2, 2]
                      * (1.0 - np.real(W[0, 2]) ** 2))

    log_i = _logdet(Wi)
    log_j = _logdet(Wj)
    log_ij = _logdet(Wij)

    # |trace(pinv(Wj) @ Wi)| and |trace(pinv(Wi) @ Wj)|: these are matrix
    # products -- the original `*` is elementwise in NumPy.
    tri = np.abs(np.trace(np.linalg.pinv(Wj).dot(Wi)))
    trj = np.abs(np.trace(np.linalg.pinv(Wi).dot(Wj)))

    if k == 1:  # default Wishart distance
        return log_j + tri
    if k == 2:  # symmetric Wishart distance
        return 0.5 * (log_i + log_j + tri + trj)
    if k == 3:  # Bartlett distance
        return 2 * log_ij - log_i - log_j
    if k == 4:  # revised Wishart distance
        return log_j - log_i + tri
    if k == 5:  # symmetric trace distance
        return tri + trj
    # Previously an unknown k fell through to an UnboundLocalError.
    raise ValueError("k must be in 1..5, got %r" % (k,))
def _global_error(self, targetM, i, lastM):
    """Global error at layer ``i`` relative to the previous layer.

    Pull term: at layer 0, the plain metric distance over target-neighbor
    pairs; at deeper layers, the current-metric pair distance clipped at
    zero. Push term: for each triplet, penalize via
    ``g(last, cur) = last * (1 - cur)`` -- large when the previous layer
    scored the triplet well but the current layer does not.

    Parameters
    ----------
    targetM : symbolic matrix
        Metric for layer ``i``.
    i : int
        Layer index into ``self._stackx``.
    lastM : symbolic matrix
        Metric of layer ``i - 1`` (unused when ``i == 0``).

    Returns
    -------
    (pull_error, push_error)
        Pair of symbolic scalars.
    """
    # Triplet is active only when neighbor/impostor labels differ.
    mask = T.neq(self._y[self._set[:, 1]], self._y[self._set[:, 2]])
    f = T.nnet.sigmoid

    def g(lst, cur):
        # Penalty for regressing: previous layer confident, current not.
        return lst * (1 - cur)

    if i == 0:
        # Pull at the first layer: plain metric distance on neighbor pairs.
        ivectors = self._stackx[:, i, :][self._neighborpairs[:, 0]]
        jvectors = self._stackx[:, i, :][self._neighborpairs[:, 1]]
        diffv = ivectors - jvectors
        pull_error = linalg.trace(diffv.dot(targetM).dot(diffv.T))
    else:
        # Deeper layers: clip the current-metric distance at zero.
        ivectors = self._stackx[:, i, :][self._neighborpairs[:, 0]]
        jvectors = self._stackx[:, i, :][self._neighborpairs[:, 1]]
        diffv1 = ivectors - jvectors
        distMcur = diffv1.dot(targetM).dot(diffv1.T)
        pull_error = linalg.trace(T.maximum(distMcur, 0))

    # Current layer's triplet score: f of the (impostor - neighbor) margin.
    ivectors = self._stackx[:, i, :][self._set[:, 0]]
    jvectors = self._stackx[:, i, :][self._set[:, 1]]
    lvectors = self._stackx[:, i, :][self._set[:, 2]]
    diffij = ivectors - jvectors
    diffil = ivectors - lvectors
    lossij = diffij.dot(targetM).dot(diffij.T)
    lossil = diffil.dot(targetM).dot(diffil.T)
    cur_prediction = f(T.diag(lossil - lossij))

    # Previous layer's score.
    # NOTE(review): when i == 0 this indexes _stackx[:, -1, :] (the LAST
    # layer) with an identity metric -- confirm this wraparound is intended.
    ivectors = self._stackx[:, i - 1, :][self._set[:, 0]]
    jvectors = self._stackx[:, i - 1, :][self._set[:, 1]]
    lvectors = self._stackx[:, i - 1, :][self._set[:, 2]]
    diffij = ivectors - jvectors
    diffil = ivectors - lvectors
    if i == 0:
        lossij = diffij.dot(diffij.T)
        lossil = diffil.dot(diffil.T)
    else:
        lossij = diffij.dot(lastM).dot(diffij.T)
        lossil = diffil.dot(lastM).dot(diffil.T)
    lst_prediction = f(T.diag(lossil - lossij))

    push_error = T.sum(mask * g(lst_prediction, cur_prediction))
    return pull_error, push_error
def _global_error(self, targetM, i, lastM):
    """Global pull error for layer ``i``.

    Penalizes target-neighbor pairs whose distance under the current
    metric (layer ``i``, ``targetM``) exceeds their distance under the
    previous layer's metric (layer ``i - 1``, ``lastM``) by more than a
    unit margin. The push term is disabled in this variant and reported
    as zero.

    Returns
    -------
    (pull_error, push_error)
        Symbolic scalar and the constant ``0.0``.
    """
    heads = self._neighborpairs[:, 0]
    tails = self._neighborpairs[:, 1]

    # Pair differences at the current and the previous layer.
    cur_gap = self._stackx[:, i, :][heads] - self._stackx[:, i, :][tails]
    lst_gap = self._stackx[:, i - 1, :][heads] - self._stackx[:, i - 1, :][tails]

    # Distances under the respective metrics.
    cur_dist = cur_gap.dot(targetM).dot(cur_gap.T)
    lst_dist = lst_gap.dot(lastM).dot(lst_gap.T)

    # Hinge: only pairs that drifted apart beyond the margin contribute.
    pull_error = linalg.trace(T.maximum(cur_dist - lst_dist + 1, 0))

    # Push term intentionally disabled in this variant.
    push_error = 0.0
    return pull_error, push_error
def _global_error(self, targetM, i, lastM):
    """Global error for layer ``i`` (tanh-scored variant).

    Pull term: at layer 0, the raw metric distance over target-neighbor
    pairs; at deeper layers, a unit-margin hinge between the current and
    previous layer's pair distances. Push term: rewards triplets whose
    tanh-squashed (impostor - neighbor) margin improved relative to the
    previous layer, restricted to triplets whose neighbor and impostor
    labels differ.

    Returns
    -------
    (pull_error, push_error)
        Pair of symbolic scalars.
    """
    squash = T.tanh
    active = T.neq(self._y[self._set[:, 1]], self._y[self._set[:, 2]])
    anchor_idx = self._set[:, 0]
    neigh_idx = self._set[:, 1]
    impostor_idx = self._set[:, 2]

    if i == 0:
        # ---- pull: plain metric distance over neighbor pairs ----
        pairs_a = self._stackx[:, i, :][self._neighborpairs[:, 0]]
        pairs_b = self._stackx[:, i, :][self._neighborpairs[:, 1]]
        gap = pairs_a - pairs_b
        pull_error = linalg.trace(gap.dot(targetM).dot(gap.T))

        # ---- push: layer-0 score vs. the previous representation ----
        anchors = self._stackx[:, i, :][anchor_idx]
        neighbors = self._stackx[:, i, :][neigh_idx]
        impostors = self._stackx[:, i, :][impostor_idx]
        d_an = anchors - neighbors
        d_ai = anchors - impostors
        near = d_an.dot(targetM).dot(d_an.T)
        far = d_ai.dot(targetM).dot(d_ai.T)
        cur_score = squash(T.diag(far - near))

        # Previous representation scored with an identity metric.
        anchors = self._stackx[:, i - 1, :][anchor_idx]
        neighbors = self._stackx[:, i - 1, :][neigh_idx]
        impostors = self._stackx[:, i - 1, :][impostor_idx]
        d_an = anchors - neighbors
        d_ai = anchors - impostors
        near = d_an.dot(d_an.T)
        far = d_ai.dot(d_ai.T)
        lst_score = squash(T.diag(far - near))

        push_error = T.sum(active * (lst_score - cur_score))
    else:
        # ---- pull: margin hinge between current and previous layer ----
        pairs_a = self._stackx[:, i, :][self._neighborpairs[:, 0]]
        pairs_b = self._stackx[:, i, :][self._neighborpairs[:, 1]]
        gap = pairs_a - pairs_b
        cur_dist = gap.dot(targetM).dot(gap.T)

        pairs_a = self._stackx[:, i - 1, :][self._neighborpairs[:, 0]]
        pairs_b = self._stackx[:, i - 1, :][self._neighborpairs[:, 1]]
        gap = pairs_a - pairs_b
        lst_dist = gap.dot(lastM).dot(gap.T)

        pull_error = linalg.trace(T.maximum(cur_dist - lst_dist + 1, 0))

        # ---- push: squashed-margin improvement over previous layer ----
        anchors = self._stackx[:, i, :][anchor_idx]
        neighbors = self._stackx[:, i, :][neigh_idx]
        impostors = self._stackx[:, i, :][impostor_idx]
        d_an = anchors - neighbors
        d_ai = anchors - impostors
        near = d_an.dot(targetM).dot(d_an.T)
        far = d_ai.dot(targetM).dot(d_ai.T)
        cur_score = squash(T.diag(far - near))

        anchors = self._stackx[:, i - 1, :][anchor_idx]
        neighbors = self._stackx[:, i - 1, :][neigh_idx]
        impostors = self._stackx[:, i - 1, :][impostor_idx]
        d_an = anchors - neighbors
        d_ai = anchors - impostors
        near = d_an.dot(lastM).dot(d_an.T)
        far = d_ai.dot(lastM).dot(d_ai.T)
        lst_score = squash(T.diag(far - near))

        push_error = T.sum(active * (lst_score - cur_score))

    return pull_error, push_error