def soft_assign(self, x, mixmean=None, mixcoef=None, mixcov=None):
    """
    Calculate the probability of x for each of the k classes.

    Typically a sample is assigned to the class with the largest
    probability, i.e. the argmax over the rows of `prob`.

    Parameters
    ----------
    x : (d,n) ndarray
        The observations that need to be classified.
    mixmean : (d,k) ndarray, or None
        The means of the k mixture components.
        If `None` use the values of the class.
    mixcoef : (k,) ndarray, or None
        The k mixture coefficients.
        If `None` use the values of the class.
    mixcov : (k,d,d) ndarray, or None
        The covariances of the k mixture components.
        If `None` use the values of the class.

    Return
    ------
    prob : (k,n) ndarray
        The probabilities for each of the k classes, one column per sample.
    """
    if len(x.shape) == 1:
        x = x[:, None]
    d, n = x.shape
    # Fall back to the parameters stored on the class only when no
    # explicit values were passed in (the docstring promises this).
    if mixmean is None:
        mixmean = self.mixmean
    if mixcoef is None:
        mixcoef = self.mixcoef
    if mixcov is None:
        mixcov = self.mixcov
    k = len(mixcoef)
    prob = np.zeros((k, n))
    for j in range(n):
        for i in range(k):
            g = Gauss(mixmean[:, i], mixcov[i])
            prob[i, j] = g.f(x[:, j]) * mixcoef[i]
    # Normalize each column so the class probabilities sum to one.
    return prob / np.sum(prob, axis=0)[None, :]
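
# A minimal usage sketch (hypothetical helper, not part of the original
# class): turn the soft probabilities from `soft_assign` into hard labels.
# `gmm` is assumed to be a fitted instance of this class, `x` a (d, n) array.
def hard_assign_sketch(gmm, x):
    prob = gmm.soft_assign(x)            # (k, n) posterior class probabilities
    labels = np.argmax(prob, axis=0)     # (n,) most probable class per sample
    return labels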
def _respon(self, mixmean, mixcoef, mixcov):
    """
    Calculate the responsibilities of each data point for each class k.

    Parameters
    ----------
    mixmean : (d,k) ndarray
        The means of the k mixture components.
    mixcoef : (k,) ndarray
        The mixture coefficient for each component.
    mixcov : (k,d,d) ndarray
        The covariance matrices of the k mixture components.

    Return
    ------
    gam : (k,n) ndarray
        The responsibilities of data point x_n to mixture component k.
    """
    data = self.data
    d, n = data.shape
    k = self.k
    # Work in the log domain: log p(x_n, z_n = i) = log N(x_n | mu_i, Sigma_i) + log pi_i.
    log_joint = np.zeros((k, n))
    for j in range(n):
        for i in range(k):
            g = Gauss(mixmean[:, i], mixcov[i])
            log_joint[i, j] = np.log(g.f(data[:, j])) + np.log(mixcoef[i])
    # Normalize over the components with the log-sum-exp trick; subtracting
    # the per-sample maximum keeps np.exp from underflowing to zero.
    log_joint -= np.max(log_joint, axis=0)[None, :]
    gam = np.exp(log_joint)
    gam /= np.sum(gam, axis=0)[None, :]
    return gam
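
# A minimal sketch of the same normalization using scipy (assumption: scipy
# is available; `log_joint` stands for the (k, n) array of
# log N(x_n | mu_i, Sigma_i) + log pi_i built inside `_respon`). Subtracting
# logsumexp before exponentiating is the standard way to avoid underflow
# when all component densities are tiny.
def responsibilities_sketch(log_joint):
    from scipy.special import logsumexp
    # gamma_{in} = exp(log_joint_{in} - log sum_i exp(log_joint_{in}))
    return np.exp(log_joint - logsumexp(log_joint, axis=0, keepdims=True))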
def f(self, x, mixmean=None, mixcoef=None, mixcov=None):
    """
    Evaluate the GMM at x.

    Parameters
    ----------
    x : (d,) ndarray
        A single d-dimensional observation.
    mixmean : (d,k) ndarray, or None
        The means of the k mixture components.
        If `None` use the values of the class.
    mixcoef : (k,) ndarray, or None
        The k mixture coefficients.
        If `None` use the values of the class.
    mixcov : (k,d,d) ndarray, or None
        The covariances of the k mixture components.
        If `None` use the values of the class.

    Return
    ------
    val : float
        The value of the GMM at the given x.
    """
    # Use `is None` rather than `== None`: `==` on an ndarray is
    # element-wise and raises in a boolean context.
    if mixmean is None:
        mixmean = self.mixmean
    if mixcoef is None:
        mixcoef = self.mixcoef
    if mixcov is None:
        mixcov = self.mixcov
    k = len(mixcoef)
    # Weighted sum of the k component densities: sum_j pi_j N(x | mu_j, Sigma_j).
    comp = np.zeros(k)
    for j in range(k):
        g = Gauss(mixmean[:, j], mixcov[j])
        comp[j] = g.f(x)
    return mixcoef.dot(comp)
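
# A minimal cross-check sketch (assumption: scipy is available and `Gauss.f`
# evaluates a multivariate normal pdf): compute the mixture density
# p(x) = sum_j pi_j N(x | mu_j, Sigma_j) directly with scipy, e.g. to test `f`.
def gmm_pdf_sketch(x, mixmean, mixcoef, mixcov):
    from scipy.stats import multivariate_normal
    return sum(mixcoef[j] * multivariate_normal(mixmean[:, j], mixcov[j]).pdf(x)
               for j in range(len(mixcoef)))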