def predict(self, x):
    """Classify one sample with the trained kernel SVM.

    Computes the decision value
        sum_{t in SV} alpha_t * y_t * K(X[:, t], x) + b
    and returns its sign (+1 / -1).

    Parameters
    ----------
    x : array_like
        One sample, shaped like a column of ``self.X``.

    Returns
    -------
    ndarray
        ``sign`` of the decision value (shape follows the kernel output).
    """
    w = 0
    # Only support vectors carry non-zero alpha, so restrict the sum to them.
    for t in self.supportVec:
        # flatten('F') replaces the legacy flatten(1) spelling, which modern
        # NumPy rejects; for the 1-D kernel output here they are identical.
        w += self.alpha[t] * self.y[t] * self.kernel(self.X[:, t], x).flatten('F')
    w += self.b
    return sign(w)
def pred(self, test_set):
    """Predict labels for ``test_set`` with the boosted ensemble.

    Bug fix: the original body ignored ``test_set`` entirely — it always
    scored the training matrix ``self.X`` and sized the accumulator from
    ``self.y`` — so predictions on new data were silently wrong.  This
    version scores the data actually passed in, matching the corrected
    sibling implementation of ``pred`` elsewhere in this file.

    Parameters
    ----------
    test_set : array_like
        Samples arranged column-wise (features x samples), like ``self.X``.

    Returns
    -------
    ndarray
        ``sign`` of the alpha-weighted vote of the Q+1 weak classifiers,
        one label per column of ``test_set``.
    """
    test_set = np.array(test_set)
    # One accumulator slot per sample (column).
    sums = np.zeros(test_set.shape[1])
    for i in range(self.Q + 1):
        # flatten('F') replaces the legacy flatten(1); identical for 1-D output.
        sums = sums + self.G[i].pred(test_set).flatten('F') * self.alpha[i]
    return sign(sums)
def pred(self, test_set):
    """Predict labels for ``test_set`` with the boosted ensemble.

    Parameters
    ----------
    test_set : array_like
        Samples arranged column-wise (features x samples), like ``self.X``.

    Returns
    -------
    ndarray
        ``sign`` of the alpha-weighted vote of the Q+1 weak classifiers,
        one label per column of ``test_set``.

    Raises
    ------
    AssertionError
        If the feature dimension of ``test_set`` differs from ``self.X``.
    """
    test_set = np.array(test_set)
    # Feature dimension must match the training data.
    assert test_set.shape[0] == self.X.shape[0]
    # One accumulator slot per sample; replaces np.zeros((n,1)).flatten(1).
    sums = np.zeros(test_set.shape[1])
    for i in range(self.Q + 1):
        # flatten('F') replaces the legacy flatten(1), which modern NumPy
        # rejects; identical for the 1-D per-sample vote vector.
        sums = sums + self.G[i].pred(test_set).flatten('F') * self.alpha[i]
    pre_y = sign(sums)
    return pre_y
def finalclassifer(self, t):
    """Fold weak classifier ``t`` into the running ensemble score and
    return the number of training errors of the boosted classifier built
    from weak learners 0..t.

    ``self.sums`` caches the alpha-weighted votes of classifiers 0..t-1,
    so each call only adds the newest learner's contribution instead of
    re-scoring the whole ensemble (the O(t) rescan kept here as dead
    commented-out code in the original has been removed).

    Parameters
    ----------
    t : int
        Index of the weak classifier to add.

    Returns
    -------
    int
        Count of misclassified training samples after adding learner ``t``.
    """
    # flatten('F') replaces the legacy flatten(1), which modern NumPy rejects.
    self.sums = self.sums + self.G[t].pred(self.X).flatten('F') * self.alpha[t]
    pre_y = sign(self.sums)
    # Separate name for the error count: the original reused the parameter
    # ``t`` here, shadowing the classifier index.
    errors = (pre_y != self.y).sum()
    return errors
def prints_test_linear(self):
    # Rebuild the primal weight vector w from the support vectors
    # (valid for a linear kernel only: w = sum alpha_t * y_t * x_t),
    # print the training error of sign(w.x + b), and plot the data
    # together with the separating line.
    w=0
    for t in self.supportVec:
        # NOTE(review): flatten(1) is the legacy Fortran-order spelling;
        # modern NumPy only accepts flatten('F') -- confirm NumPy version.
        w+=self.alpha[t]*self.y[t]*self.X[:,t].flatten(1)
    w=w.reshape(1,w.size)
    # Python-2 print statement: number of misclassified training points.
    print np.sum(sign(np.dot(w,self.X)+self.b).flatten(1)!=self.y),"errrr"
    #print w,self.b
    # Two points on the line w.x + b = 0: its intercepts with the y- and
    # x-axis.  NOTE(review): divides by w[0][1] and w[0][0] -- raises if
    # either weight component is exactly zero.
    x1=0
    y1=-self.b/w[0][1]
    y2=0
    x2=-self.b/w[0][0]
    # Extend the segment past the first intercept so the line spans the plot.
    plt.plot([x1+x1-x2,x2],[y1+y1-y2,y2])
    #plt.plot([x1+x1-x2,x2],[y1+y1-y2-1,y2-1])
    plt.axis([0,30,0,30])
    # Scatter the training data: red = class -1, blue = class +1
    # (assumes 2-D features: rows 0 and 1 of self.X -- TODO confirm).
    for i in range(self.M):
        if self.y[i]==-1:
            plt.plot(self.X[0,i],self.X[1,i],'or')
        elif self.y[i]==1:
            plt.plot(self.X[0,i],self.X[1,i],'ob')
    # Re-draw the support vectors on top, in yellow.
    for i in self.supportVec:
        plt.plot(self.X[0,i],self.X[1,i],'oy')
    plt.show()
def predict(self, x):
    """Classify one sample with the trained kernel SVM.

    Returns ``sign( sum_{t in SV} alpha_t * y_t * K(X[:, t], x) + b )``.
    (This method is a near-duplicate of ``predict`` earlier in the file;
    both carry the same fix.)

    Parameters
    ----------
    x : array_like
        One sample, shaped like a column of ``self.X``.

    Returns
    -------
    ndarray
        ``sign`` of the decision value (shape follows the kernel output).
    """
    w = 0
    # Only support vectors carry non-zero alpha, so restrict the sum to them.
    for t in self.supportVec:
        # flatten('F') replaces the legacy flatten(1) spelling, which modern
        # NumPy rejects; for the 1-D kernel output here they are identical.
        w += self.alpha[t] * self.y[t] * self.kernel(self.X[:, t], x).flatten('F')
    w += self.b
    return sign(w)