def fit(self, X, Y):
    M = X[0].get_shape()[1]          # number of features
    N = len(X)                       # number of bags
    F = np.random.ranf((1, M))       # random initial regression hyperplane
    H = np.matrix(np.zeros((N, M)))  # bag representations
    self.P = []                      # per-bag instance weights
    self.X_w = []                    # flattened instances (all bags stacked)
    self.Y_w = []                    # flattened instance weights
    converged = False
    prev_error = sys.maxsize
    it = 0
    #print "-" * 100
    #print "e1: %f" % self.e1
    #print "e2: %f" % self.e2
    #print "e3: %f" % self.e3
    #print "M: %d" % M
    #print "N: %d" % N
    print
    print "[+] Training..."
    print "--/start"
    while not converged and it < self.iterations:
        for i, Xi in enumerate(X):
            if it == 0:
                # First pass: stack the instances of every bag and set up
                # uniform instance weights for each bag.
                if i == 0:
                    self.X_w = Xi
                else:
                    self.X_w = vstack([self.X_w, Xi])
                self.P.append(np.ones((1, X[i].get_shape()[0])))
                self.Y_w.append([])
            Xi = Xi.tocsr()
            # Instance-level targets: predictions of the current bag-level
            # model f2, or a projection on F before f2 exists (f2 is expected
            # to be None until the end of the first outer iteration).
            if self.f2:
                HC = np.matrix(self.f2.predict(Xi)).T
            else:
                HC = Xi.dot(F.T).T
            # f1: constrained RLS re-estimates the instance weights of bag i.
            self.f1 = cRLS(alpha=self.e1)
            self.P[i] = self.f1.fit(HC, Y[i], self.P[i])
            self.Y_w[i] = self.f1.coef_
            cur_p = csr_matrix(self.f1.coef_)
            H[i] = cur_p.dot(Xi).todense()  # weighted combination = bag representation
        # f2: bag-level ridge regression on the updated bag representations.
        self.f2 = linear_model.Ridge(alpha=self.e2)
        self.f2.fit(H, Y)
        cur_error = mean_absolute_error(self.f2.predict(H), Y)
        print "iteration %d -> (MAE: %f) " % (it, cur_error)
        if prev_error - cur_error < 0.000001:
            converged = True
        prev_error = cur_error
        it += 1
    # f3: instance-level ridge regression mapping instances to their learned weights.
    self.f3 = linear_model.Ridge(alpha=self.e3)
    self.f3.fit(self.X_w, np.hstack(self.Y_w))
    print "--/end"
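# A minimal sketch of the module-level setup the fit() above appears to
# assume: the numpy / scipy / scikit-learn names are standard, while `cRLS`
# is a project-local constrained regularized least-squares solver whose
# import path below is only a guess for illustration.
import sys
import numpy as np
from scipy.sparse import vstack, csr_matrix
from sklearn import linear_model
from sklearn.metrics import mean_absolute_error
from crls import cRLS  # hypothetical import path for the custom solver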
def fit(self, X, Y):
    M = X[0].get_shape()[1]       # number of features
    N = len(X)                    # number of bags
    F = np.random.ranf((1, M))    # hyperplane to be learned
    H = matrix(np.zeros((N, M)))  # bag representations
    P = []                        # per-bag instance weights
    Y_w = []                      # flattened instance weights
    X_w = []                      # flattened instances (all bags stacked)
    converged = False
    prev_error = 999999
    it = 0
    print "-" * 100
    print "L1: %f" % self.l1
    print "L2: %f" % self.l2
    print "L3: %f" % self.l3
    print "M: %d" % M
    print "N: %d" % N
    print
    print "[+] Training..."
    while not converged and it < self.iterations:
        for i, Xi in enumerate(X):
            if it == 0:
                # First pass: stack the instances of every bag and set up
                # uniform instance weights for each bag.
                if i == 0:
                    X_w = Xi
                else:
                    X_w = sparse.vstack([X_w, Xi])
                P.append(np.ones((1, X[i].get_shape()[0])))
                Y_w.append([])
            Xi = Xi.tocsr()
            # Instance-level targets: predictions of the current bag-level
            # model f2, or a projection on F before f2 exists.
            if self.f2:
                HC = matrix(self.f2.predict(Xi)).T
            else:
                HC = Xi.dot(F.T).T
            # f1: constrained RLS re-estimates the instance weights of bag i.
            self.f1 = cRLS(alpha=self.l1)
            P[i] = self.f1.fit(HC, Y[i], P[i])
            Y_w[i] = self.f1.coef_
            cur_p = sparse.csr_matrix(self.f1.coef_)
            H[i] = cur_p.dot(Xi).todense()  # weighted combination = bag representation
        # f2: bag-level ridge regression on the updated bag representations.
        self.f2 = linear_model.Ridge(alpha=self.l2)
        self.f2.fit(H, Y)
        pred = self.f2.predict(H)
        cur_error = mean_absolute_error(pred, Y)
        print "iteration %d -> (MAE: %f) " % (it, cur_error)
        self.coef_ = self.f2.coef_
        if prev_error - cur_error < 0.000001:
            converged = True
        prev_error = cur_error
        it += 1
    # f3: instance-level ridge regression mapping instances to their learned weights.
    Y_w = np.hstack(Y_w)
    print "Training f3..."
    self.f3 = linear_model.Ridge(alpha=self.l3)
    self.f3.fit(X_w, Y_w)
    self.P = P
    self.H = H
    print "--/end"
    return F
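# A hedged usage sketch: each bag is a sparse (n_instances x M) matrix, Y has
# one target per bag, and fit() alternates between f1 (instance weights per
# bag) and f2 (bag-level ridge) before training the instance-level model f3.
# The class name `MIRegressor` and its constructor signature are placeholders,
# not part of this file; the class that actually defines fit() is not shown.
import numpy as np
from scipy.sparse import csr_matrix

rng = np.random.RandomState(0)
M = 50                                              # feature dimension
bags = [csr_matrix(rng.rand(rng.randint(2, 6), M))  # 20 toy bags of 2-5 instances
        for _ in range(20)]
Y = np.array([b.mean() for b in bags])              # toy bag-level targets

model = MIRegressor(l1=1.0, l2=1.0, l3=1.0, iterations=10)  # hypothetical class
model.fit(bags, Y)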