def _common(self, Z, y):
    """Fit a linear regression to features Z and targets y.

    Normalizes the features, runs gradient-descent linear regression from a
    zero initial parameter vector, and maps the learned coefficients back to
    the original feature scale.

    NOTE(review): `Scaler`, `compose`, `prepend_x0`, `lin_reg`, `get`, `J`,
    and `gradJ` are defined elsewhere in the project; the comments below on
    pipeline ordering assume `compose` applies its arguments right-to-left —
    TODO confirm against the `compose` definition.
    """
    # Build a scaler around the raw feature matrix Z.
    scale = Scaler(Z)
    # Pipeline: normalize the scaler's data, then prepend the x0 = 1
    # bias column (presumably — order depends on `compose` semantics).
    # Note `Scaler.normalize` is called unbound with `scale` as its argument.
    transform = compose(prepend_x0, Scaler.normalize)
    X = transform(scale)
    # Pair each feature row with its target. In Python 3 this is a one-shot
    # iterator; it is consumed exactly once by coeff(data) below.
    data = zip(X, y)
    # Zero-initialized parameters, one per column (bias included).
    h_theta0 = [0.] * len(X[0])
    # Pipeline: run regression (cost J, gradient gradJ, up to 2000
    # iterations), take element 0 of the result, then de-normalize the
    # coefficients back to the original feature scale.
    coeff = compose(scale.denormalize, get(0), lin_reg(J, gradJ, h_theta0, it_max=2000))
    h_thetad = coeff(data)
    return h_thetad
# Load the iris data set: feature columns SL/SW/PL/PW and the 'Type' label.
Z, q = csv_reader('./data/iris.csv', ['SL', 'SW', 'PL', 'PW'], 'Type')
# Get Sepal Length and Petal Length features
Zp = list(pluck([0, 2], Z))
# Get only the Iris Setosa (0) and Iris Versicolour (1) classes
datap = [[f, o] for f, o in zip(Zp, q) if o != 2.0]
Xp, yp = zip(*datap)
y = list(yp)
Xpp = [list(e) for e in Xp]
print(Xpp)
print(y)
# Split set into training and testing data.
# BUGFIX: zip() is a one-shot iterator in Python 3 — materialize it so the
# splitter can take its length / index into it.
train_data, test_data = train_test_split(list(zip(Xpp, y)), 0.33)
# Scale the data
X_train, y_train = zip(*train_data)
scale = Scaler()
scale.fit(X_train)
# Pipeline: scale the features, then prepend the x0 = 1 bias column.
transform = compose(prepend_x0, scale.transform)
scaledX_train = transform(X_train)
# BUGFIX: materialize — the fitter iterates the data once per gradient step
# (it_max=500), and a zip iterator would be exhausted after the first pass.
scaled_train = list(zip(scaledX_train, y_train))
# Fit the training data
h_theta0 = [1., 1., 1.]
print('****Gradient Descent****\n')
print('--Training--\n')
h_thetaf, cost = glm.fit(logr.logistic_log_likelihood, logr.grad_logistic, h_theta0, scaled_train, eta=0.03, it_max=500, gf='gd')