Example #1
def elasticNet(X, y):

    print("\n### ~~~~~~~~~~~~~~~~~~~~ ###")
    print("Elastic Net Regression")

    ### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
    myDegree = 40
    polynomialFeatures = PolynomialFeatures(degree=myDegree, include_bias=False)
    Xp = polynomialFeatures.fit_transform(X)

    myScaler = StandardScaler()
    scaled_Xp = myScaler.fit_transform(Xp)

    ### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
    elasticNet = ElasticNet(alpha=1e-7, l1_ratio=0.5)
    elasticNet.fit(scaled_Xp, y)

    ### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
    # reuse the already-fitted transformers on the plotting grid (transform, not fit_transform)
    dummyX = np.arange(0, 2, 0.01)
    dummyX = dummyX.reshape((dummyX.shape[0], 1))
    dummyXp = polynomialFeatures.transform(dummyX)
    scaled_dummyXp = myScaler.transform(dummyXp)
    dummyY = elasticNet.predict(scaled_dummyXp)

    outputFILE = 'plot-elasticNet.png'
    fig, ax = plt.subplots()
    fig.set_size_inches(h=6.0, w=10.0)
    ax.axis([0, 2, 0, 15])
    ax.scatter(X, y, color="black", s=10.0)
    ax.plot(dummyX, dummyY, color='red', linewidth=1.5)
    # savefig takes the file name as its first positional argument (fname)
    plt.savefig(outputFILE, bbox_inches='tight', pad_inches=0.2, dpi=600)

    ### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
    return None
Example #2
def check_ElasticNet(X, y, pred, tol, reg_alpha, reg_lambda, weights):
    # xgboost's (reg_alpha, reg_lambda) L1/L2 penalties map onto sklearn's
    # parameterisation as alpha = reg_alpha + reg_lambda and
    # l1_ratio = reg_alpha / (reg_alpha + reg_lambda)
    enet = ElasticNet(alpha=reg_alpha + reg_lambda,
                      l1_ratio=reg_alpha / (reg_alpha + reg_lambda))
    enet.fit(X, y)
    enet_pred = enet.predict(X)
    assert np.isclose(weights, enet.coef_, rtol=tol, atol=tol).all()
    assert np.isclose(enet_pred, pred, rtol=tol, atol=tol).all()
Example #3
def enet(a):
    print ("Doing elastic net")
    clf3 = ElasticNet(alpha=a)
    clf3.fit(base_X, base_Y)
    print ("Score = %f" % clf3.score(base_X, base_Y))
    clf3_pred = clf3.predict(X_test)
    write_to_file("elastic.csv", clf3_pred)
Example #4
def report_ff_en():
    # Fastfood approximation of Gaussian kernel
    para = FastfoodPara(n, d)
    st = time()
    PHI_train, _ = FastfoodForKernel(trainData, para, sgm)
    elapsed_ff_kern_train = time() - st
    st = time()
    PHI_valid, _ = FastfoodForKernel(validationData, para, sgm)
    elapsed_ff_kern_valid = time() - st

    # Train elastic net on projected training data
    en = ElasticNet()
    st = time()
    en.fit(PHI_train.T, trainLabels)
    elapsed_en_fit = time() - st

    # Predict labels for projected validation data
    st = time()
    y_pred = en.predict(PHI_valid.T)
    elapsed_en_pred = time() - st

    # Report performance
    mse_proj = metrics.mean_squared_error(validationLabels, y_pred)
    # print("For projected data, MSE = {:0.4g}.".format(mse_proj))

    return mse_proj, elapsed_en_fit, elapsed_ff_kern_train
Example #5
def enet_granger_causality_test(X_t, y_t, top_df, max_iter=10000000):
    """
    Return the cv-parameters tested across the whole data
    :param X_t:
    :param y_t:
    :param top_df:
    :return: res_df, test_betas
    """

    test_errs = np.zeros(len(top_df))
    scores = np.zeros(len(top_df))
    dfs = np.zeros(len(top_df))

    test_coefs = np.zeros((len(top_df), X_t.shape[1]))
    for i in range(len(top_df)):
        alpha = top_df.iloc[i]["alpha"]
        lambda_min = top_df.iloc[i]["lambda.min"]
        enet = ElasticNet(l1_ratio=alpha, alpha=lambda_min, max_iter=max_iter)
        enet.fit(X_t, y_t)
        y_pred = enet.predict(X_t)
        test_errs[i] = np.average((y_t - y_pred)**2)
        scores[i] = enet.score(X_t, y_t)
        test_coefs[i] = enet.coef_

        dfs[i] = len(np.where(enet.coef_)[0])  # degrees of freedom = number of nonzero coefficients

    top_df["test_err"] = test_errs
    top_df["score"] = scores
    top_df["df"] = dfs


    return top_df, test_coefs
Example #6
    def elastic_net(self):
        # ElasticNetCV picks alpha by cross-validation, so no separate ElasticNet() instance is needed
        # features = ['season', 'holiday', 'workingday', 'weather', 'humidity', 'temp', 'windspeed', 'hour', 'month', 'year', 'day_of_week']
        features = ['season', 'workingday', 'weather', 'humidity', 'windspeed', 'hour', 'month', 'year', 'day_of_week']
        enet = ElasticNetCV()
        enet.fit(self.train[features], self.train['log-count'])

        return self.predict(enet, "Elastic Net", features)
Example #7
def train_model(features_filename):
    training_data = np.loadtxt(features_filename, delimiter=",")

    X = training_data[:, :-1]
    y = training_data[:, -1]

    # 'rho' was renamed to 'l1_ratio' and precompute='auto' was removed in modern scikit-learn
    model = ElasticNet(alpha=1.0, l1_ratio=0.5, fit_intercept=True)
    model.fit(X, y)

    return model
Example #8
    def fit_model_12(self,toWrite=False):
        model = ElasticNet(alpha=1.0)

        for data in self.cv_data:
            X_train, X_test, Y_train, Y_test = data
            model.fit(X_train,Y_train)
            pred = model.predict(X_test)
            print("Model 12 score %f" % (logloss(Y_test,pred),))

        if toWrite:
            # pickle needs a binary file handle
            with open('model12/model.pkl', 'wb') as f2:
                pickle.dump(model, f2)
Example #9
 def predict_linear(self, enet=True):
     """How well can we do on this SRFF with a linear regression
     (with optional elastic-net regularisation)?"""
     if enet:
         clf = ElasticNet()
     else:
         clf = LinearRegression()
     # we have to transpose X here because sklearn uses the
     # opposite order (rows v columns). maybe this is a sign that
     # I'm using the wrong order.
     clf.fit(self.train_X.T, self.train_y)
     yhat = clf.predict(self.test_X.T)
     err = self.defn(self.test_y, yhat)
     return clf.intercept_, clf.coef_, err
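The transpose comment above reflects scikit-learn's convention that X has shape (n_samples, n_features). A minimal standalone sketch (made-up shapes, nothing reused from the snippet) of why the transpose is needed when features are stored as rows:

import numpy as np
from sklearn.linear_model import ElasticNet

# hypothetical feature-major storage: 5 features (rows) x 30 samples (columns)
X_featmajor = np.random.randn(5, 30)
y = np.random.randn(30)

clf = ElasticNet()
clf.fit(X_featmajor.T, y)   # transpose into (n_samples, n_features)
print(clf.coef_.shape)      # (5,) -- one coefficient per feature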
Example #10
def report_orig_en():
    # Train elastic net on original training data
    en = ElasticNet()
    st = time()
    en.fit(trainData.T, trainLabels)
    elapsed_en_fit = time() - st

    # Predict labels for original validation data
    st = time()
    y_pred = en.predict(validationData.T)
    elapsed_en_pred = time() - st

    # Report performance
    mse_orig = metrics.mean_squared_error(validationLabels, y_pred)
    return mse_orig, elapsed_en_fit, 0.
Example #11
def sklearn_linear_model_elastic_net():
    en = ElasticNet(fit_intercept=True, alpha=0.5)
    boston = load_boston()
    x = boston.data
    y = boston.target

    # modern KFold API: construct with n_splits, then iterate over kf.split(...)
    kf = KFold(n_splits=10)
    err = 0
    for train, test in kf.split(x):
        en.fit(x[train], y[train])
        p = en.predict(x[test])  # predict on the whole fold at once
        e = p - y[test]
        err += np.sum(e * e)
    rmse_10cv = np.sqrt(err / len(x))
    print("RMSE on 10-fold CV: {}".format(rmse_10cv))
Example #12
def fit_enet(train_X, train_y, test_X):
    """
    Use linear regression to predict. Elastic net is LR with L1 and L2
    regularisation.
    
    :param train_X:
    :param train_y:
    :param test_X:
    :return:
    """
    enet = ElasticNet()
    enet.fit(train_X, train_y)
    model = "ElasticNet int %.2f coefs %s" % (enet.intercept_, pprint(enet.coef_))
    yhat_train = enet.predict(train_X)
    yhat_test = enet.predict(test_X)
    
    return model, yhat_train, yhat_test
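As the docstring notes, the elastic net is linear regression with mixed L1 and L2 regularisation; the mix is set by l1_ratio. A small standalone sketch (synthetic data, not the variables above) showing that l1_ratio=1.0 makes the L2 term vanish, so ElasticNet reduces to plain Lasso:

import numpy as np
from sklearn.linear_model import ElasticNet, Lasso

rng = np.random.RandomState(0)
X = rng.randn(40, 8)
y = X @ rng.randn(8)

enet = ElasticNet(alpha=0.1, l1_ratio=1.0).fit(X, y)  # pure L1 penalty
lasso = Lasso(alpha=0.1).fit(X, y)
print(np.allclose(enet.coef_, lasso.coef_))  # True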
Example #13
def assert_regression_result(results, tol):
    regression_results = [r for r in results if
                          r["param"]["objective"] == "reg:linear"]
    for res in regression_results:
        X = scale(res["dataset"].X,
                  with_mean=isinstance(res["dataset"].X, np.ndarray))
        y = res["dataset"].y
        reg_alpha = res["param"]["alpha"]
        reg_lambda = res["param"]["lambda"]
        pred = res["bst"].predict(xgb.DMatrix(X))
        weights = xgb_get_weights(res["bst"])[1:]
        enet = ElasticNet(alpha=reg_alpha + reg_lambda,
                          l1_ratio=reg_alpha / (reg_alpha + reg_lambda))
        enet.fit(X, y)
        enet_pred = enet.predict(X)
        assert np.isclose(weights, enet.coef_, rtol=tol,
                          atol=tol).all(), (weights, enet.coef_)
        assert np.isclose(enet_pred, pred, rtol=tol, atol=tol).all(), (
            res["dataset"].name, enet_pred[:5], pred[:5])
    def __init__(self, Dict_TrainingData, Flt_Lambda, Flt_L1):
        # Only for two classes
        # Dict_TrainingData
            # Key : 0,1
            # Row : data
        self.Data1 = Dict_TrainingData[0] # N by 256 matrix
        self.Data2 = Dict_TrainingData[1] # V by 256 matrix
        self.Dim = len(self.Data1[0]) # 256

        self.X = np.concatenate((self.Data1, self.Data2), axis=0) # N / V augmented matrix
        self.X = self.X - np.mean(self.X,axis=0)

        self.NumClass1 = len(self.Data1) # N
        self.NumClass2 = len(self.Data2) # V
        self.TotalNum = self.NumClass1 + self.NumClass2

        self.Y = self.Construct_Y()
        self.D = np.dot(np.transpose(self.Y), self.Y) / float(self.TotalNum) # P
        self.Q = np.ones((2,1))

        InitialTheta = np.array([2,5])
        I = np.eye(2)
        Theta = np.dot(I - np.dot(np.dot(self.Q, np.transpose(self.Q)), self.D ), InitialTheta)
        Theta /= np.sqrt(np.dot(np.dot(np.transpose(Theta), self.D), Theta))

        MaxIter = 10000
        PrevB = np.ones(self.Dim)
        for idx in range(MaxIter):
            NewResp = np.dot(self.Y, Theta)
            elas = ElasticNet(alpha=Flt_Lambda, l1_ratio=Flt_L1)
            #
            # # Compute Coefficient
            # B = lasso.fit(X=self.X, y= NewResp).coef_
            B = elas.fit(X=self.X, y= NewResp).coef_
            # print B
            #
            # New OptScore
            Part1 = I - np.dot(np.dot(self.Q, np.transpose(self.Q)),self.D)
            Part2 = np.dot(Part1, np.linalg.inv(self.D))
            Part3 = np.dot(Part2, np.transpose(self.Y))
            WaveTheta = np.dot(np.dot(Part3, self.X), B)
            # print WaveTheta
            Theta = WaveTheta / np.sqrt(np.dot(np.dot(np.transpose(WaveTheta),self.D),WaveTheta))

            if np.sum(np.abs(B - PrevB)) < 1e-6:
                break
            else:
                PrevB = B

        # print B
        self.B = B 
Example #15
def lasso(filename, x_train_orig, x_devel_orig, x_test_orig, lab_train_orig, lab_devel_orig, lab_test_orig):

    # Normalize the data
    scaler_data = preprocessing.StandardScaler().fit(x_train_orig.toarray())
    x_train = scaler_data.transform(x_train_orig.toarray())
    x_devel = scaler_data.transform(x_devel_orig.toarray())
    x_test = scaler_data.transform(x_test_orig.toarray())

    scaler_lab = preprocessing.StandardScaler().fit(lab_train_orig)
    lab_train = scaler_lab.transform(lab_train_orig)
    lab_devel = scaler_lab.transform(lab_devel_orig)
    lab_test = scaler_lab.transform(lab_test_orig)

    # Elastic Net

    clf = ElasticNet(alpha = 0.025, l1_ratio = 0.7)
    clf.fit (x_train, lab_train)
    nz = (clf.coef_ != 0)

    # Save the resulting parameter files, keeping only the features with nonzero coefficients
    dump_svmlight_file(x_train_orig[:, nz], lab_train_orig, filename+"_elasso.train.libsvm", zero_based=False, comment=None, query_id=None)
    dump_svmlight_file(x_devel_orig[:, nz], lab_devel_orig, filename+"_elasso.devel.libsvm", zero_based=False, comment=None, query_id=None)
    dump_svmlight_file(x_test_orig[:, nz], lab_test_orig, filename+"_elasso.test.libsvm", zero_based=False, comment=None, query_id=None)
Example #16
def create_ml_classifier(df):
    import operator
    X = np.array(df.drop('base_ip_release', axis=1))
    y = np.array(df['base_ip_release'])
    #clf = LinearRegression()
    clf = ElasticNet(alpha=1,l1_ratio=0.5)
    #clf = Ridge(alpha=2)
    # train_X,test_X,train_y,test_y = cross_validation.train_test_split(X,y,train_size=0.9)
    #
    #
    # sc = StandardScaler()
    # sc.fit(train_X)
    # X_train_std = sc.transform(train_X)
    # X_test_std = sc.transform(test_X)
    #
    # clf.fit(X_train_std,train_y)
    # print clf.predict(X_test_std)
    # print accuracy_score(test_y,clf.predict(X_test_std))


    # modern KFold API: construct with n_splits, then iterate over kf.split(...)
    kf = k(n_splits=10)
    c = 0
    min_dict = {}
    get_error = []
    for train, test in kf.split(X):
        # note: fit() returns the estimator itself, so every min_dict entry
        # references the same (re-fitted) object
        get_clif = clf.fit(X[train], y[train])
        p = clf.predict(X[test])
        e = (p - y[test])
        t = np.dot(e, e)  # sum of squared errors on this fold
        c += t
        min_dict[t] = get_clif
        get_error.append(t)
    min_error = min(get_error)
    print(sorted(min_dict.items(), key=operator.itemgetter(0)))
    print(min_dict[min_error])
    print(c)
    print(np.sqrt(c / len(X)))
    return min_dict[min_error]
Example #17
def main():
    seq = [[(i * .1, k * .1) for i in range(1, 3)] for k in range(1, 3)]
    seq = list(itertools.chain.from_iterable(seq))

    counter = 1
    boston = datasets.load_boston()
    X = boston.data
    y = boston.target

    kfolds = KFold(n_splits=4)
    for traini, testi in kfolds.split(X):
        alpha, l1 = seq[counter]
        print(seq[counter])
        print(alpha, l1)
        enet = ElasticNet(alpha=alpha, l1_ratio=l1)
        y_pred = enet.fit(X[traini], y[traini]).predict(X[testi])
        score = r2_score(y[testi], y_pred)
        print(score)
Example #18
def testLasso():
    # The objective adds a penalty on w, scaled by the number of samples.
    # Linear fitting works well here because the underlying model is sparse.
    import numpy as np
    import matplotlib.pyplot as plt
    from sklearn.metrics import r2_score
    # First generate some sparse data by hand
    np.random.seed(42)
    n_samples, n_features = 50, 200
    X = np.random.randn(n_samples, n_features)
    coef = 3 * np.random.randn(n_features)  # the true coefficients
    inds = np.arange(n_features)
    np.random.shuffle(inds)  # shuffle the feature indices
    coef[inds[10:]] = 0  # zero out all but 10 coefficients to make the model sparse
    y = np.dot(X, coef)  # project the data onto the coefficients
    # add a little noise
    y += 0.01 * np.random.normal(size=n_samples)

    X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
    X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]

    from sklearn.linear_model import Lasso
    alpha = 0.1
    lasso = Lasso(alpha=alpha)

    y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
    r2_score_lasso = r2_score(y_test, y_pred_lasso)  # about 0.38
    print(lasso)
    print("r2_score's result is %f" % r2_score_lasso)

    from sklearn.linear_model import ElasticNet
    enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
    y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
    r2_score_enet = r2_score(y_test, y_pred_enet)  # about 0.24, worse than the lasso
    print(enet)
    print("enet's result is %f" % r2_score_enet)

    plt.plot(enet.coef_, label='Elastic net coefficients')
    plt.plot(lasso.coef_, label='Lasso coefficients')
    plt.plot(coef, '--', label='original coefficients')
    plt.legend(loc="best")
    plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
              % (r2_score_lasso, r2_score_enet))
    plt.show()
Example #19
def imputer_train(col_ind):
    import pandas as pd
    # pandas removed DataFrame.from_csv; read_csv with index_col=0 is the equivalent
    data = pd.read_csv("/Users/DboyLiao/Documents/kaggle/data/Display_Advertising_Challenge/complete_train.csv", index_col=0)
    print("[" + str(col_ind) + "th column] " + "Loading data.")
    data = data.set_index("Id")
    data = data.drop("Label", axis=1)
    col_name = data.columns[col_ind]
    col_classes = ["numeric" if ind <= 12 else "categorical" for ind in range(39)]
    col_class = col_classes[col_ind]
    print("[" + str(col_ind) + "th column] " + "Processing.")
    Y = data[col_name]
    X = data.drop(col_name, axis=1)
    if col_class == 'categorical':
        svc = SVC(C = 10)
        imputer = svc.fit(X, Y)
    elif col_class == 'numeric':
        EN = ElasticNet()
        imputer = EN.fit(X, Y)
    else:
        pass
    return imputer
Example #20
def lassoEnet():
    # renamed from `Lasso` so the function does not shadow the sklearn class
    from sklearn.linear_model import Lasso
    from sklearn.metrics import r2_score
    alpha = 0.1
    lasso = Lasso(alpha=alpha)
    trainDat = shortData
    trainLab = shortLabels


    lassoPred = lasso.fit(trainDat, trainLab)
    labPredict = lassoPred.predict(testDat)
    r2val = r2_score(testLab, labPredict)
    print(lasso)
    print("r^2 for lasso testing is: ", r2val)

    from sklearn.linear_model import ElasticNet
    enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
    enetPred = enet.fit(trainDat, trainLab)
    labPredict_enet = enet.predict(testDat)
    r2val_enet = r2_score(testLab, labPredict_enet)
    print(enet)
    print("r^2 for enet testing is: ", r2val_enet)
Example #21
####################################################################################
####################################################################################
#Elastic net blender
####################################################################################
####################################################################################
####################################################################################
from sklearn.linear_model import ElasticNet
# objective function:
#   1 / (2 * n_samples) * ||y - Xw||^2_2
#   + alpha * l1_ratio * ||w||_1
#   + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

# note: the deprecated `normalize` argument has been dropped from this call
enet=ElasticNet(alpha=1.0, l1_ratio=0.5, fit_intercept=False,
    precompute=False, max_iter=1000, copy_X=True, tol=0.0001, warm_start=False,
    positive=False)

enet_mod=enet.fit(X_net_valid,valid_Y)
pred_holdout=enet_mod.predict(X_net_holdout)

holdout_gini=Gini(holdout_Y,pred_holdout)

holdout_rmse=np.sqrt(sum( (pred_holdout[m] - holdout_Y[m])**2 for m in range(len(holdout_Y))) / float(len(holdout_Y)))
print(holdout_rmse, holdout_gini)

pred_test=enet_mod.predict(X_net_test)

df=pd.DataFrame(pred_test)
df.columns=['Hazard']
indices=np.loadtxt("X_test_indices.gz",delimiter=",").astype('int32')
df.insert(loc=0,column='Id',value=indices)
df.to_csv("XGB_predictions.csv",sep=",",index=False)
####################################################################################
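The comment block above states scikit-learn's elastic-net objective. A self-contained sketch (synthetic data, nothing reused from the blender snippet) that evaluates that objective at the fitted coefficients:

import numpy as np
from sklearn.linear_model import ElasticNet

rng = np.random.RandomState(0)
X = rng.randn(50, 5)
y = X @ np.array([1.0, 0.0, -2.0, 0.0, 0.5]) + 0.1 * rng.randn(50)

alpha, l1_ratio = 0.1, 0.5
enet = ElasticNet(alpha=alpha, l1_ratio=l1_ratio, fit_intercept=False).fit(X, y)

w = enet.coef_
n_samples = X.shape[0]
objective = (np.sum((y - X @ w) ** 2) / (2 * n_samples)
             + alpha * l1_ratio * np.sum(np.abs(w))
             + 0.5 * alpha * (1 - l1_ratio) * np.sum(w ** 2))
print("objective value at the fitted w:", objective)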
Example #22
# Fastfood approximation of Gaussian kernel
para = FastfoodPara(n, d)
st = time()
PHI_train, _ = FastfoodForKernel(trainData, para, sgm)
elapsed_ff_kern_train = time() - st
print("Took {:0.4g}s to compute training Fastfood expansion.".format(elapsed_ff_kern_train))
st = time()
PHI_valid, _ = FastfoodForKernel(validationData, para, sgm)
elapsed_ff_kern_valid = time() - st
print("Took {:0.4g}s to compute validation Fastfood expansion.".format(elapsed_ff_kern_valid))

# Train elastic net on projected training data
en = ElasticNet()
st = time()
en.fit(PHI_train.T, trainLabels)
elapsed_en_fit = time() - st
print("Took {:0.4g}s to fit elastic net on projected training data.".format(elapsed_en_fit))

# Predict labels for projected validation data
st = time()
y_pred = en.predict(PHI_valid.T)
elapsed_en_pred = time() - st
print("Took {:0.4g}s to predict on projected validation data.".format(elapsed_en_pred))

# Report performance
mse_proj = metrics.mean_squared_error(validationLabels, y_pred)
print("For projected data, MSE = {:0.4g}.".format(mse_proj))

# Train elastic net on original training data
en = ElasticNet()
Example #23
    print(ridge_scores.mean())
    print(ridge_scores)

# combination of ridge and Lasso
print("Elastic net regularization")

for alpha in range(1,5):
    elastic_net = ElasticNet(alpha)
    elastic_net_scores = cross_val_score(elastic_net, x, y, cv=5)
    print("alpha={a}".format(a=alpha))
    print(elastic_net_scores.mean())
    print(elastic_net_scores)

# best performing regressor for this data set was Elastic net with alpha=1
# with score = 0.472705248975
# draw scatter plot for values predicted with this regressor

print("Showing scatter plot for elastic net with alpha = 1")

elastic_net = ElasticNet(1)
elastic_net.fit(x, y)
predicted_y = elastic_net.predict(x)

fig = plt.figure()
plt.scatter(y, predicted_y, alpha=0.3)
fig.suptitle('Boston real estate pricing', fontsize=20)
plt.figtext(.5,.9,'Elastic net regularization, alpha=1', fontsize=15, ha='center')
plt.xlabel('Actual value, $1000s', fontsize=18)
plt.ylabel('Predicted value, $1000s', fontsize=18)
plt.show()
Example #24
                                                 scoring=scoring)
    results.append(cv_results)
    names.append(name)
    msg = "MAE: %s %.3f (%.3f)" % (name, cv_results.mean(), cv_results.std())
    print(msg)

# Compare Algorithms
fig = plt.figure(figsize=(10, 10))
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
plt.show()

model = ElasticNet()
model.fit(X_train, Y_train)

# ElasticNet has no feature_importances_ attribute; use the magnitude of the
# coefficients as an importance score instead
feat_imp = pd.Series(np.abs(model.coef_),
                     index=X_train.columns).sort_values(ascending=False)
feat_imp.plot(kind='bar', title='Feature Importances')
plt.ylabel('Feature Importance Score')

best_features = feat_imp[feat_imp > 0]
best_features_columns = list(best_features.index)

predictions = model.predict(X_test)
print(mean_absolute_error(Y_test, predictions))
print(mean_squared_error(Y_test, predictions))
print(r2_score(Y_test, predictions))

prediction_df = pd.DataFrame(predictions, columns=["prediction"])
Example #25



#########################################
# ELASTIC NET MODEL

from sklearn.linear_model import ElasticNet

regr = ElasticNet(alpha=1.9, l1_ratio=0.65, random_state=0)

regr.fit(X_train,y_train)

# Calling the score method, which compares the predicted values to the actual values

y_score = regr.score(X_test, y_test)

# For regressors, score returns the coefficient of determination (R-squared)
print(y_score)
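Since score returns R-squared, it can be cross-checked against sklearn.metrics.r2_score; a two-line sketch reusing the regr, X_test and y_test names from the snippet above:

from sklearn.metrics import r2_score

print(regr.score(X_test, y_test))              # R-squared via the estimator
print(r2_score(y_test, regr.predict(X_test)))  # the same value via the metric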



#########
# Theil sen model


from sklearn.linear_model import TheilSenRegressor # Theil Sen Regressor Model
Example #26
#%%
'''
an implementation of Lasso Regression
'''
from sklearn.linear_model import Lasso
lasso_reg = Lasso(alpha=0.1)
lasso_reg.fit(X_p, Y_p)
lasso_reg.predict([[1.5]])

#%%
'''
an implementation of the Elastic Net
'''
from sklearn.linear_model import ElasticNet
elastic_net = ElasticNet(alpha=0.1, l1_ratio=0.5)
elastic_net.fit(X_p, Y_p)
elastic_net.predict([[1.5]])

#%%
'''
an implementation of a simple Logistic Regression
'''
from sklearn import datasets
iris = datasets.load_iris()
list(iris.keys())

#%%
X = iris["data"][:, 3:]  # petal width
X
#%%
Y = (iris["target"] == 2).astype(np.int)  # 1 if Iris-Virginica, else 0
Example #27
# Importing libraries
import numpy as np
import pandas as pd

# Importing the dataset
X_train = pd.read_csv("X_train.csv")
y_train = pd.read_csv("y_train.csv")["SalePrice"]
X_test = pd.read_csv("X_test.csv")
test_ID = pd.read_csv("test_ID.csv")['Id']

#Create ElasticNet Regression Model
from sklearn.linear_model import ElasticNet

Elastic_Net = ElasticNet(random_state=42)
Elastic_Net.fit(X_train, y_train)

# Applying k-Fold Cross Validation
from sklearn.model_selection import cross_val_score

accuracies = cross_val_score(estimator=Elastic_Net,
                             X=X_train,
                             y=y_train,
                             cv=10)
accuracies.mean()
accuracies.std()

# Applying Grid Search to find the best parameters
from sklearn.model_selection import GridSearchCV

parameters = [{
Example #28
def ELasticNet_Eval(class_targ,elem,data,pred=[],name=''):
    
    # Fits an ElasticNet model from `data` (a pd.DataFrame)
    #####################################################################################
    # class_targ gives the target - the parameter to predict
    # elem corresponds to the name of the target - will appear on graphs and saved files
    # pred is a list of the predictors used to build the model
    # name = accessory parameter to further personalize names of saved files / graphs
    
    # build the predictor set
    # clean_up holds the dataframe of predictors + target
    # rows with NaN values are erased
    clean_up=pd.DataFrame()
    for i in pred:
        clean_up=pd.concat([clean_up,data[i]],axis=1)
        clean_up_scale=clean_up  # predictors-only frame, kept for fitting the scaler
    clean_up=pd.concat([clean_up,data[class_targ]],axis=1)
    
    clean_up.dropna(inplace=True)
    clean_up_scale.dropna(inplace=True)

    #predictors is a dataframe of predictors
    predictors=pd.DataFrame()
    for i in pred:
        predictors=pd.concat([predictors,clean_up[i]],axis=1)
    list_pred=list(predictors.columns)
    predictors.index = pd.RangeIndex(len(predictors.index))
    
    #scale dataset - scaling on the predictors - all rows considered
    scaler=StandardScaler()
    scaler.fit(clean_up_scale)
    predictors=scaler.transform(predictors)
    predictors=pd.DataFrame(predictors)
    predictors.columns=list_pred       
            
    # build the target
    target=pd.DataFrame()
    target=pd.concat([target,clean_up[class_targ]],axis=1)
    target = target.reset_index(drop=True)
    
    # check the data
    print(target)
    print(predictors)

    # define the folds used for cross-validation of the hyperparameters
    stratified=RepeatedKFold(n_splits=3,n_repeats=15)
 
    # grid search for ElasticNet
    param_ElasticNet={'alpha': [1,0.1,0.05,0.02,0.01,0.005,0.002,0.001,0.0005,0.0001, 0.00005, 0.00001],'l1_ratio':list(np.arange(0.0, 1.0, 0.1))}
    ElaNet=ElasticNet()
    grid_s_lass=GridSearchCV(ElaNet,param_ElasticNet,cv=stratified, scoring='neg_median_absolute_error')
    grid_s_lass.fit(predictors, target)

    #Best params are kept to build the model
    ElaNet_opt=ElasticNet(alpha=grid_s_lass.best_params_['alpha'],l1_ratio=grid_s_lass.best_params_['l1_ratio'])
    model=ElaNet_opt.fit(predictors,target)
    
    #saving the model for re-use
    flag='finalized_model_ElasticNet_%s' % (elem+'_'+name)
    filename = '%s.sav' %flag
    pickle.dump(model, open(filename,'wb'))
    
    #coefficient plot saved to directory
    lasso_coef=model.coef_
    figure1= plt.figure(figsize = (10,10))
    print("Tuned ElasticNet Parameters: {}".format(grid_s_lass.best_params_))
    print("Best score is {}".format(grid_s_lass.best_score_))
    print("ElasticNet coefficients per predictor: {}".format(lasso_coef))
    plt.plot(range(len(predictors.columns)), lasso_coef)
    plt.xticks(range(len(predictors.columns)), predictors.columns.values)
    plt.xticks(rotation=90)
    plt.tight_layout(pad=1.5, w_pad=1.5, h_pad=1.5)
    #plt.margins(0.02)
    plt.xlabel('predictors')
    plt.ylabel('relative importance of the predictors used')
    plt.title(elem)
    figure1.savefig("coefficients_%s.png" %(elem+'_'+name))
    plt.clf()
    
    loaded_model = pickle.load(open(filename, 'rb'))
    
    
    return loaded_model#,scaler,predictors,target
Example #29
lasso = Lasso(alpha=alpha)

y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)

# %%
# ElasticNet
# ---------------------------------------------------

from sklearn.linear_model import ElasticNet

enet = ElasticNet(alpha=alpha, l1_ratio=0.7)

y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)


# %%
# Plot
# ---------------------------------------------------

m, s, _ = plt.stem(
    np.where(enet.coef_)[0],
    enet.coef_[enet.coef_ != 0],
    markerfmt="x",
    label="Elastic net coefficients",
    use_line_collection=True,
Example #30
def train(in_alpha, in_l1_ratio):
    import os
    import warnings
    import sys

    import pandas as pd
    import numpy as np
    from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
    from sklearn.model_selection import train_test_split
    from sklearn.linear_model import ElasticNet

    import mlflow
    import mlflow.sklearn

    def eval_metrics(actual, pred):
        rmse = np.sqrt(mean_squared_error(actual, pred))
        mae = mean_absolute_error(actual, pred)
        r2 = r2_score(actual, pred)
        return rmse, mae, r2

    warnings.filterwarnings("ignore")
    np.random.seed(40)

    # Read the wine-quality csv file (make sure you're running this from the root of MLflow!)
    #  Assumes wine-quality.csv is located in the same folder as the notebook
    wine_path = "wine-quality.csv"
    data = pd.read_csv(wine_path)

    # Split the data into training and test sets. (0.75, 0.25) split.
    train, test = train_test_split(data)

    # The predicted column is "quality" which is a scalar from [3, 9]
    train_x = train.drop(["quality"], axis=1)
    test_x = test.drop(["quality"], axis=1)
    train_y = train[["quality"]]
    test_y = test[["quality"]]

    # Set default values if no alpha is provided
    # (float(None) would raise a TypeError, so test the argument itself)
    if in_alpha is None:
        alpha = 0.5
    else:
        alpha = float(in_alpha)

    # Set default values if no l1_ratio is provided
    if in_l1_ratio is None:
        l1_ratio = 0.5
    else:
        l1_ratio = float(in_l1_ratio)

    mlflow.set_tracking_uri(
        "http://mlflow-server-svc-vpavlin-jupyterhub.cloud.paas.upshift.redhat.com"
    )
    # Useful for multiple runs (only doing one run in this sample notebook)
    with mlflow.start_run():
        # Execute ElasticNet
        lr = ElasticNet(alpha=alpha, l1_ratio=l1_ratio, random_state=42)
        lr.fit(train_x, train_y)

        # Evaluate Metrics
        predicted_qualities = lr.predict(test_x)
        (rmse, mae, r2) = eval_metrics(test_y, predicted_qualities)

        # Print out metrics
        print("Elasticnet model (alpha=%f, l1_ratio=%f):" % (alpha, l1_ratio))
        print("  RMSE: %s" % rmse)
        print("  MAE: %s" % mae)
        print("  R2: %s" % r2)

        # Log parameter, metrics, and model to MLflow
        mlflow.log_param("alpha", alpha)
        mlflow.log_param("l1_ratio", l1_ratio)
        mlflow.log_metric("rmse", rmse)
        mlflow.log_metric("r2", r2)
        mlflow.log_metric("mae", mae)

        mlflow.sklearn.log_model(lr, "model")
Example #31
class SlimModel(object):
    def __init__(self, data, num_users, num_items, l1_ratio, alpha, epochs):

        self._data = data
        self._num_users = num_users
        self._num_items = num_items
        self._l1_ratio = l1_ratio
        self._alpha = alpha
        self._epochs = epochs

        self.md = ElasticNet(alpha=self._alpha,
                             l1_ratio=self._l1_ratio,
                             positive=True,
                             fit_intercept=False,
                             copy_X=False,
                             precompute=True,
                             selection='random',
                             max_iter=self._epochs,
                             random_state=42,
                             tol=1e-3)

        self._w_sparse = None
        self._A_tilde = None

    def train(self, verbose):
        train = self._data.sp_i_train_ratings

        dataBlock = 10000000

        rows = np.empty(dataBlock, dtype=np.int32)
        cols = np.empty(dataBlock, dtype=np.int32)
        values = np.empty(dataBlock, dtype=np.float32)

        numCells = 0

        start_time = time.time()
        start_time_printBatch = start_time

        for currentItem in range(self._num_items):
            y = train[:, currentItem].toarray()

            # set the j-th column of X to zero
            start_pos = train.indptr[currentItem]
            end_pos = train.indptr[currentItem + 1]

            current_item_data_backup = train.data[start_pos:end_pos].copy()
            train.data[start_pos:end_pos] = 0.0

            # fit one ElasticNet model per column
            self.md.fit(train, y)

            nonzero_model_coef_index = self.md.sparse_coef_.indices
            nonzero_model_coef_value = self.md.sparse_coef_.data

            local_topK = min(len(nonzero_model_coef_value) - 1, 100)

            relevant_items_partition = (
                -nonzero_model_coef_value
            ).argpartition(local_topK)[0:local_topK]
            relevant_items_partition_sorting = np.argsort(
                -nonzero_model_coef_value[relevant_items_partition])
            ranking = relevant_items_partition[
                relevant_items_partition_sorting]

            for index in range(len(ranking)):
                if numCells == len(rows):
                    rows = np.concatenate(
                        (rows, np.zeros(dataBlock, dtype=np.int32)))
                    cols = np.concatenate(
                        (cols, np.zeros(dataBlock, dtype=np.int32)))
                    values = np.concatenate(
                        (values, np.zeros(dataBlock, dtype=np.float32)))

                rows[numCells] = nonzero_model_coef_index[ranking[index]]
                cols[numCells] = currentItem
                values[numCells] = nonzero_model_coef_value[ranking[index]]

                numCells += 1

            train.data[start_pos:end_pos] = current_item_data_backup

            if verbose and (time.time() - start_time_printBatch > 300 or
                            (currentItem + 1) % 1000 == 0
                            or currentItem == self._num_items - 1):
                print(
                    '{}: Processed {} ( {:.2f}% ) in {:.2f} minutes. Items per second: {:.0f}'
                    .format('SLIMElasticNetRecommender', currentItem + 1,
                            100.0 * float(currentItem + 1) / self._num_items,
                            (time.time() - start_time) / 60,
                            float(currentItem) / (time.time() - start_time)))

                sys.stdout.flush()
                sys.stderr.flush()

                start_time_printBatch = time.time()

        # generate the sparse weight matrix
        self._w_sparse = sp.csr_matrix(
            (values[:numCells], (rows[:numCells], cols[:numCells])),
            shape=(self._num_items, self._num_items),
            dtype=np.float32)

        train = train.tocsr()
        self._A_tilde = train.dot(self._w_sparse).A

    def predict(self, u, i):
        return self._A_tilde[u, i]

    def get_user_recs(self, user, k=100):
        user_items = self._data.train_dict[user].keys()
        predictions = {
            i: self.predict(user, i)
            for i in self._data.items if i not in user_items
        }
        indices, values = zip(*predictions.items())
        indices = np.array(indices)
        values = np.array(values)
        partially_ordered_preds_indices = np.argpartition(values, -k)[-k:]
        real_values = values[partially_ordered_preds_indices]
        real_indices = indices[partially_ordered_preds_indices]
        local_top_k = real_values.argsort()[::-1]
        return [(real_indices[item], real_values[item])
                for item in local_top_k]

    def get_model_state(self):
        saving_dict = {}
        saving_dict['_A_tilde'] = self._A_tilde
        return saving_dict

    def set_model_state(self, saving_dict):
        self._A_tilde = saving_dict['_A_tilde']
Example #32
class SLIM():
    
    
    def __init__(self, alpha, l1_ratio, user_num, item_num, lin_model='elastic',):
        if lin_model == 'lasso':
            self.reg = Lasso(alpha=alpha, positive=True)
        elif lin_model == 'elastic':
            self.reg = ElasticNet(alpha=alpha, l1_ratio=l1_ratio, positive=True)
            #self.reg = GLM(distr='gaussian', alpha=l1_ratio, reg_lambda=alpha)
            
        self.user_num = user_num
        self.item_num = item_num

            
    def fit(self, user_item_train_df):
        # rating_mat
        self.row = np.array([r[0] for r in user_item_train_df.values], dtype=int)
        self.col = np.array([r[1] for r in user_item_train_df.values], dtype=int)
        self.data = np.ones(len(user_item_train_df), dtype=int)
        self.rating_mat = csr_matrix((self.data, (self.row, self.col)), shape = (self.user_num, self.item_num))
        
        # solve one linear model per item
        sim_mat = []
        for i in range(self.item_num):
            X = self.del_col(i)
            y = self.rating_mat[:, i]
    
            self.reg.fit(X.toarray(), y.toarray())
            w = np.insert(self.reg.coef_, i, 0)[:,  np.newaxis]
            sim_mat.append(w)
    
            #if i > 1:
            #    break

        self.sim_mat = np.concatenate(sim_mat, axis=1)


    def fit_multi(self, user_item_train_df):
        # rating_mat
        self.row = np.array([r[0] for r in user_item_train_df.values], dtype=int)
        self.col = np.array([r[1] for r in user_item_train_df.values], dtype=int)
        self.data = np.ones(len(user_item_train_df), dtype=int)
        self.rating_mat = csr_matrix((self.data, (self.row, self.col)), shape = (self.user_num, self.item_num))
        
        # solve the linear models in parallel
        sim_mat = Parallel(n_jobs=-1)([delayed(self.solve_lin_model)(n) for n in range(self.item_num)])
        self.sim_mat = np.concatenate(sim_mat, axis=1)

        
    def solve_lin_model(self, idx):
        X = self.del_col(idx)
        y = self.rating_mat[:, idx]
        self.reg.fit(X.toarray(), y.toarray())
        w = np.insert(self.reg.coef_, idx, 0)[:,  np.newaxis]
        return w
        

    def del_col(self, col_idx):
        row_new = self.row[self.col != col_idx]
        col_new = self.col[self.col != col_idx]
        col_new[col_new > col_idx] = col_new[col_new > col_idx] - 1
        data_new = self.data[self.col != col_idx]
    
        return csr_matrix((data_new, (row_new, col_new)), shape = (self.user_num, self.item_num-1))

    
    def load_sim_mat(self, path, user_item_train_df):
        # rating mat
        self.row = np.array([r[0] for r in user_item_train_df.values], dtype=int)
        self.col = np.array([r[1] for r in user_item_train_df.values], dtype=int)
        self.data = np.ones(len(user_item_train_df), dtype=int)
        self.rating_mat = csr_matrix((self.data, (self.row, self.col)), shape = (self.user_num, self.item_num))
        
        self.sim_mat = np.loadtxt(path)

    
    def save_sim_mat(self, path):
        np.savetxt(path, self.sim_mat)

        
    def predict(self):
        pred_mat = np.dot(self.rating_mat.toarray(), self.sim_mat)
        self.rec_mat = pred_mat - self.rating_mat

    def pred_ranking(self, user_id):
        # return the predicted ranking for a given user
        #rec_mat = self.pred_mat - self.rating_mat
        row_user = self.rec_mat[user_id, :]
        #print(row_user)
        rec_item_idx = np.argsort(row_user)[::-1]

        return np.array(rec_item_idx)[0, :]
Example #33
# print('lasso_best_score_',lasso_GS.best_score_)
# print('lasso_best_params_',lasso_GS.best_params_)
lasso_clf = linear_model.Lasso(alpha=0.1)
lasso_clf.fit(train, y_train)
lasso_train = lasso_clf.predict(train)
print(r2_score(y_train, lasso_train))
# lasso_pred=lasso_clf.predict(test)
# # if a prediction is negative, clip it to 0
# lasso_pred = map(lambda x: x if x >= 0 else 0, lasso_pred)
# submit['y']=list(lasso_pred)
# submit.to_csv('LassoPrediction.csv',index=False)

EN_clf = ElasticNet(alpha=0.1, l1_ratio=0.9)
# EN_param={'alpha':np.arange(0.1,1,0.1),
#           'l1_ratio':np.arange(0.1,1,0.1)
#           }
# EN_GS=GridSearchCV(EN_clf,EN_param,cv=5)
# EN_GS.fit(train,y_train)
# print('lasso_best_estimator_',EN_GS.best_estimator_)
# print('lasso_best_score_',EN_GS.best_score_)
# print('lasso_best_params_',EN_GS.best_params_)

EN_clf.fit(train, y_train)
EN_train = EN_clf.predict(train)
print(r2_score(y_train, EN_train))
EN_pred = EN_clf.predict(test)
# if a prediction is negative, clip it to 0
EN_pred = map(lambda x: x if x >= 0 else 0, EN_pred)
submit['y'] = list(EN_pred)
submit.to_csv('ENPrediction.csv', index=False)
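As an aside, the map-based clipping above can be replaced by one vectorized call, assuming EN_pred is still the ndarray returned by predict:

import numpy as np

EN_pred = np.clip(EN_pred, 0, None)  # negative predictions become 0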
Example #34
def naive_spca(data, no_of_ev_of_gram, r):

    no_of_points = data.shape[0]
    feature_size = data.shape[1]
    sparse_pc_old = np.zeros((feature_size, no_of_ev_of_gram))
    ################################
    #SVD gives u s v.T and not u s v
    ################################
    u, s, v = np.linalg.svd(data, full_matrices=0)
    print(s)
    s2 = np.diag(s)
    for i in range(s2.shape[0]):
        s2[i, i] = 1 / np.sqrt(s[i])
    #print(v[0:5,:].T)
    #print(np.dot(v[0:5,:],v[0:5,:].T))

    j = 0
    sparse_pc_list = []
    sparse_pc_list.append(sparse_pc_old)

    while (j < 6):
        sparse_pc = np.zeros((feature_size, no_of_ev_of_gram))
        if j == 0:
            a = v[0:no_of_ev_of_gram, 0:].T
        j = j + 1
        for i in range(no_of_ev_of_gram):
            y = data.dot(a[0:, i])
            #y = np.dot(data,a[0:,i])
            elastic = ElasticNet(alpha=1, l1_ratio=r, max_iter=40000)
            elastic.fit(data * np.sqrt(2 * no_of_points),
                        y * np.sqrt(2 * no_of_points))
            pc = elastic.coef_
            #print(pc)
            sparse_pc[0:, i] = pc
        u1, s1, v1 = np.linalg.svd(np.dot(
            np.dot(np.dot(s2, u.T), np.dot(data.T, data)), sparse_pc),
                                   full_matrices=0)
        #u1,s1,v1=np.linalg.svd(np.dot(np.dot(data.T,data),sparse_pc),full_matrices=0)
        #pdb.set_trace()
        #a = u1[0:,0:sig_dim].dot(v1)
        a = np.dot(np.dot(u, s2), u1.dot(v1))
        #a = u1.dot(v1)
        #print(sparse_pc)
        if ((np.linalg.norm(sparse_pc - sparse_pc_list[j - 1],
                            ord='fro'))) < 0.0008:
            #print((np.linalg.norm(sparse_pc-sparse_pc_list[j-1],ord='fro')))
            sparse_pc_list.append(sparse_pc)
            break
        sparse_pc_list.append(sparse_pc)
    #print(sparse_pc_list)
    nrm = np.sqrt(
        np.sum(sparse_pc_list[len(sparse_pc_list) - 1] *
               sparse_pc_list[len(sparse_pc_list) - 1],
               axis=0))
    #print(nrm)
    #sparse_pc_list[len(sparse_pc_list)-1]=sparse_pc_list[len(sparse_pc_list)-1]/nrm
    for i in range(no_of_ev_of_gram):
        sc = np.sqrt(
            np.dot(sparse_pc_list[len(sparse_pc_list) - 1][:, i].T,
                   np.dot(data, sparse_pc_list[len(sparse_pc_list) - 1][:,
                                                                        i])))
        sparse_pc_list[len(sparse_pc_list) -
                       1][:,
                          i] = (1 / sc) * sparse_pc_list[len(sparse_pc_list) -
                                                         1][:, i]
        #sparse_pc_list[len(sparse_pc_list)-1][:,i] = (1/np.sqrt(s[i]))*sparse_pc_list[len(sparse_pc_list)-1][:,i]
    return sparse_pc_list[len(sparse_pc_list) - 1]
Example #35
 def fit(self, *args, **kwargs):
     return ElasticNet.fit(self, *args, **kwargs)
Example #36
 def BuildModel(self, data, labels):
   # Create and train the regressor.
   elasticNet = SElasticNet(alpha=self.rho,
                            l1_ratio=self.alpha)
   elasticNet.fit(data, labels)
   return elasticNet
Example #37
print(X_test.shape)


y_train=df_train["Purchase"]
df_train=df_train.drop("Purchase", axis=1)

#from sklearn.feature_selection import SelectKBest
#from sklearn.feature_selection import f_regression
#sel = SelectKBest(f_regression, k=10)
#X_tr=pd.DataFrame(sel.fit_transform(X_train,y_train))
#X_tst=pd.DataFrame(sel.transform(X_test))

#print X_tr.shape
#print X_tst.shape

from sklearn.linear_model import ElasticNet
model=ElasticNet(alpha=0.001)

model.fit(X_train,y_train)
y_pred=model.predict(X_test)
#print y_pred.shape
#print key1.shape
#print key2.shape


out=pd.DataFrame()
out["User_ID"]=key1
out["Product_ID"]=key2
out["Purchase"]=y_pred
out.to_csv('outavb.csv', index=False)
Example #38
print('mean_square_error:', mean_squared_error(y_pca_test, y_pca_SVRPred))
print('r2:', r2_score(y_pca_test, y_pca_SVRPred))
print('MAE:', mean_absolute_error(y_pca_test, y_pca_SVRPred))

from sklearn import linear_model

Lasso = linear_model.Lasso(alpha=1.3)
regrLasso = Lasso.fit(X_train, y_train)
y_LassPred = regrLasso.predict(X_test)
print('r2:', r2_score(y_test, y_LassPred))  # r2_score expects (y_true, y_pred)

from sklearn.linear_model import ElasticNet
from sklearn.datasets import make_regression

regr = ElasticNet(random_state=0)
regrElastic = regr.fit(X_train, y_train)
y_ElasPred = regrElastic.predict(X_test)
print('r2:', r2_score(y_test, y_ElasPred))

from sklearn.tree import DecisionTreeRegressor

DT = DecisionTreeRegressor(max_depth=5)
DTRegre = DT.fit(X_train, y_train)
y_DTPred = DTRegre.predict(X_test)
print('r2:', r2_score(y_test, y_DTPred))
print(y_DTPred)
plt.scatter(y_test, y_DTPred)

from sklearn.neighbors import KNeighborsRegressor

neigh = KNeighborsRegressor(n_neighbors=1)
Example #39
    def test2_diabetes_test(cls):
        dataset = datasets.load_diabetes()
        Y=dataset.target
        X=dataset.data
        x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.3)
        
        print("X Shape: ", X.shape)
        print("Y Shape: ", Y.shape)
        print("X_Train Shape: ", x_train.shape)
        print("X_Test Shape: ", x_test.shape)
        print("Y_Train Shape: ", y_train.shape)
        print("Y_Test Shape: ", y_test.shape)

        scaler = preprocessing.StandardScaler().fit(x_train)
        x_train = scaler.transform(x_train)
        x_test = scaler.transform(x_test)
        
        x_test = np.array(x_test)
        y_test = np.array(y_test)       

        l1 = 0
        l2 = 0
        best_result_l1=0
        best_result_l2=0
        print("Find best score with best values")
        score_best = 0
        alpha = [1, 0.1, 0.01, 0.001, 0.0001, 0.00001, 0.000001]
        best_alpha = alpha[0]
        for k in alpha:
            for i in range(20):
                for j in range(20):
                    mySGDElastic = EN.ElasticNetRegressor(n_epoch=20, alpha=k, delta=2, batch_size=10, l1_coef=l1, l2_coef=l2)
                    mySGDElastic.fit(x_train, y_train)
                    test = mySGDElastic.predict(x_test)
                    score = mySGDElastic.score(y_test, test)
                    if score_best < score < 1:
                        best_result_l1 = l1
                        best_result_l2 = l2
                        best_alpha = k
                        score_best = score
                    l2+=1
                l1+=1
                l2=0
        print("Best alpha: ", best_alpha)
        print("Best l1 value: ", best_result_l1) 
        print("Best l2 value: ", best_result_l2)
        print("Best score with this values: ", score_best)

        regr = ElasticNet(random_state=0)
        regr.fit(x_train, y_train)
        y_sklearn_test = regr.predict(x_test)


        mySGDElastic = EN.ElasticNetRegressor(n_epoch=20, alpha=best_alpha, delta=2, batch_size=10, l1_coef=best_result_l1, l2_coef=best_result_l2)
        mySGDElastic.fit(x_train, y_train)
        y_my_test = mySGDElastic.predict(x_test)
        score1 = mySGDElastic.score(y_test, y_my_test)
        score2 = mySGDElastic.score(y_test, y_sklearn_test)
        score3 = mySGDElastic.score(y_sklearn_test, y_my_test)
        print("My score in relation to tests: ", score1)
        print("Sklearn score in relation to tests: ", score2)
        print("My score in relation to result sklearn: ", score3)
        plt.scatter(y_test, y_my_test, color="blue", label="my prediction")
        plt.scatter(y_test, y_sklearn_test, color="red", label="sklearn prediction")
        plt.xlabel("Salary y test")
        plt.ylabel("Salary y prediction")
        plt.legend()
        plt.grid(True)
        plt.show()
        return score1, score2, score3
Example #40
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import ElasticNet
from pandas import Series, DataFrame
from sklearn.model_selection import train_test_split

#Importing the Training and Test Files
train = pd.read_csv('Train.csv')
test = pd.read_csv('Test.csv')

#Splitting into Training and CV for Cross Validation
X = train.loc[:, ['Outlet_Establishment_Year', 'Item_MRP']]
x_train, x_cv, y_train, y_cv = train_test_split(X, train.Item_Outlet_Sales)

#ElasticNet Regression
ENreg = ElasticNet(alpha=1, l1_ratio=0.5)  # the deprecated `normalize` flag has been removed
ENreg.fit(x_train, y_train)
pred = ENreg.predict(x_cv)

#Calculating the mean squared error
mse = np.mean((pred - y_cv)**2)
print('Mean Squared Error:', mse)
print('Score:', ENreg.score(x_cv, y_cv))

#Calculation of coefficients
coeff = DataFrame(x_train.columns)
coeff['Coefficient Estimate'] = Series(ENreg.coef_)
print(coeff)

#Plotting Analysis through a Residual Plot
x_plot = plt.scatter(pred, (pred - y_cv), c='b')
plt.hlines(y=0, xmin=-1000, xmax=5000)
Example #41
def fn_ames_en(df_all):
    import re
    import numpy as np
    import pandas as pd
    import pickle

    from sklearn.linear_model import ElasticNetCV, ElasticNet

    import matplotlib.pyplot as plt

    def convert(name):
        s1 = re.sub(r'\.', '_', name)
        return s1.lower()

    def fn_MAE(actuals, predictions):
        return np.round(np.mean(np.abs(predictions - actuals)), 0)

    def fn_tosplines(x):
        # note: reads `var` from the enclosing scope (set in the loop further down)
        x = x.values
        # hack: remove zeros to avoid issues where lots of values are zero
        x_nonzero = x[x != 0]
        ptiles = np.percentile(x_nonzero, [10, 20, 40, 60, 80, 90])
        # print(var, ptiles)
        df_ptiles = pd.DataFrame({var: x})
        for idx, ptile in enumerate(ptiles):
            df_ptiles[var + '_' + str(idx)] = np.maximum(0, x - ptiles[idx])
        return (df_ptiles)

    # change column names to closer to camel case
    colnames = df_all.columns.values
    colnames = list(map(convert, colnames))
    df_all.columns = colnames
    del convert, colnames

    # define variables
    vars_all = df_all.columns.values
    var_dep = ['saleprice']

    vars_notToUse = ['order', 'pid']
    vars_ind = [
        var for var in vars_all if var not in (vars_notToUse + var_dep)
    ]
    vars_ind_numeric = list(
        df_all[vars_ind].columns[df_all[vars_ind].dtypes != 'object'])

    # Deal with missings as per 02a
    vars_toDrop = ['lot_frontage', 'garage_yr_blt', 'mas_vnr_area']
    df_all.drop(labels=vars_toDrop, axis=1, inplace=True)

    vars_ind = [var for var in vars_ind if var not in vars_toDrop]
    vars_ind_numeric = [
        var for var in vars_ind_numeric if var not in vars_toDrop
    ]
    df_all.dropna(inplace=True)

    # remove outliers
    df_all = df_all[df_all['gr_liv_area'] <= 4000]
    df_all.reset_index(drop=True, inplace=True)

    # create onehot columns
    vars_ind_categorical = df_all.columns[df_all.dtypes == 'object'].tolist()
    vars_ind_onehot = []

    df_all_onehot = df_all.copy()

    for col in vars_ind_categorical:
        # use pd.get_dummies on  df_all[col]
        df_oh = pd.get_dummies(df_all[col], drop_first=False)
        # Find the name of the most frequent column
        col_mostFreq = df_oh.sum(axis=0).idxmax()
        # Drop the column of the most frequent category (using df_oh.drop)
        df_oh = df_oh.drop(col_mostFreq, axis=1)
        # Rename the columns to have the original variable name as a prefix
        oh_names = col + '_' + df_oh.columns
        df_oh.columns = oh_names
        df_all_onehot = pd.concat([df_all_onehot, df_oh], axis=1, sort=False)
        del df_all_onehot[col]
        vars_ind_onehot.extend(oh_names)

    # create fold
    rng = np.random.RandomState(2018)
    fold = rng.randint(0, 10, df_all.shape[0])
    df_all_onehot['fold'] = fold

    # rename df_all_onehot to df_all as this is now the data we will be using for
    # the rest of this work
    df_all = df_all_onehot
    del df_all_onehot

    # define index for train, val, design, test
    idx_train = np.where(df_all['fold'].isin(np.arange(0, 6)))[0]
    idx_val = np.where(df_all['fold'].isin([6, 7]))[0]
    idx_design = np.where(df_all['fold'].isin(np.arange(0, 8)))[0]
    idx_test = np.where(df_all['fold'].isin([8, 9]))[0]

    # standardise features
    for var in vars_ind_numeric:
        x = df_all[var].values
        x -= np.mean(x, axis=0)
        x /= np.sqrt(np.mean(x**2, axis=0))
        df_all[var] = x

    vars_ind_tospline = df_all[vars_ind_numeric].columns[(
        df_all[vars_ind_numeric].nunique() > 8)].tolist()

    for var in vars_ind_tospline:
        df_ptiles = fn_tosplines(df_all[var])
        df_all.drop(columns=[var], inplace=True)
        vars_ind_numeric.remove(var)
        df_all = pd.concat([df_all, df_ptiles], axis=1, sort=False)
        vars_ind_numeric.extend(df_ptiles.columns.tolist())

    vars_ind = vars_ind_onehot + vars_ind_numeric

    X = df_all[vars_ind].values
    y = df_all[var_dep].values

    X_design = X[idx_design, :]
    X_test = X[idx_test, :]
    y_design = df_all[var_dep].iloc[idx_design].copy().values.ravel()
    y_test = df_all[var_dep].iloc[idx_test].copy().values.ravel()

    X = df_all[vars_ind].values
    y = df_all[var_dep].values

    X_train = X[idx_train, :]
    X_val = X[idx_val, :]
    X_design = X[idx_design, :]
    X_test = X[idx_test, :]

    y_train = df_all[var_dep].iloc[idx_train].copy().values.ravel()
    y_val = df_all[var_dep].iloc[idx_val].copy().values.ravel()
    y_design = df_all[var_dep].iloc[idx_design].copy().values.ravel()
    y_test = df_all[var_dep].iloc[idx_test].copy().values.ravel()

    # Copy enough of your ElasticNetCV code here so that I can see one of your experiments
    # and get an idea of the method you used to tune the hyper parameters

    # Let ElasticNetCV try all l1_ratios in temp_l1 and note which one it chooses to use. This ratio is the best one (verified manually)
    temp_l1 = [.1, .5, .7, .9, .95, .99, 1]

    # Note that here I have indexed temp_l1 to use the value ElasticNetCV eventually chooses, to speed up the
    # running of the function. You could simply replace temp_l1[2] with temp_l1 and it would choose the same
    # value for the l1_ratio.
    enCV_ = ElasticNetCV(
        # tries different l1 ratios given by temp_l1
        l1_ratio=temp_l1[2],
        alphas=[2**num for num in range(-6, 5)],
        # if you get non-convergence, you may need to increase max_iter
        max_iter=5000,
        # the deprecated `normalize` flag has been dropped; the features were
        # already standardised above, though not the splines (as discussed on Moodle)
        cv=10,
        random_state=2018,
        selection='random')

    enCV_.fit(X=X_design, y=y_design)

    #     print(enCV_.l1_ratio_)
    #     print(enCV_.alpha_)
    #     print(np.log10(enCV_.alpha_))

    # Chosen alpha is 0.015625, i.e. around 10^-1.8. Try:
    # Create an array of different alphas to try - values between 10^-1 to 10^-8, with step size 0.05.
    # Trying to find simpler model which retains good performance. MSE is relatively flat for these values of alpha.
    trial_alpha = 10**-np.arange(1, 1.8, 0.05)

    #     print(trial_alpha)

    # Note that ElasticNetCV chooses an l1 ratio of 0.7. Manually re-running ElasticNet over the train data,
    # using l1_ratio=0.7 and different values of alpha (by indexing trial_alpha) shows that a few values of
    # alpha provide a test MAE lower than 13700 and different between test and non-test error less than 1100.
    # I chose to use test_alpha[4] = 10**-1.20 = 0.06309573444801933, as it appears to give the best trade-off
    # between the two.

    #  Example of performance testing of chosen alpha and l1_ratio over train data and validation data
    en_ = ElasticNet(
        alpha=trial_alpha[4],        # 10**-1.20, chosen above
        l1_ratio=enCV_.l1_ratio_,    # 0.7, chosen by ElasticNetCV
        random_state=2018,
        selection='random',
        max_iter=5000)

    en_.fit(X=X_train, y=y_train)

    pred_train = en_.predict(X_train)
    pred_val = en_.predict(X_val)

    #     print("MAE: train:", fn_MAE(y_train, pred_train))
    #     print("MAE: val:", fn_MAE(y_val, pred_val))
    #     print(fn_MAE(y_val,   pred_val) - fn_MAE(y_train, pred_train))

    # Now copy the code for your final model here
    en_ = ElasticNet(
        alpha=trial_alpha[4],        # 10**-1.20, chosen above
        l1_ratio=enCV_.l1_ratio_,    # 0.7, chosen by ElasticNetCV
        random_state=2018,
        selection='random',
        max_iter=5000)

    en_ = en_.fit(X=X_design, y=y_design)

    pred_design = en_.predict(X_design)
    pred_test = en_.predict(X_test)

    # calculate MAE on test and non test but then hard code in the return statement
    mae_design = fn_MAE(y_design, pred_design)
    mae_test = fn_MAE(y_test, pred_test)
    #     print('design error: ', mae_design)
    #     print('test error: ', mae_test)
    #     print(mae_test - mae_design)
    return en_, X, y, 12629, 13655
Example #42
elasticCV.fit(x_train, y_train)


# In[41]:


alpha=elasticCV.alpha_


# In[42]:


# l1_ratio_ gives how close the fitted model is to pure L1 regularization; 0.5 indicates
# equal preference for L1 and L2 (note the trailing underscore for the fitted value)
elasticCV.l1_ratio_


# In[43]:


elasticnet_reg = ElasticNet(alpha = alpha,l1_ratio=0.5)
elasticnet_reg.fit(x_train, y_train)


# In[44]:


elasticnet_reg.score(x_test, y_test)

So, we can see that with a different type of regularization we still get essentially the same r2 score. That suggests our OLS model was trained well on the training data and is not overfitting.
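A compact way to make that comparison explicit; a sketch that assumes the x_train/x_test/y_train/y_test splits used throughout this example:

from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet

# similar train and test R^2 across the models suggests a stable, non-overfit fit
for model in (LinearRegression(), Ridge(alpha=1.0), Lasso(alpha=0.1),
              ElasticNet(alpha=0.1, l1_ratio=0.5)):
    model.fit(x_train, y_train)
    print(type(model).__name__,
          round(model.score(x_train, y_train), 3),
          round(model.score(x_test, y_test), 3))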
Example #43
    # Split the data into training and test sets. (0.75, 0.25) split.
    train, test = train_test_split(data)

    # The predicted column is "quality" which is a scalar from [3, 9]
    train_x = train.drop(["quality"], axis=1)
    test_x = test.drop(["quality"], axis=1)
    train_y = train[["quality"]]
    test_y = test[["quality"]]

    alpha = float(sys.argv[1]) if len(sys.argv) > 1 else 0.5
    l1_ratio = float(sys.argv[2]) if len(sys.argv) > 2 else 0.5

    with mlflow.start_run():
        lr = ElasticNet(alpha=alpha, l1_ratio=l1_ratio, random_state=42)
        lr.fit(train_x, train_y)

        predicted_qualities = lr.predict(test_x)

        (rmse, mae, r2) = eval_metrics(test_y, predicted_qualities)

        print("Elasticnet model (alpha=%f, l1_ratio=%f):" % (alpha, l1_ratio))
        print("  RMSE: %s" % rmse)
        print("  MAE: %s" % mae)
        print("  R2: %s" % r2)

        mlflow.log_param("alpha", alpha)
        mlflow.log_param("l1_ratio", l1_ratio)
        mlflow.log_metric("rmse", rmse)
        mlflow.log_metric("r2", r2)
        mlflow.log_metric("mae", mae)
Example #44
0
def EN(X_train, y_train, X_test, seed):
    mdl = ElasticNet(fit_intercept = True, l1_ratio = 0.005, alpha = 0.2, tol = 1e-3, max_iter = 2000, selection = "random", 
                     random_state = seed)
    y_pred = mdl.fit(X_train, y_train).predict(X_test)
    return y_pred
Example #45
0
plt.scatter(y_test_pred,  y_test_pred - y_test,
            c='limegreen', marker='s', edgecolor='white',
            label='Test data')
plt.xlabel('Predicted values')
plt.ylabel('Residuals')
plt.legend(loc='upper left')
plt.hlines(y=0, xmin=-10, xmax=50, color='black', lw=2)
plt.xlim([-10, 50])
plt.tight_layout()

plt.show()

# Elastic Net regression:

elanet = ElasticNet(alpha=1.0, l1_ratio=0.5)
elanet.fit(X_train, y_train)
y_train_pred = elanet.predict(X_train)
y_test_pred = elanet.predict(X_test)

print ('Elasticnet Regression')
print(elanet.coef_)
print (elanet.intercept_)

print('MSE train: %.3f, test: %.3f' % (
        mean_squared_error(y_train, y_train_pred),
        mean_squared_error(y_test, y_test_pred)))
print('R^2 train: %.3f, test: %.3f' % (
        r2_score(y_train, y_train_pred),
        r2_score(y_test, y_test_pred)))

plt.scatter(y_train_pred,  y_train_pred - y_train,
            c='steelblue', marker='o', edgecolor='white',
            label='Training data')  # call completed here; styling assumed to mirror the test-data scatter above
Example #46
0
linear_Lasso = Lasso(
    alpha=1.0,
    fit_intercept=True,  # fit the intercept
    normalize=False,  # do not normalize the data before regression
    copy_X=True  # work on a copy of X so the original values are unaffected
)
linear_Lasso.fit(train_x, train_y)
y_Lasso = linear_Lasso.predict(test_x)

linear_ElasticNet = ElasticNet(
    alpha=1.0,
    fit_intercept=True,  # fit the intercept
    normalize=False,  # do not normalize the data before regression
    copy_X=True  # work on a copy of X so the original values are unaffected
)
linear_ElasticNet.fit(train_x, train_y)
y_ElasticNet = linear_ElasticNet.predict(test_x)

print('Simulated-data coefficients', coef)
print('Linear regression coefficients', linear_rg.coef_)
print('Ridge regression coefficients', linear_Ridge.coef_)
print('Lasso regression coefficients', linear_Lasso.coef_)
print('ElasticNet regression coefficients', linear_ElasticNet.coef_)
#%%
ax2 = plt_helper('ax1', 'Comparing the fits of the different regression models')
ax2.plot(X[:, 0], y, 'r*', label="simulated data")
ax2.plot(test_x[:, 0], y_rg, '-k', label='linear regression')
ax2.plot(test_x[:, 0], y_Ridge, '.b', label="ridge regression")
ax2.plot(test_x[:, 0], y_Lasso, '-g', label="Lasso regression")
ax2.plot(test_x[:, 0], y_ElasticNet, '-r', label="ElasticNet regression")
Example #47
0
    def __init__(self, dict_train, Flt_Lambda, Flt_L1):
        '''
        Implementing Algorithm 1 in Sparse Discriminant Analysis (Clemmensen et al.), 2011, Technometrics
        :param dict_train: dictionary of training data (key: 0 - normal / 1 - PVC)
        :param Flt_Lambda: L2 penalty value
        :param Flt_L1: L1 ratio (passed to ElasticNet as l1_ratio)
        :return: sparse discriminant vector
        '''
        self.mat_wc_normal = dict_train[0] # N by 256 matrix
        self.mat_wc_PVC = dict_train[1] # V by 256 matrix
        self.dim = len(self.mat_wc_normal[0]) # 256

        self.X = np.concatenate((self.mat_wc_normal, self.mat_wc_PVC), axis=0) # N / V augmented matrix (transpose of [N|V])
        self.X = (self.X - np.mean(self.X,axis=0))



        self.number_normal = len(self.mat_wc_normal) # N
        self.number_PVC = len(self.mat_wc_PVC) # V
        self.number_total = self.number_normal + self.number_PVC

        self.Y = self.Construct_Y()
        self.D = np.dot(np.transpose(self.Y), self.Y) / float(self.number_total) # P
        self.Q = np.ones((2,1))

        np.random.seed(123)
        I = np.eye(2)
        for k in range(1):
            theta_initial = np.random.random(2)
            # 4-(a) in Algorithm 1 in Sparse Discriminant Analysis (2011) by Clemmensen et al., Technometrics
            theta = np.dot(I - np.dot(np.dot(self.Q, np.transpose(self.Q)), self.D ), theta_initial)
            theta /= np.sqrt(np.dot(np.dot(np.transpose(theta), self.D), theta)) # normalize

            iteration_num = 10000
            beta_prev = np.random.random(self.dim)
            # 4-(b)
            for idx in range(iteration_num):
                response = np.dot(self.Y, theta)
                elas = ElasticNet(alpha=Flt_Lambda, l1_ratio=Flt_L1) # alpha * l1_ratio = lambda; 0.5 * alpha * (1 - l1_ratio) = gamma
                beta = elas.fit(X=self.X, y= response).coef_
                theta_factor_1 = I - np.dot(np.dot(self.Q, np.transpose(self.Q)),self.D)
                theta_factor_2 = np.dot(theta_factor_1, np.linalg.inv(self.D))
                theta_factor_3 = np.dot(theta_factor_2, np.transpose(self.Y))
                theta_factor_4 = np.dot(np.dot(theta_factor_3, self.X), beta)
                # print WaveTheta
                theta = theta_factor_4 / np.sqrt(np.dot(np.dot(np.transpose(theta_factor_4),self.D),theta_factor_4))

                if np.sum(np.abs(beta - beta_prev)) < 1e-6:
                    break
                else:
                    beta_prev = beta

            # print B
            self.sparse_discriminant_vector = beta

        ''' 160612 constructing sparse discriminant 'matrix' for two class case for applying T2.'''
        sparse_discriminant_matrix = np.zeros((self.dim, self.dim))
        non_zero_elem = list()

        for idx in range(self.dim):
            sparse_discriminant_matrix[idx][idx] = self.sparse_discriminant_vector[idx]
            if self.sparse_discriminant_vector[idx] != 0.0:
                non_zero_elem.append(idx)
        self.sparse_discriminant_matrix = sparse_discriminant_matrix
        self.non_zero_elem = non_zero_elem
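    # Construct_Y is referenced in __init__ but not shown; a plausible minimal
    # sketch, assuming it builds the n-by-2 class-indicator matrix the algorithm
    # needs (normal samples first, then PVC):
    def Construct_Y(self):
        Y = np.zeros((self.number_total, 2))
        Y[:self.number_normal, 0] = 1.0   # rows for normal beats
        Y[self.number_normal:, 1] = 1.0   # rows for PVC beats
        return Y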
Example #48
0
class SLIMElasticNetRecommender(BaseItemSimilarityMatrixRecommender):
    """
    Train a Sparse Linear Methods (SLIM) item similarity model.
    NOTE: the ElasticNet solver is parallel; a single instance of SLIM_ElasticNet will
          make use of half the cores available

    See:
        Efficient Top-N Recommendation by Linear Regression,
        M. Levy and K. Jack, LSRS workshop at RecSys 2013.

        SLIM: Sparse linear methods for top-n recommender systems,
        X. Ning and G. Karypis, ICDM 2011.
        http://glaros.dtc.umn.edu/gkhome/fetch/papers/SLIM2011icdm.pdf
    """

    RECOMMENDER_NAME = "SLIMElasticNetRecommender"

    def __init__(self, URM_train, verbose=True):
        super(SLIMElasticNetRecommender, self).__init__(URM_train,
                                                        verbose=verbose)

    def fit(self, l1_ratio=0.1, alpha=1e-4, positive_only=True, topK=100):

        assert l1_ratio >= 0 and l1_ratio <= 1, "{}: l1_ratio must be between 0 and 1, provided value was {}".format(
            self.RECOMMENDER_NAME, l1_ratio)

        self.l1_ratio = l1_ratio
        self.positive_only = positive_only
        self.topK = topK

        # Display ConvergenceWarning only once and not for every item it occurs
        warnings.simplefilter("once", category=ConvergenceWarning)

        # initialize the ElasticNet model
        self.model = ElasticNet(alpha=alpha,
                                l1_ratio=self.l1_ratio,
                                positive=self.positive_only,
                                fit_intercept=False,
                                copy_X=False,
                                precompute=True,
                                selection='random',
                                max_iter=10,
                                tol=1e-4)

        URM_train = check_matrix(self.URM_train, 'csc', dtype=np.float32)

        n_items = URM_train.shape[1]

        # Use array as it reduces memory requirements compared to lists
        dataBlock = 10000000

        rows = np.zeros(dataBlock, dtype=np.int32)
        cols = np.zeros(dataBlock, dtype=np.int32)
        values = np.zeros(dataBlock, dtype=np.float32)

        numCells = 0

        start_time = time.time()
        start_time_printBatch = start_time

        # fit each item's factors sequentially (not in parallel)
        for currentItem in range(n_items):

            # get the target column
            y = URM_train[:, currentItem].toarray()

            # set the j-th column of X to zero
            start_pos = URM_train.indptr[currentItem]
            end_pos = URM_train.indptr[currentItem + 1]

            current_item_data_backup = URM_train.data[start_pos:end_pos].copy()
            URM_train.data[start_pos:end_pos] = 0.0

            # fit one ElasticNet model per column
            self.model.fit(URM_train, y)

            # self.model.coef_ contains the coefficient of the ElasticNet model
            # let's keep only the non-zero values

            # Select topK values
            # Sorting is done in three steps. Faster than plain np.argsort for a larger number of items
            # - Partition the data to extract the set of relevant items
            # - Sort only the relevant items
            # - Get the original item index

            nonzero_model_coef_index = self.model.sparse_coef_.indices
            nonzero_model_coef_value = self.model.sparse_coef_.data

            local_topK = min(len(nonzero_model_coef_value) - 1, self.topK)

            relevant_items_partition = (
                -nonzero_model_coef_value
            ).argpartition(local_topK)[0:local_topK]
            relevant_items_partition_sorting = np.argsort(
                -nonzero_model_coef_value[relevant_items_partition])
            ranking = relevant_items_partition[
                relevant_items_partition_sorting]

            for index in range(len(ranking)):

                if numCells == len(rows):
                    rows = np.concatenate(
                        (rows, np.zeros(dataBlock, dtype=np.int32)))
                    cols = np.concatenate(
                        (cols, np.zeros(dataBlock, dtype=np.int32)))
                    values = np.concatenate(
                        (values, np.zeros(dataBlock, dtype=np.float32)))

                rows[numCells] = nonzero_model_coef_index[ranking[index]]
                cols[numCells] = currentItem
                values[numCells] = nonzero_model_coef_value[ranking[index]]

                numCells += 1

            # finally, replace the original values of the j-th column
            URM_train.data[start_pos:end_pos] = current_item_data_backup

            elapsed_time = time.time() - start_time
            new_time_value, new_time_unit = seconds_to_biggest_unit(
                elapsed_time)

            if time.time() - start_time_printBatch > 30 or currentItem == n_items - 1:
                self._print(
                    "Processed {} ( {:.2f}% ) in {:.2f} {}. Items per second: {:.2f}"
                    .format(currentItem + 1,
                            100.0 * float(currentItem + 1) / n_items,
                            new_time_value, new_time_unit,
                            float(currentItem) / elapsed_time))

                sys.stdout.flush()
                sys.stderr.flush()

                start_time_printBatch = time.time()

        # generate the sparse weight matrix
        self.W_sparse = sps.csr_matrix(
            (values[:numCells], (rows[:numCells], cols[:numCells])),
            shape=(n_items, n_items),
            dtype=np.float32)
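The per-item loop above relies on two tricks worth isolating: zeroing out the target item's own column before the fit (so an item cannot predict itself), and reading the non-zero coefficients back from sparse_coef_. A self-contained sketch on random data (all names here are illustrative, not from the recommender above):

import numpy as np
import scipy.sparse as sps
from sklearn.linear_model import ElasticNet

URM = sps.random(50, 20, density=0.2, format='csc', dtype=np.float32)
enet = ElasticNet(alpha=1e-4, l1_ratio=0.1, positive=True,
                  fit_intercept=False, max_iter=100)

j = 0                                       # target item (column)
y = URM[:, j].toarray().ravel()
start, end = URM.indptr[j], URM.indptr[j + 1]
backup = URM.data[start:end].copy()
URM.data[start:end] = 0.0                   # item j cannot predict itself
enet.fit(URM, y)
URM.data[start:end] = backup                # restore the column

coef_idx = enet.sparse_coef_.indices        # non-zero coefficients only
coef_val = enet.sparse_coef_.data
topK = min(10, len(coef_val))
ranking = coef_idx[np.argsort(-coef_val)[:topK]]  # topK most similar items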
Example #49
0
File: core.py Project: chrinide/ffx
 def fit(self, *args, **kwargs):
     return ElasticNet.fit(self, *args, **kwargs)
Example #50
0
df['ws_12h_min'] = df['time'].apply(lambda x: dfb[
    (dfb.index <= x) & (dfb.index >= (x - timedelta(hours=12)))]['ws'].min())
df['ws_12h_max'] = df['time'].apply(lambda x: dfb[
    (dfb.index <= x) & (dfb.index >= (x - timedelta(hours=12)))]['ws'].max())
df['ws_12h_std'] = df['time'].apply(lambda x: dfb[
    (dfb.index <= x) & (dfb.index >= (x - timedelta(hours=12)))]['ws'].std())

model_am = ElasticNet()
model_pm = ElasticNet()
X_am = df[df['type'] == 'AM'][[
    col for col in df.columns
    if (col.startswith('temp') or col.startswith('rh') or col.startswith('ws'))
]]
y_am = df[df['type'] == 'AM']['growth_rate']
model_am.fit(X_am, y_am)
print('Morning observations: R-squared = %0.2f' % model_am.score(X_am, y_am))

X_pm = df[df['type'] == 'PM'][[
    col for col in df.columns
    if (col.startswith('temp') or col.startswith('rh') or col.startswith('ws'))
]]
y_pm = df[df['type'] == 'PM']['growth_rate']
model_pm.fit(X_pm, y_pm)
print('Evening observations: R-squared = %0.2f' % model_pm.score(X_pm, y_pm))

fig = plt.figure(figsize=(16, 8))

# getting unzipped lists of feature names and corresponding coefficients, sorted by absolute decreasing coefficient
features_am, weights_am = zip(*sorted(
    zip(X_am.columns, model_am.coef_), key=lambda x: abs(x[1]), reverse=True))
Example #51
0
# It is made available under the MIT License

import numpy as np
from sklearn.datasets import load_svmlight_file
from sklearn.model_selection import KFold  # sklearn.cross_validation was removed in newer scikit-learn
from sklearn.linear_model import ElasticNet
from sklearn.metrics import mean_squared_error, r2_score

data, target = load_svmlight_file('data/E2006.train')

# Edit the lines below if you want to switch method:
# met = LinearRegression(fit_intercept=True)
met = ElasticNet(fit_intercept=True, alpha=.1)

kf = KFold(n_splits=5)
pred = np.zeros_like(target)
for train, test in kf.split(data):
    met.fit(data[train], target[train])
    pred[test] = met.predict(data[test])

print('[EN 0.1] RMSE on testing (5 fold), {:.2}'.format(np.sqrt(mean_squared_error(target, pred))))
print('[EN 0.1] R2 on testing (5 fold), {:.2}'.format(r2_score(target, pred)))
print('')

met.fit(data, target)
pred = met.predict(data)
print('[EN 0.1] RMSE on training, {:.2}'.format(np.sqrt(mean_squared_error(target, pred))))
print('[EN 0.1] R2 on training, {:.2}'.format(r2_score(target, pred)))


Example #52
0
# model.add(Dropout(0.2))
# model.add(LSTM(input_shape=(None,50),units=50,return_sequences=True))
# model.add(Dropout(0.2))
# model.add(LSTM(input_shape=(None,50),units=50,return_sequences=True))
# model.add(Dropout(0.2))
# model.add(LSTM(50,return_sequences=False))
# model.add(Dropout(0.2))
# model.add(Dense(units=1))
# model.add(Activation("linear"))
# start = time.time()
# model.compile(loss="mse", optimizer="rmsprop")
# print("Time : ", time.time() - start)
# #print(model.layers)

# model.fit(X_train,y_train,batch_size=50,epochs=10,validation_split=0.05)

model = ElasticNet(l1_ratio=0.5, normalize=True, max_iter=15000)
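# NOTE: the 'normalize' parameter was deprecated in scikit-learn 1.0 and removed in 1.2;
# in current versions, scale the features beforehand (e.g. with StandardScaler) instead.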
model.set_params(alpha=0.001)
model.fit(X_train, y_train)

y_test = model.predict(X_test)
prediction_result = y_test
#prediction_result = []
# for i in range(len(y_test)):
# 	prediction_result.append(y_test[i][0])
speed_id = [x for x in range(len(y_test))]
#print(type(y_test))
result = pd.DataFrame({'id': speed_id, 'speed': prediction_result})
result.to_csv('submission.csv', index=False)
print(y_test)
Example #53
0
from sklearn.linear_model import Lasso

alpha = 0.1
lasso = Lasso(alpha=alpha)

y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)

# #############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet

enet = ElasticNet(alpha=alpha, l1_ratio=0.7)

y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)

plt.plot(enet.coef_, color='lightgreen', linewidth=2,
         label='Elastic net coefficients')
plt.plot(lasso.coef_, color='gold', linewidth=2,
         label='Lasso coefficients')
plt.plot(coef, '--', color='navy', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
          % (r2_score_lasso, r2_score_enet))
plt.show()
Example #54
0
plt.ylim([miny - 0.05 * abs(maxy - miny), maxy + 0.05 * abs(maxy - miny)])
plt.legend(loc='best')
plt.tight_layout()
plt.show()
""" Begin ELASTIC NET parameter search
"""
l1_list = np.logspace(-3, 2, 50)
alpha_list = np.logspace(-3, 2, 50)
mv = 0.0
amax, l1max = 0, 0
r2_list = []
for l1 in l1_list:
    r2_list.append([])
    for alpha in alpha_list:
        enet = ElasticNet(alpha=alpha, l1_ratio=l1, positive=True)
        y_pred_enet = enet.fit(A1, yval1).predict(A1)
        #        score = r2_score(yval2, np.dot(A2, enet.coef_))
        #        Uncomment below to optimize wrt real spectrum
        if np.max(enet.coef_) >= 1E-8:
            score = r2_score(x_real / np.max(x_real),
                             enet.coef_ / np.max(enet.coef_))
        else:
            score = 0
        r2_list[-1].append(score)
        if score > mv:
            mv = score
            amax, l1max = alpha, l1

print "alphamax = " + str(amax) + ",  l1max = " + str(l1max)

fig, ax = plt.subplots()
Example #55
0
class SLIM(BaseModel):
    def __init__(self, model_conf, num_users, num_items, device):
        super(SLIM, self).__init__()
        self.num_users = num_users
        self.num_items = num_items

        self.l1_reg = model_conf.l1_reg
        self.l2_reg = model_conf.l2_reg
        self.topk = model_conf.topk

        self.device = device

        alpha = self.l1_reg + self.l2_reg
        l1_ratio = self.l1_reg / alpha
        self.slim = ElasticNet(alpha=alpha,
                               l1_ratio=l1_ratio,
                               positive=True,
                               fit_intercept=False,
                               copy_X=False,
                               precompute=True,
                               selection='random',
                               max_iter=300,
                               tol=1e-3)

    def train_one_epoch(self, dataset, optimizer, batch_size, verbose):
        train_matrix = dataset.train_matrix.tocsc()
        num_items = train_matrix.shape[1]

        # Use array as it reduces memory requirements compared to lists
        dataBlock = 10000000

        rows = np.zeros(dataBlock, dtype=np.int32)
        cols = np.zeros(dataBlock, dtype=np.int32)
        values = np.zeros(dataBlock, dtype=np.float32)

        numCells = 0
        tqdm_iterator = tqdm(range(num_items),
                             desc='# items covered',
                             total=num_items)
        for item in tqdm_iterator:
            y = train_matrix[:, item].toarray()

            # set the j-th column of X to zero
            start_pos = train_matrix.indptr[item]
            end_pos = train_matrix.indptr[item + 1]

            current_item_data_backup = train_matrix.data[
                start_pos:end_pos].copy()
            train_matrix.data[start_pos:end_pos] = 0.0

            self.slim.fit(train_matrix, y)

            # Select topK values
            # Sorting is done in three steps. Faster than plain np.argsort for a larger number of items
            # - Partition the data to extract the set of relevant items
            # - Sort only the relevant items
            # - Get the original item index

            # nonzero_model_coef_index = self.model.coef_.nonzero()[0]
            # nonzero_model_coef_value = self.model.coef_[nonzero_model_coef_index]

            nonzero_model_coef_index = self.slim.sparse_coef_.indices
            nonzero_model_coef_value = self.slim.sparse_coef_.data

            local_topK = min(len(nonzero_model_coef_value) - 1, self.topk)

            relevant_items_partition = (
                -nonzero_model_coef_value
            ).argpartition(local_topK)[0:local_topK]
            relevant_items_partition_sorting = np.argsort(
                -nonzero_model_coef_value[relevant_items_partition])
            ranking = relevant_items_partition[
                relevant_items_partition_sorting]

            for index in range(len(ranking)):

                if numCells == len(rows):
                    rows = np.concatenate(
                        (rows, np.zeros(dataBlock, dtype=np.int32)))
                    cols = np.concatenate(
                        (cols, np.zeros(dataBlock, dtype=np.int32)))
                    values = np.concatenate(
                        (values, np.zeros(dataBlock, dtype=np.float32)))

                rows[numCells] = nonzero_model_coef_index[ranking[index]]
                cols[numCells] = item
                values[numCells] = nonzero_model_coef_value[ranking[index]]

                numCells += 1

            train_matrix.data[start_pos:end_pos] = current_item_data_backup

        self.W_sparse = sp.csr_matrix(
            (values[:numCells], (rows[:numCells], cols[:numCells])),
            shape=(num_items, num_items),
            dtype=np.float32)

        return 0.0

    def predict(self, eval_users, eval_pos, test_batch_size):
        # eval_pos_matrix
        preds = (eval_pos * self.W_sparse).toarray()
        preds[eval_pos.nonzero()] = float('-inf')
        return preds
Example #56
0
def _elastic(X_train, y_train):
    el = ElasticNet(alpha=1.0, l1_ratio=0.7)
    el.fit(X_train, y_train)
    return el.coef_
Example #57
0
    "max_depth": max_depth,
    "subsample":
    subsample,  # collect 80% of the data only to prevent overfitting
    "colsample_bytree": colsample_bytree,
    "silent": 1,
    "seed": 0,
}

watchlist = [(dtrain, 'train')]  # list of things to evaluate and print
gbm = xgb.train(params,
                dtrain,
                num_boost_round,
                evals=watchlist,
                early_stopping_rounds=early_stopping_rounds,
                verbose_eval=True)  # find the best score
x_pred = np.expm1(gbm.predict(dtest))

elastic = ElasticNet(alpha=0.0005, l1_ratio=0.9)
elastic.fit(X_train, y)
elas_preds = np.expm1(elastic.predict(X_test))
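# NOTE: elas_preds is computed above but not used in the final blend below,
# which mixes only the lasso and xgboost predictions.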

lasso_preds = np.expm1(lasso_model.predict(X_test))
final_result = 0.8 * lasso_preds + 0.2 * x_pred

solution = pd.DataFrame({
    "id": test.Id,
    "SalePrice": final_result
},
                        columns=['id', 'SalePrice'])
solution.to_csv("final_upload_11PM2.csv", index=False)
Example #58
0
from sklearn.model_selection import train_test_split
from sklearn.linear_model import ElasticNet, Ridge, Lasso
from sklearn.datasets import make_regression


X, y = make_regression(n_features=75, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

en_regr = ElasticNet(random_state=0)
en_regr.fit(X_train, y_train)

ridge_regr = Ridge()
ridge_regr.fit(X_train, y_train)

lasso_regr = Lasso()
lasso_regr.fit(X_train, y_train)

print("ELASTICNET")
print("training set score: {:.2f}".format(en_regr.score(X_train, y_train)))
print("test set score: {:.2f}".format(en_regr.score(X_test, y_test)))
# print("coef_: {0}".format(en_regr.coef_))

print("RIDGE")
print("training set score: {:.2f}".format(ridge_regr.score(X_train, y_train)))
print("test set score: {:.2f}".format(ridge_regr.score(X_test, y_test)))
# print("coef_: {0}".format(ridge_regr.coef_))

print("LASSO")
print("training set score: {:.2f}".format(lasso_regr.score(X_train, y_train)))
print("test set score: {:.2f}".format(lasso_regr.score(X_test, y_test)))
# print("coef_: {0}".format(lasso_regr.coef_))
Example #59
0
            std = X_train.std(axis=0)
            mean = X_train.mean(axis=0)
            X_train = (X_train - mean) / std
            X_test = (X_test - mean) / std

            std = y_train.std(axis=0)
            mean = y_train.mean(axis=0)
            y_train = (y_train - mean) / std
            y_test = (y_test - mean) / std

            gc.collect()
            print("- benching ElasticNet")
            clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)  # 'rho' was renamed to l1_ratio
            tstart = time()
            clf.fit(X_train, y_train)
            elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
                                                        y_test)
            elnet_results[i, j, 1] = time() - tstart

            gc.collect()
            print("- benching SGD")
            n_iter = int(np.ceil(10 ** 4.0 / n_train))
            clf = SGDRegressor(alpha=alpha, fit_intercept=False,
                               max_iter=n_iter, learning_rate="invscaling",
                               eta0=.01, power_t=0.25)

            tstart = time()
            clf.fit(X_train, y_train)
            sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
                                                      y_test)
Example #60
0
print(np.sqrt(mean_squared_error(y_valid, y_predict)))


# In[ ]:


from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import ElasticNet, Ridge


# In[ ]:


# Elastic net is a combination of Lasso (L1) and Ridge (L2); from the results below we can see Lasso does better
elastic_net_model = ElasticNet()
elastic_net_model.fit(x_train, y_train)
y_predict=elastic_net_model.predict(x_valid)
print(mean_absolute_error(y_valid, y_predict))
print(mean_squared_error(y_valid, y_predict))
print(np.sqrt(mean_squared_error(y_valid, y_predict)))


# In[ ]:


# Ridge (L2): from the results below we can see that Ridge does better than all the other models
ridge_net_model = Ridge()
ridge_net_model.fit(x_train, y_train)
y_predict = ridge_net_model.predict(x_valid)
print(mean_absolute_error(y_valid, y_predict))
print(mean_squared_error(y_valid, y_predict))