plt.ylabel('Cost')
plt.show()
# fig.savefig('linReg_regularization_comparison.eps', format='eps')

# predict and evaluate on the test set
print('... predicting ...')

# add a column of ones to the data to account for the bias:
X_test = add_intercept(X_test)
print(X_test.shape)

pred = list()
for cx in X_test:
    pred.append(predict(cx))
y_hat = np.array(pred, dtype=float)

RMSE, pcorr, error_per_song, mean_per_song = evaluate1d(y_test, y_hat, tst_song)

All_stack = np.hstack((error_per_song, mean_per_song))
print(' Error per song (ar/val)   Mean per song (ar/val):\n')
print(All_stack)
print('\n')
print('song_id :')
print(song_id_tst)
print('\n')
# print('Error per song: \n', Error_per_song)
# the -1 entries are placeholders; the commented args report valence too
print('sklearn --> arousal : %.4f, valence : %.4f\n'
      'Pearson Corr --> arousal : %.4f, valence : %.4f\n'
      % (RMSE[0], -1., pcorr[0][0], -1))
#     % (RMSE[0], RMSE[1], pcorr[0][0], pcorr[1][0])
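# NOTE (editor's sketch, not part of the original script): add_intercept,
# predict and evaluate1d are defined elsewhere. Below is a minimal version
# of add_intercept, plus a *hypothetical* evaluate1d that assumes frames
# are stored contiguously per song with an equal number of frames per song:
import numpy as np
from scipy.stats import pearsonr

def add_intercept(X):
    # prepend a column of ones so the first weight acts as the bias term
    return np.hstack((np.ones((X.shape[0], 1)), X))

def evaluate1d(y_true, y_pred, n_songs):
    # global RMSE and Pearson correlation over all frames
    rmse = np.sqrt(np.mean((y_true - y_pred) ** 2))
    pcorr = pearsonr(y_true, y_pred)  # (r, p-value) tuple
    # per-song mean absolute error and mean prediction, as column vectors
    err = np.abs(y_true - y_pred).reshape(n_songs, -1).mean(axis=1)
    mean = y_pred.reshape(n_songs, -1).mean(axis=1)
    return [rmse], [pcorr], err[:, None], mean[:, None]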
print(res.summary())
# cf. http://statsmodels.sourceforge.net/devel/mixed_linear.html
# md = smf.mixedlm(y_train, X_train, groups=data["Pig"])
# mdf = md.fit()
# print(mdf.summary())

pred = res.predict(X_test)
# print(pred)
all_fold_pred.append(pred)
all_fold_y_test.append(y_test.tolist())
y_hat = np.array(pred, dtype=float)

RMSE, pcorr, error_per_song, mean_per_song = evaluate1d(y_test, y_hat, id_test.shape[0])

# All_stack = np.hstack((error_per_song, mean_per_song))
# print(' Error per song (ar/val)   Mean per song (ar/val):\n')
# print(All_stack)
# print('\n')
# print('song_id :')
# print(id_test)
# print('\n')
# print('Error per song: \n', Error_per_song)
print('sklearn --> arousal : %.4f, valence : %.4f\n'
      'Pearson Corr --> arousal : %.4f, valence : %.4f\n'
      % (RMSE[0], -1., pcorr[0][0], -1))
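# NOTE (editor's sketch, not part of the original script): the commented
# smf.mixedlm(...) call above follows the statsmodels *formula* interface,
# which expects a formula string plus a DataFrame. With plain arrays the
# array interface sm.MixedLM is the right entry point; groups_train is a
# hypothetical per-row song-label vector:
import statsmodels.api as sm

md = sm.MixedLM(y_train, X_train, groups=groups_train)
res = md.fit()
print(res.summary())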
plt.ylabel("Cost") plt.show() # fig.savefig('linReg_regularization_comparison.eps', format='eps') # predict and eval on test set print "... predicting ..." # add column of ones to data to account for the bias: X_test = add_intercept(X_test) print X_test.shape pred = list() for cx in X_test: pred.append(predict(cx)) y_hat = np.array(pred, dtype=float) RMSE, pcorr, error_per_song, mean_per_song = evaluate1d(y_test, y_hat, tst_song) All_stack = np.hstack((error_per_song, mean_per_song)) print " Error per song (ar/val) Mean_per_song (ar/val) :\n" print (All_stack) print "\n" print "song_id :" print (song_id_tst) print "\n" # print('Error per song: \n', Error_per_song) print ( "sklearn --> arrousal : %.4f, valence : %.4f\n" "Pearson Corr --> arrousal : %.4f, valence : %.4f \n" % (RMSE[0], -1.0, pcorr[0][0], -1) # % (RMSE[0],RMSE[1],pcorr[0][0], pcorr[1][0])
plt.show()
# fig.savefig('linReg_regularization_comparison.eps', format='eps')

# predict and evaluate on this fold's test set
print('... predicting ...')

pred = list()
for cx in X_test:
    pred.append(predict(cx))
all_fold_pred.append(pred)
all_fold_y_test.append(y_test.tolist())
y_hat = np.array(pred, dtype=float)

RMSE, pcorr, error_per_song, mean_per_song = evaluate1d(y_test, y_hat, id_test.shape[0])

# All_stack = np.hstack((error_per_song, mean_per_song))
# print(' Error per song (ar/val)   Mean per song (ar/val):\n')
# print(All_stack)
# print('\n')
# print('song_id :')
# print(id_test)
# print('\n')
# print('Error per song: \n', Error_per_song)
print('sklearn --> arousal : %.4f, valence : %.4f\n'
      'Pearson Corr --> arousal : %.4f, valence : %.4f\n'
      % (RMSE[0], -1., pcorr[0][0], -1))
#     % (RMSE[0], RMSE[1], pcorr[0][0], pcorr[1][0])
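# NOTE (editor's sketch, not part of the original script): once the
# cross-validation loop has finished, the accumulated per-fold lists can
# be flattened into single arrays to report one overall score; the
# concatenation below assumes each entry is a 1-D array of frame values:
import numpy as np
from scipy.stats import pearsonr

y_pred_all = np.concatenate([np.asarray(p, dtype=float) for p in all_fold_pred])
y_true_all = np.concatenate([np.asarray(t, dtype=float) for t in all_fold_y_test])

rmse_all = np.sqrt(np.mean((y_true_all - y_pred_all) ** 2))
r_all, p_all = pearsonr(y_true_all, y_pred_all)
print('overall --> RMSE : %.4f, Pearson r : %.4f (p = %.2e)'
      % (rmse_all, r_all, p_all))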