def regression_test(X, Y):
    # The neural network expects inputs scaled to [-1, 1]; the tree ensembles
    # and SVR are evaluated on the raw features.
    norm_train_x = preprocessing.MinMaxScaler((-1, 1)).fit_transform(X)
    max_layer_size = len(x_cols) ** 2
    max_layers = [Layer("Sigmoid", units=max_layer_size / 4),
                  Layer("Sigmoid", units=max_layer_size / 2),
                  # Layer("Sigmoid", units=max_layer_size / 2),
                  # Layer("Sigmoid", units=max_layer_size / 4),
                  Layer("Linear")]
    nn = Regressor(layers=max_layers, learning_rate=0.08, n_iter=300)
    regressors = [('Random Forest Regressor', RandomForestRegressor(n_estimators=100), False),
                  ('AdaBoost Regressor', AdaBoostRegressor(), False),
                  ('SVR', SVR(), False),
                  ('Neural Net w/ Sigmoid -> Sigmoid -> Linear', nn, True)]
    for name, reg, norm in regressors:
        train_x = norm_train_x if norm else X
        print name
        preds = cross_validation.cross_val_predict(reg, train_x, Y, cv=K)
        print 'R^2:', metrics.r2_score(Y, preds)
def fit_neural_network(self, topic):
    feature_list = ["char_length", "total_words", "unique_words", "content_tag_ratio"]
    features = MinMaxScaler().fit_transform(self.get_tweet_features(topic, feature_list))
    # test_size must be passed by keyword, otherwise 0.20 is treated as a
    # third array to split.
    data = train_test_split(features, self.tweet_weights(topic), test_size=0.20)
    features_train, features_test, labels_train, labels_test = data
    nn = Regressor(
        layers=[
            Layer("Rectifier", units=20),
            Layer("Rectifier", units=20),
            Layer("Linear")],
        learning_rate=0.02,
        n_iter=20,
        valid_set=(np.array(features_test), np.array(labels_test)))
    nn, data, acc = fit_and_score(nn, data)
    self.nn = nn
    return nn, data, acc
    # The randomized-search variant below is unreachable after the return;
    # it is kept as an alternative fitting path.
    rs = RandomizedSearchCV(nn, param_distributions={
        'learning_rate': stats.uniform(0.001, 0.05),
        'hidden0__units': stats.randint(5, 50),
        'hidden0__type': ["Rectifier", "Sigmoid", "Tanh"],
        'hidden1__units': stats.randint(5, 50),
        'hidden1__type': ["Rectifier", "Sigmoid", "Tanh"],
        'n_iter': stats.randint(5, 25),  # n_iter must be an integer, so sample with randint
    }, n_iter=10)
    rs, data, acc = fit_and_score(rs, data)
    self.nn = rs
    return rs, data, acc
def __init__(self, verbose=False):
    self.name = "Neural net Regression Learner"
    self.network = Regressor(
        layers=[
            Layer("Rectifier", units=100),
            Layer("Linear")],
        learning_rate=0.02,
        n_iter=10)
def __init__(self, new=False, display=False):
    self.possibilities = generate(Learn.n_coups)
    np.random.shuffle(self.possibilities)
    self.explore = 0.
    self.jeu = MJ.Jeu(autorepeat=False, display=display)
    self.jeu.restart(Learn.coups_sautes)
    self.image = self.get_image()
    if new:
        self.nn = Regressor(
            layers=[
                Layer("Linear", units=(Learn.n_cell + Learn.n_coups)),
                Layer("Sigmoid", units=1000),
                Layer("Sigmoid")],
            learning_rate=0.01,
            n_iter=1)
        # Prime the network with a single dummy example so its weights are
        # initialised before incremental training.
        self.nn.fit(
            self.good_shape(self.image, self.possibilities[Learn.n_coups / 2 - 1]),
            np.array([[0]]))
    else:
        self.nn = pickle.load(open('nn.pkl', 'rb'))
        self.nn.fit(
            self.good_shape(self.image, self.possibilities[Learn.n_coups / 2 - 1]),
            np.array([[1]]))
    self.current_data_set = []
def __init__(self, iterations=5):
    results = []
    situations = []
    logging.basicConfig()
    # Play a number of random games and record each (board, choice) situation
    # together with the final score of the game it belongs to.
    for i in range(0, iterations):
        g = Game(print_board=False)
        round_situations = []
        while not g.game_over:
            choices = g.available_cols()
            choice = random.choice(choices)
            round_situations.append(self.game_to_sit(g, choice))
            g.place_piece(choice)
        for situation in round_situations:
            results.append(g.points)
        situations.extend(round_situations)
    # self.pipeline = Pipeline([
    #     ('min/max scaler', MinMaxScaler(feature_range=(0.0, 1.0))),
    #     ('neural network', Regressor(
    self.nn = Regressor(
        layers=[
            Layer("Rectifier", units=100),
            Layer("Linear")],
        learning_rate=0.00002,
        n_iter=10)
    # self.pipeline.fit(np.array(situations), np.array(results))
    print np.array(situations).shape
    self.nn.fit(np.array(situations), np.array(results))
def best_model(model_name):
    rdf_params = {
        'max_features': [5, 10, 15, 20],
        'n_estimators': [10, 15, 20]
    }
    # layer_opt = np.random.randint(low=3, high=15, size=3)
    # mlp_params = {'n_iter': [80], 'learning_rate': [0.02],
    #               'hidden0__type': ['Rectifier'],
    #               'hidden0__units': layer_opt,
    #               'hidden1__type': ['Rectifier'],
    #               'hidden1__units': layer_opt}
    if 'rdf' in model_name:
        # best_model = GridSearchCV(RandomForestRegressor(), rdf_params).fit(features, target)
        # regressor = best_model.best_estimator_
        # return RandomForestRegressor(max_features=regressor.max_features,
        #                              n_estimators=regressor.n_estimators)
        return RandomForestRegressor()
    elif 'mlp' in model_name:
        mlp = Regressor(layers=[
            Layer("Rectifier", units=6),
            Layer("Rectifier", units=4),
            Layer("Linear")
        ])
        # warnings.filterwarnings("ignore", category=DeprecationWarning)  # run this line separately
        # best_model = GridSearchCV(mlp, mlp_params).fit(features, target)
        # regressor = best_model.best_estimator_
        return mlp
def train_dropout_nn(X, y, model_type='classifier', cv_fold=5):
    """Grid-search a dropout-regularized neural network.

    Parameters
    ----------
    X : array-like feature matrix
    y : array-like target values
    model_type : 'classifier' for a Classifier, anything else for a Regressor
    cv_fold : number of cross-validation folds

    Returns
    -------
    The best estimator found by the grid search.
    """
    # a grid of hyperparameters from which to search for an optimal combination
    param_grid = {
        'weight_decay': [0.05, 0.01, 0.005, 0.001],
        'dropout_rate': [0.25, 0.50],
        'learning_momentum': np.arange(0.1, 1.0, 0.3),
        'learning_rate': [0.05, 0.01, 0.005, 0.001],
        'hidden0__units': [8, 16, 32, 64],
        'hidden0__dropout': [0.25, 0.50]
    }
    # create appropriate model type
    if model_type == 'classifier':
        model = Classifier(
            layers=[Layer('Sigmoid'), Layer('Softmax')],
            regularize='L2',
            verbose=True
        )
    else:
        model = Regressor(
            layers=[Layer('Sigmoid'), Layer('Linear')],
            regularize='L2',
            verbose=True
        )
    # do a grid search for optimal hyperparameters
    grid_search = GridSearchCV(
        estimator=model,
        param_grid=param_grid,
        scoring='neg_mean_squared_error',
        cv=cv_fold,
        refit=True
    )
    logging.info('Fitting neural networks regularized with dropout ...')
    grid_search.fit(X, y)
    # print results from grid search
    logging.info('best hyperparameter combination %s' % grid_search.best_params_)
    gs_results = grid_search.cv_results_
    for params, mean_score in zip(
        gs_results['params'], gs_results['mean_test_score']
    ):
        print(params, '%.2f' % np.sqrt(-mean_score))
    # return the final model
    return grid_search.best_estimator_
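# A minimal usage sketch for train_dropout_nn, assuming the sknn/sklearn
# imports used above; the synthetic data, shapes, and variable names below
# are illustrative assumptions, not from the original code. Note the full
# grid is large, so even toy data can take a while.
import numpy as np

X_demo = np.random.rand(200, 8)              # 200 samples, 8 features
y_demo = X_demo.sum(axis=1, keepdims=True)   # simple additive target
best_net = train_dropout_nn(X_demo, y_demo, model_type='regressor', cv_fold=3)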
def neural_net(features, target, test_size_percent=0.2, cv_split=3, n_iter=100, learning_rate=0.01):
    '''features -> pandas DataFrame with attributes as columns
    target -> pandas DataFrame with the target column for prediction
    test_size_percent -> fraction of data points to be used for testing'''
    scale = preprocessing.MinMaxScaler()
    X_array = scale.fit_transform(features)
    y_array = scale.fit_transform(target)
    mlp = Regressor(
        layers=[
            Layer("Rectifier", units=5),  # hidden layer 1
            Layer("Rectifier", units=3),  # hidden layer 2
            Layer("Linear")               # output layer
        ],
        n_iter=n_iter,
        learning_rate=learning_rate)  # use the argument instead of a hard-coded 0.01
    X_train, X_test, y_train, y_test = train_test_split(
        X_array, y_array.T.squeeze(), test_size=test_size_percent, random_state=4)
    mlp.fit(X_train, y_train)
    test_prediction = mlp.predict(X_test)
    # Pass the TimeSeriesSplit object itself as cv; passing tscv.n_splits
    # would silently fall back to ordinary k-fold splitting.
    tscv = TimeSeriesSplit(cv_split)
    training_score = cross_val_score(mlp, X_train, y_train, cv=tscv)
    testing_score = cross_val_score(mlp, X_test, y_test, cv=tscv)
    print "Cross-val Training score:", training_score.mean()
    # print "Cross-val Testing score:", testing_score.mean()
    training_predictions = cross_val_predict(mlp, X_train, y_train, cv=tscv)
    testing_predictions = cross_val_predict(mlp, X_test, y_test, cv=tscv)
    training_accuracy = metrics.r2_score(y_train, training_predictions)
    # test_accuracy_model = metrics.r2_score(y_test, test_prediction_model)
    test_accuracy = metrics.r2_score(y_test, testing_predictions)
    # print "Cross-val predicted accuracy:", training_accuracy
    print "Test-predictions accuracy:", test_accuracy
    plot_model(target, y_train, y_test, training_predictions, testing_predictions)
    return mlp
def fit_compiled(self, data_matrix_in, target=None):
    data_matrix_out = self._fit_transform(data_matrix_in, target=target)
    n_features_in = data_matrix_in.shape[1]
    n_features_out = data_matrix_out.shape[1]
    n_features_hidden = int(n_features_in * self.deepnet_n_features_hidden_factor)
    layers = []
    for i in range(self.deepnet_n_hidden_layers):
        layers.append(Layer("Rectifier", units=n_features_hidden, name='hidden%d' % i))
    layers.append(Layer("Linear", units=n_features_out))
    self.net = Regressor(layers=layers,
                         learning_rate=self.deepnet_learning_rate,
                         valid_size=0.1)
    self.net.fit(data_matrix_in, data_matrix_out)
    return self.net
def train_nn(train_set, validation_set):
    nn = Regressor(
        layers=[Layer("Sigmoid", units=2),
                Layer("Sigmoid")],
        learning_rate=0.0001,
        batch_size=5,
        n_iter=10000,
        valid_set=validation_set,
        verbose=True,
    )
    nn.fit(train_set[0], train_set[1])
    return nn
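# Hedged usage sketch for train_nn: the toy product-of-features data below is
# an assumption for illustration only. The validation tuple is passed straight
# through to sknn's valid_set.
import numpy as np

X = np.random.uniform(-1, 1, (100, 2))
y = (X[:, 0] * X[:, 1]).reshape(-1, 1)
model = train_nn((X[:80], y[:80]), (X[80:], y[80:]))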
def __init__(self, params=None, seq_pre_processor=None):
    self.scale = StandardScaler()
    self.pre_processor = seq_pre_processor
    self.params = params
    if params is not None:
        # Initialize the network
        self.net = Regressor(layers=params['layers'],
                             learning_rate=params['learning_rate'],
                             n_iter=params['n_iter'],
                             dropout_rate=params['dropout_rate'],
                             batch_size=params['batch_size'],
                             regularize=params['regularize'],
                             valid_size=params['valid_size'])
        # Initialize the vectorizer
        self.vectorizer = graph.Vectorizer(r=params['radius'],
                                           d=params['d_seq'],
                                           min_r=params['min_r'],
                                           normalization=params['normalization'],
                                           inner_normalization=params['inner_normalization'],
                                           nbits=params['nbits_seq'])
def __init__(self, new=False, display=False):
    # self.possibilities = generate(Learn.n_coups)
    # np.random.shuffle(self.possibilities)
    self.jeu = MJ.Jeu(autorepeat=False, display=display)
    self.jeu.restart(Learn.coups_sautes)
    self.previous_image = self.get_image()
    self.jeu.update_all()
    self.current_image = self.get_image()
    if new:
        self.nn = Regressor(layers=[Layer("Linear", units=Learn.n_cell * 2),
                                    Layer("Linear", units=Learn.n_cell * 4),
                                    Layer("Linear", units=Learn.n_cell)],
                            learning_rate=0.01,
                            n_iter=1)
        self.nn.fit(self.good_shape_2(self.previous_image, self.current_image),
                    self.good_shape_1(self.current_image))
    else:
        self.nn = pickle.load(open('nn_image_prediction.pkl', 'rb'))
        self.nn.fit(self.good_shape_2(self.previous_image, self.current_image),
                    self.good_shape_1(self.current_image))
    self.current_data_set = []
def get_nn_clf():
    nn = Regressor(
        layers=[
            # Layer('Maxout', units=300, pieces=2),
            # Layer('Maxout', units=300, pieces=2),
            Layer('Rectifier', units=500),
            Layer('Rectifier', units=500),
            Layer('Linear')],
        learning_rate=0.1,
        learning_rule='adagrad',
        learning_momentum=0.9,
        batch_size=40,
        valid_size=0.1,
        n_iter=200,
        verbose=True)
    clf = make_pipeline(OneHotEncoder(categorical_features=[0], sparse=False),
                        MinMaxScaler(feature_range=(-0.5, 0.5)),
                        nn)
    return clf
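# Hedged usage sketch: get_nn_clf one-hot encodes column 0 and min/max scales
# everything, so the toy matrix below puts a small integer category code in
# column 0. The data and shapes are assumptions for illustration.
import numpy as np

X = np.column_stack([np.random.randint(0, 3, 100),  # category code in column 0
                     np.random.rand(100, 2)])       # two continuous features
y = np.random.rand(100, 1)
clf = get_nn_clf()
clf.fit(X, y)
print(clf.predict(X[:5]))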
def trainAndTest(l1, l2, i, bestRMSEOutput, meanRMSEOutput):
    nn = Regressor(
        layers=[
            Layer("Rectifier", units=l1),
            Layer("Tanh", units=l2),
            Layer("Linear")],
        learning_rate=0.02,
        n_iter=NN_ITERATIONS)
    # Cross-validation mode
    scores = cross_validation.cross_val_score(nn, attributes, ratings,
                                              scoring='mean_squared_error',
                                              cv=CV_ITERATIONS)
    print("Scores for " + str(l1) + " " + str(l2) + " :")
    # Convert the (negated) MSE scores to RMSE on the original rating scale.
    scores_a = (abs(np.array(scores)) * 10000)
    scores_a = scores_a ** .5
    bestRMSEOutput[i] = scores_a.min()
    meanRMSEOutput[i] = scores_a.mean()
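# Hedged driver sketch for trainAndTest: sweep a few layer-size pairs and
# collect the per-configuration RMSEs. NN_ITERATIONS, CV_ITERATIONS,
# attributes and ratings must already be defined as in the original script;
# the size grid below is an illustrative assumption.
sizes = [(10, 5), (20, 10), (40, 20)]
bestRMSE = [0.0] * len(sizes)
meanRMSE = [0.0] * len(sizes)
for i, (l1, l2) in enumerate(sizes):
    trainAndTest(l1, l2, i, bestRMSE, meanRMSE)
print(bestRMSE)
print(meanRMSE)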
def train_for_sum(y_train):
    # Build targets as the sum of each min/max-scaled feature pair; x_train
    # is expected to be defined at module level.
    features_scaler = MinMaxScaler()
    for pair in features_scaler.fit_transform(x_train):
        y_train.append(pair[0] + pair[1])
    y_train = np.array(y_train).reshape(len(y_train), 1)
    pipeline = Pipeline([
        ('min/max scaler', MinMaxScaler(feature_range=(-1.0, 1.0))),
        ('neural network', Regressor(
            layers=[Layer("Rectifier", units=2),
                    Layer("Linear", units=1)],
            learning_rate=0.01,
            verbose=True,
            n_iter=1000))
    ])
    pipeline.fit(x_train, y_train)
    return (pipeline, features_scaler)
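# Usage sketch for train_for_sum; it reads the module-level x_train, so that
# must exist first. The random pairs are illustrative assumptions.
import numpy as np

x_train = np.random.rand(500, 2)
pipeline, scaler = train_for_sum([])
print(pipeline.predict(np.array([[0.2, 0.3]])))  # approximates the scaled pair sum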
def evalOne(parameters):
    all_obs = []
    all_pred = []
    for location in locations:
        trainX, testX, trainY, testY = splitDataForXValidation(
            location, "location", data, all_features, "target")
        normalizer_X = StandardScaler()
        trainX = normalizer_X.fit_transform(trainX)
        testX = normalizer_X.transform(testX)
        normalizer_Y = StandardScaler()
        trainY = normalizer_Y.fit_transform(trainY)
        testY = normalizer_Y.transform(testY)
        layers = []
        for _ in range(0, parameters["hidden_layers"]):
            layers.append(
                Layer(parameters["hidden_type"], units=parameters["hidden_neurons"]))
        layers.append(Layer("Linear"))
        model = Regressor(layers=layers,
                          learning_rate=parameters["learning_rate"],
                          n_iter=parameters["iteration"],
                          random_state=42)
        # Fit once on the normalized training data; the original fitted the
        # same model twice in a row, which only wasted work.
        model.fit(np.array(trainX), np.array(trainY))
        prediction = model.predict(testX)
        prediction = normalizer_Y.inverse_transform(prediction)
        testY = normalizer_Y.inverse_transform(testY)
        print("location: " + str(location) + " -> " + str(rmseEval(prediction, testY)[1]))
        all_obs.extend(testY)
        all_pred.extend(prediction)
    return rmseEval(all_obs, all_pred)[1]
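# Illustrative parameters dict for evalOne; the keys are read straight from
# the function above, but the values are assumptions, not tuned settings.
parameters = {
    "hidden_layers": 2,
    "hidden_type": "Sigmoid",
    "hidden_neurons": 80,
    "learning_rate": 0.01,
    "iteration": 100,
}
rmse = evalOne(parameters)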
def trainNeuralNetwork(data, columns, targetColumn, parameters):
    modelColumns = []
    for column in columns:
        if column != targetColumn:
            modelColumns.append(column)
    modelData = []
    for i in range(0, len(data[targetColumn])):
        record = []
        for column in modelColumns:
            record.append(data[column][i])
        modelData.append(record)
    layers = []
    layers.append(Layer("Rectifier", units=60))
    for i in range(0, parameters["hidden_layers"]):
        layers.append(
            Layer(parameters["hidden_type"], units=parameters["hidden_neurons"]))
    layers.append(Layer("Linear"))
    model = Regressor(
        layers=layers,
        # Earlier fixed architecture, kept for reference:
        # Layer("Rectifier", units=60),
        # Layer("Sigmoid", units=80),
        # Layer("Sigmoid", units=80),
        # Layer("Linear")
        learning_rate=0.01,
        n_iter=parameters["iteration"])
    X = np.array(modelData)
    y = np.array(data[targetColumn])
    model.fit(X, y)
    return NeuralNetworkModel(model, modelColumns)
def NeuralNet(train, test, features):
    eta = 0.025
    niter = 2000
    regressor = Regressor(
        layers=[
            Layer('Rectifier', units=100),
            Layer("Tanh", units=100),
            Layer("Sigmoid", units=100),
            Layer('Linear')],
        learning_rate=eta,
        learning_rule='momentum',
        learning_momentum=0.9,
        batch_size=100,
        valid_size=0.01,
        n_stable=100,
        n_iter=niter,
        verbose=True)
    print regressor.__class__.__name__
    start = time.time()
    regressor.fit(np.array(train[list(features)]), train[goal])
    print ' -> Training time:', time.time() - start
    if not os.path.exists('result/'):
        os.makedirs('result/')
    # TODO: clean up this flattening workaround
    predictions = regressor.predict(np.array(test[features]))
    try:
        # try to flatten a list that might be flattenable
        predictions = list(itertools.chain.from_iterable(predictions))
    except TypeError:
        pass
    csvfile = 'result/dat-nnet-eta%s-niter%s.csv' % (str(eta), str(niter))
    with open(csvfile, 'w') as output:
        writer = csv.writer(output, lineterminator='\n')
        writer.writerow([myid, goal])
        for i in range(0, len(predictions)):
            writer.writerow([i + 1, predictions[i]])
def trainAndTest(l1, l2, i, bestRMSEOutput, meanRMSEOutput):
    nn = Regressor(layers=[
        Layer("Rectifier", units=l1),
        Layer("Tanh", units=l2),
        Layer("Linear")
    ],
        learning_rate=0.02,
        n_iter=NN_ITERATIONS)
    # Cross-validation mode
    scores = cross_validation.cross_val_score(nn, attributes, ratings,
                                              scoring='mean_squared_error',
                                              cv=CV_ITERATIONS)
    # No cross-validation; run only once on randomly split data:
    # scores = []
    # attributes_train, attributes_test, ratings_train, ratings_test = \
    #     cross_validation.train_test_split(attributes, ratings, test_size=0.10, random_state=42)
    # print(len(attributes_train))
    # print(len(attributes_test))
    # print(len(ratings_train))
    # print(len(ratings_test))
    # nn.fit(attributes_train, ratings_train)
    # ratings_result = nn.predict(attributes_test)
    # mse = MSE(ratings_test, ratings_result) ** .5
    # scores.append(mse)
    print("Scores for " + str(l1) + " " + str(l2) + " :")
    scores_a = (abs(np.array(scores)) * 10000)
    scores_a = scores_a ** .5
    bestRMSEOutput[i] = scores_a.min()
    meanRMSEOutput[i] = scores_a.mean()
def trainNeuralNetwork(data, columns, targetColumn, params):
    modelColumns = []
    for column in columns:
        if column != targetColumn:
            modelColumns.append(column)
    modelData = []
    for i in range(0, len(data[targetColumn])):
        record = []
        for column in modelColumns:
            record.append(data[column][i])
        modelData.append(record)
    model = Regressor(layers=[Layer("Rectifier", units=100),
                              Layer("Linear")],
                      learning_rate=0.02,
                      n_iter=100)
    # sknn expects numpy arrays rather than nested Python lists.
    model.fit(np.array(modelData), np.array(data[targetColumn]))
    return NeuralNetworkModel(model, modelColumns)
from sknn.mlp import Regressor
from sknn.mlp import Layer
import numpy as np
import matplotlib.pyplot as plt

# Design Network
hiddenLayer = Layer("Rectifier", units=6)
outputLayer = Layer("Linear", units=1)
nn = Regressor([hiddenLayer, outputLayer],
               learning_rule='sgd',
               learning_rate=.001,
               batch_size=5,
               loss_type="mse")

# Generate Data
def cubic(x):
    return x**3 + x**2 - x - 1

def get_cubic_data(start, end, step_size):
    X = np.arange(start, end, step_size)
    X.shape = (len(X), 1)
    y = np.array([cubic(X[i]) for i in range(len(X))])
    y.shape = (len(y), 1)
    return X, y

# Train Model
X, y = get_cubic_data(-2, 2, .1)
nn.fit(X, y)
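# A short follow-on sketch (an assumption, not in the original): visualize
# the network's fit against the true cubic using the matplotlib import above.
y_pred = nn.predict(X)
plt.plot(X, y, label='true cubic')
plt.plot(X, y_pred, label='network fit')
plt.legend()
plt.show()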
#============================Save pre-processed data===========================
data.to_csv('revised_data.csv', index=False)
data = pd.read_csv('revised_data.csv')
#==============================================================================

def calculate_RMSE(predicted, actual):
    return math.sqrt(mean_squared_error(actual, predicted))

#===========================Neural Network Fitting=============================
training_data = data.copy()
training_data.drop('duration', 1, inplace=True)
target_data = training_data.pop('size')

# cross validation
X_train, X_test, y_train, y_test = cross_validation.train_test_split(
    training_data.values, target_data.values, test_size=0.1, random_state=42)

i = 0.1  # learning rate
neu_net_reg = Regressor(layers=[Layer("Sigmoid", units=30),
                                Layer("Linear")],
                        learning_rate=i,
                        n_iter=19)
neu_net_reg.fit(X_train, y_train)
predicted_target_data = neu_net_reg.predict(X_test)
print 'Learning rate: ' + str(i) + ' RMSE is: ' + str(
    calculate_RMSE(y_test, predicted_target_data))
#==============================================================================
melee = Melee()
# gameState = [0 for i in acceptedInputs]
# controllerState = [0 for i in acceptedOutputs]
inputs = []
outputs = []
melee.listen(formattedReplay, lambda x, y: listener(x, y, inputs, outputs))
# with open(sys.argv[2], 'w') as outfile:
#     json.dump(inputs + outputs, outfile)

nn = Regressor(
    layers=[
        # Layer("Sigmoid", units=100),
        Layer("Sigmoid", units=200),
        Layer("Linear")
    ],
    learning_rate=0.02,
    n_iter=80)

# Standardize the inputs before training.
inScaler = StandardScaler()
npin = np.array(inputs)
inScaler.fit(npin)
npout = np.array(outputs)
# print(insc)
# for i in inScaler.transform(npin):
#     print(i)
nn.fit(inScaler.transform(npin), npout)
pickle.dump((acceptedInputs, nn), open('nn4.pkl', 'wb'))
# print(nn.predict(i for i in inputs[10]))
# for i in inputs:
X_train_bg = X_train_bg_all[0:nBackgroundEvents, :]
X_test_bg = X_test_bg_all[0:nBackgroundEvents, :]
X_test_sig = buildArraysFromROOT(tree, susyFeaturesNtup, cutSignal,
                                 nSignalEvents, nSignalEvents,
                                 "TESTING SAMPLE (signal)")
X_test_sig = min_max_scaler.transform(X_test_sig)

# Set target equal to input - auto-encoder
Y_train = X_train_bg

# NEURAL NETWORK TRAINING AND TESTING
# Set up neural network
if runTraining:
    print "Starting neural network training"
    nn = Regressor(
        layers=[
            Layer("Rectifier", units=30),
            Layer("Linear")],
        learning_rate=0.01,
        batch_size=100,
        # learning_rule="momentum",
        n_iter=100)
        # valid_size=0.25)
    # Training
    nn.fit(X_train_bg, Y_train)
    pickle.dump(nn, open('autoencoder.pkl', 'wb'))
if not runTraining:
    nn = pickle.load(open('autoencoder.pkl', 'rb'))

# Testing
predicted_diff = nn.predict(X_test_bg)
predicted_signal = nn.predict(X_test_sig)
# Reconstruction error
# Tail of the standard logloss metric; the function header is reconstructed
# from context.
def logloss(act, pred):
    ll = sum(act * sp.log(pred) + sp.subtract(1, act) * sp.log(sp.subtract(1, pred)))
    ll = ll * -1.0 / len(act)
    return ll

print 'Loading data...'
print('Before read_csv', int((time.time() - t_start) * 1000))
Xtrain = pd.read_csv(options.xtrain)
Xval = pd.read_csv(options.xval)
Xtest = pd.read_csv(options.xtest)
Ytrain = pd.read_csv(options.ytrain)['Converted']
Yval = pd.read_csv(options.yval)['Converted']
print('After read_csv', int((time.time() - t_start) * 1000))

print 'Fitting...'
clf = Regressor(
    # The original snippet left the layer list empty; a placeholder
    # architecture is supplied here so the call is syntactically valid.
    layers=[Layer('Rectifier', units=100), Layer('Linear')],
    warning=None,
    parameters=None,
    random_state=None,
    learning_rule=u'sgd',
    learning_rate=0.01,
    learning_momentum=0.9,
    normalize=None,
    regularize=None,
    weight_decay=None,
    dropout_rate=None,
    batch_size=1,
    n_iter=None,
    n_stable=10,
    f_stable=0.001,
    valid_set=None,
    # the original snippet is truncated here; the remaining keyword
    # arguments (all defaults) are omitted
)
# Tail of a training helper; the header below is reconstructed from the
# call train(nn, X, Y, './mod_prim', 2, 2) further down.
def train(nn, X_train, Y_train, path, n_epoch, save_part):
    nn.n_iter = n_epoch % save_part
    nn.fit(X_train, Y_train)
    pickle.dump(nn, open(path, 'w'))

N = 100
X, Y = create_ds(N)
print X.shape, '--', Y.shape
print X[:5]
print Y[:5]
print '___________'
nn = Regressor(
    layers=[
        # Convolution("Rectifier", channels=1, kernel_shape=(1, 1)),
        Layer("Rectifier", units=128),
        Layer("Rectifier", units=128),
        Layer("Linear", units=64),
        Layer("Tanh")
    ],
    learning_rate=0.01,
    verbose=True)
train(nn, X, Y, './mod_prim', 2, 2)

print "#-----TESTING-----#"
nn = pickle.load(open('./mod_prim', 'r'))
# create_ds returns a (features, targets) pair, so unpack before predicting.
test_X, test_Y = create_ds(2 * N)
pred = nn.predict(test_X)
for i, p in enumerate(pred):
    # plt.imshow(test_X[i])
    # plt.show()
    print test_X[i], ' == ', round(1 / p)
import numpy
import time
from sklearn import preprocessing
from sklearn import metrics
from sknn.mlp import Classifier, Regressor, Layer

training_data = "FullModemConfigSpace-2015-07-19.csv"
training = numpy.loadtxt(open(training_data), delimiter=",", skiprows=1)

testing_data = "path to dataset used for testing here"
testing = numpy.loadtxt(
    open(testing_data),
    delimiter="space, comma, semicolon, whatever separates the attributes in the samples"
)
"""skiprows=1 if the attribute names are at the top"""

tr_x = training[:, 0:38]
tr_y = training[:, 39:43]
ts_x = testing[:, 0:38]
ts_y = testing[:, 39:43]

network = Regressor(layers=[
    Layer("Linear", units=39),
    Layer("Sigmoid", units=22),
    Layer("Linear", units=4)
],
    learning_rate=0.001,
    n_iter=25)
network.fit(tr_x, tr_y)
cont_prediction = network.predict(ts_x)

"""This will write the predictions to an output file; the extension can be
changed as needed."""
with open("full_prediction.txt", "w") as output:
    for y in cont_prediction:
        print(y, file=output)
regline.set_color('red')
R2_score_DF_RF_CV = r2_score(predict_DF_RF_CV["AC_cons"],
                             predict_DF_RF_CV["AC_ConsPred_RF_CV"])
mean_absolute_error_DF_CV = mean_absolute_error(
    predict_DF_RF_CV["AC_cons"], predict_DF_RF_CV["AC_ConsPred_RF_CV"])
mean_squared_error_DF_CV = mean_squared_error(
    predict_DF_RF_CV["AC_cons"], predict_DF_RF_CV["AC_ConsPred_RF_CV"])
coeff_variation_DF_CV = np.sqrt(
    mean_squared_error_DF_CV) / predict_DF_RF_CV["AC_cons"].mean()

from sknn.mlp import Regressor, Layer

reg_NN = Regressor(
    layers=[
        Layer("Rectifier", units=5),  # hidden layer 1
        Layer("Rectifier", units=3),  # hidden layer 2
        Layer("Linear")               # output layer
    ],
    n_iter=100,
    learning_rate=0.02)
reg_NN.fit(X_train_norm.as_matrix(), y_train_norm.as_matrix())
predict_DF_NN = reg_NN.predict(X_test_norm.as_matrix())
predict_DF_NN_CV = pd.DataFrame(predict_DF_NN,
                                index=y_test_norm.index,
                                columns=["AC_ConsPred_NN_CV"])
predict_DF_NN_CV = predict_DF_NN_CV.join(y_test_norm).dropna()
predict_DF_NN_CV['2014-08-01':'2014-08-20'].plot()
R2_score_DF_NN_CV = r2_score(predict_DF_NN_CV["AC_cons"],
                             predict_DF_NN_CV["AC_ConsPred_NN_CV"])
# The original was truncated mid-call here; completed to mirror the RF block.
mean_absolute_error_DF_CV = mean_absolute_error(
    predict_DF_NN_CV["AC_cons"], predict_DF_NN_CV["AC_ConsPred_NN_CV"])
# importance = bst.get_fscore()  # feature importance
# plot_importance(bst)
# pyplot.show()
pre_label = bst.predict(xgbtest)
loss = t - pre_label
MAPE = (sum(abs(loss) / t)) / len(t)
mapeSet.append(MAPE)
para.append([inn, im, ie])
count = count + 1
print('---> ', count, '......')

# scikit-neuralnetwork for regression (problematic: fails to train)
if 0:
    mlp = Regressor(layers=[Layer('Rectifier', units=100, weight_decay=0.0001, dropout=0.5),
                            Layer('Linear')],
                    learning_rule='sgd',
                    learning_rate=0.01,
                    batch_size=500,
                    n_iter=10,
                    loss_type='mse')
    mlp.fit(X, y)
    pre_label = mlp.predict(T)

'''
# %%
# plot
loss = t - pre_label  # error
Z = np.zeros([len(loss)])
plt.plot(loss, 'g')
plt.plot(Z, 'r')
plt.xlabel('Number of the sample')
plt.ylabel('loss(s)')
plt.title('Visualizing loss')
plt.show()
# %%
'''
elif method == 'svc':
    classifier = svm.SVC(kernel='sigmoid',
                         probability=True,
                         random_state=random_state,
                         verbose=False)
elif method == 'svr':
    classifier = svm.SVR(kernel='rbf', verbose=False, cache_size=1000)
elif method == 'nnreg':
    # Give each hidden layer a distinct name; the original reused the
    # name 'hidden0' for all three.
    classifier = Regressor(
        layers=[
            Layer('Linear', name='hidden0', units=10),
            Layer('Sigmoid', name='hidden1', units=10),
            Layer('Tanh', name='hidden2', units=10),
            # Layer('Linear', name='hidden0', units=50),
            # Layer('Rectifier', name='hidden1', units=3),
            # Layer('Linear', name='hidden2', units=5),
            Layer('Linear')
        ],
        learning_rate=0.001,
        n_iter=25)
elif method == 'linReg':
    classifier = linear_model.LinearRegression()
else:
    print('unknown method')
    exit()

if eval:
    if method in ['nnreg', 'linReg', 'svr']:
        rkf = RepeatedKFold(n_splits=5, n_repeats=1, random_state=random_state)