# Scale features with the previously fitted scaler and map labels to [-1, 1]
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
y = y * 2 - 1

print("x_train", x_train.shape, x_train.min(), x_train.max())
print("x_test", x_test.shape, x_test.min(), x_test.max())
print("y", y.shape, y.min(), y.max())

# Clip the test features so they stay inside the range seen during training
x_test = np.clip(x_test, a_min=-1, a_max=1)
print("x_test", x_test.shape, x_test.min(), x_test.max())

mlp = MLP(
    layer_size=[x_train.shape[1], 28, 28, 28, 1],
    regularization=1,
    output_shrink=0.1,
    output_range=[-1, 1],
    loss_type="hardmse")

# Train 15 epochs, batch_size=1, SGD
mlp.train(x_train, y, verbose=2, iteration_log=20000,
          rate_init=0.08, rate_decay=0.8, epoch_train=15, epoch_decay=1)

pred = mlp.predict(x_test)
pred = pred.reshape(-1)
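# The labels were mapped to [-1, 1] via y = y * 2 - 1 and the network was
# configured with output_range=[-1, 1], so the raw predictions live on that
# same scale. A minimal sketch of undoing the mapping; the 0.5 threshold and
# the names pred_01/labels are assumptions, since the original cell does not
# show how pred is consumed:

# Invert y = y * 2 - 1: map predictions from [-1, 1] back to [0, 1]
pred_01 = (pred + 1) / 2
# Hard 0/1 labels, thresholding at the midpoint (assumption)
labels = (pred_01 >= 0.5).astype(int)
print("pred_01", pred_01.min(), pred_01.max(), "positives:", labels.sum())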
# Fit a single MinMaxScaler on the concatenated train and test features
# so both are mapped to the same [0, 1] range
scaler = MinMaxScaler(feature_range=(0, 1)).fit(
    np.concatenate((x_train_data, x_test_data), axis=0))
x_train_data = scaler.transform(x_train_data)
x_test_data = scaler.transform(x_test_data)


# In[ ]:


import ultimate
from ultimate.mlp import MLP

epoch_train = 1000

mlp = MLP(
    layer_size=[x_train_data.shape[1], 28, 28, 1],
    regularization=1,
    output_shrink=0.1,
    output_range=[0, 1],
    loss_type="hardmse")

mlp.train(x_train_data, p_train_data, iteration_log=20000,
          rate_init=0.08, rate_decay=0.8,
          epoch_train=epoch_train, epoch_decay=1)


# In[ ]:


# Round the network output in [0, 1] to hard 0/1 labels
predictions = np.round(mlp.predict(x_test_data).reshape(-1))


# In[ ]:
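# The first cell in this section clips the scaled test features back into the
# training range before predicting; the same idea can be applied to the raw
# outputs here, since this model was trained with output_range=[0, 1]. An
# optional sketch, not part of the original cell:

raw = mlp.predict(x_test_data).reshape(-1)
# Clamp to the trained output range before rounding to 0/1 labels
predictions = np.round(np.clip(raw, 0, 1))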
#X_train = np.load('X_train_hok_msm.npy')
#y = np.load('y_hok_msm.npy')

# Training hyper-parameters
epoch_decay = 2
epoch_train = epoch_decay * 18
rate_init = 0.008
hidden_size = 32
verbose = 1
activation = 'a2m2l'
leaky = -0.001

mlp = MLP(
    layer_size=[X_train.shape[1], hidden_size, hidden_size, hidden_size, 1],
    activation=activation,
    leaky=leaky,
    bias_rate=[],
    regularization=1,
    importance_mul=0.0001,
    output_shrink=0.1,
    output_range=[-1, 1],
    loss_type="hardmse")

# Train and report feature importances (importance_out=True)
mlp.train(X_train, y, verbose=verbose, importance_out=True,
          iteration_log=20000, rate_init=rate_init, rate_decay=0.8,
          epoch_train=epoch_train, epoch_decay=epoch_decay)

# Rebuild the test-set features with the same feature engineering pipeline
X_test, _ = feature_engineering(False)
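# The cell above stops after rebuilding X_test via feature_engineering(False).
# As a sketch of the usual follow-up, mirroring the first cell in this section;
# reusing a fitted scaler and mapping the [-1, 1] output back to [0, 1] are
# assumptions, not shown in the original:

X_test = scaler.transform(X_test)            # same preprocessing as X_train (assumption)
X_test = np.clip(X_test, a_min=-1, a_max=1)  # keep test features in the trained range
pred = mlp.predict(X_test).reshape(-1)
pred = (pred + 1) / 2                        # back to the original [0, 1] scale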
gc.collect()

# Combine the three distance columns into a single totalDistance feature
X_train['totalDistance'] = (X_train['walkDistance']
                            + X_train['rideDistance']
                            + X_train['swimDistance'])
X_test['totalDistance'] = (X_test['walkDistance']
                           + X_test['rideDistance']
                           + X_test['swimDistance'])
del X_train['walkDistance'], X_train['rideDistance'], X_train['swimDistance']
del X_test['walkDistance'], X_test['rideDistance'], X_test['swimDistance']
gc.collect()

# Scale both sets with a scaler fitted on the training features
scaler = preprocessing.MinMaxScaler(copy=False).fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)

mlp_param = {
    'loss_type': 'mse',
    'layer_size': [X_train.shape[1], 8, 8, 8, 1],
    'output_range': [0, 1],
    'importance_out': True,
    'rate_init': 0.02,
    'epoch_train': 5,
    'epoch_decay': 10,
    'verbose': 1,
}

mlp = MLP(mlp_param)
mlp.fit(X_train, y_train)
print("feature importances:", mlp.feature_importances_)

pred = mlp.predict(X_test)
save_csv(pred, test)

end_time = time.time()
print('Total running time:', end_time - start_time)
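# save_csv is defined elsewhere in the script. Purely as an illustration of
# what such a helper could look like for a Kaggle-style submission file; the
# 'Id' and 'winPlacePerc' column names and the clipping to [0, 1] are
# assumptions, not taken from this code:

import numpy as np
import pandas as pd

def save_csv(pred, test, path='submission.csv'):
    # Hypothetical sketch: write predictions next to the test-set IDs
    sub = pd.DataFrame({
        'Id': test['Id'],
        'winPlacePerc': np.clip(pred, 0, 1),
    })
    sub.to_csv(path, index=False)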