def generate_nn_solver_ratio_result(X_train, X_test, y_train, y_test):
    # generate the result for random samples
    ratio_result = pd.DataFrame(y_test, columns=['ratio_baseline'])

    print("Solver Function: lbfgs")
    model = neural_network.MLPRegressor(solver='lbfgs',
                                        max_iter=1000,
                                        learning_rate_init=0.005)
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    ratio_result['lbfgs'] = y_pred

    print("Solver Function: sgd")
    model = neural_network.MLPRegressor(solver='sgd',
                                        max_iter=1000,
                                        learning_rate_init=0.005)
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    ratio_result['sgd'] = y_pred

    print("Solver Function: adam")
    model = neural_network.MLPRegressor(solver='adam',
                                        max_iter=1000,
                                        learning_rate_init=0.005)
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    ratio_result['adam'] = y_pred

    return ratio_result
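
A quick way to compare the three solver columns against the held-out baseline is per-column RMSE. A minimal sketch (assuming the pandas/numpy/sklearn imports used above; solver_rmse is a hypothetical helper, not part of the original code):

import numpy as np
from sklearn.metrics import mean_squared_error

def solver_rmse(ratio_result):
    # Root-mean-squared error of each solver's predictions vs. the baseline column
    baseline = ratio_result['ratio_baseline']
    return {col: np.sqrt(mean_squared_error(baseline, ratio_result[col]))
            for col in ('lbfgs', 'sgd', 'adam')}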
Example #2
    def neural_network_split(self,
                             X,
                             yLeft,
                             yRight,
                             hidden_layers=(5, 5),
                             act="relu",
                             solve="sgd"):

        # fit left model
        xTrain, xTest, yTrain, yTest = train_test_split(X,
                                                        yLeft,
                                                        test_size=0.4,
                                                        shuffle=True)
        nn = neural_network.MLPRegressor(hidden_layers,
                                         activation=act,
                                         solver=solve)
        lm = nn.fit(xTrain, yTrain)
        leftScore = lm.score(xTest, yTest)

        # fit right model
        xTrain, xTest, yTrain, yTest = train_test_split(X,
                                                        yRight,
                                                        test_size=0.4,
                                                        shuffle=True)
        nn = neural_network.MLPRegressor(hidden_layers,
                                         activation=act,
                                         solver=solve)
        rm = nn.fit(xTrain, yTrain)
        rightScore = rm.score(xTest, yTest)

        return (lm, rm, [leftScore, rightScore])
Example #3
    def __init__(self,
                 number,
                 verbose=False,
                 hiddenlayers=[50, 30, 15],
                 alpha=0.01,
                 epsilon=0.05,
                 gamma=0.8,
                 filename='CarlosMonteros'):
        super().__init__(number, verbose)
        self.alpha = alpha
        # Note: epsilon unused since we use softmax decision making
        self.epsilon = epsilon
        self.gamma = gamma
        self.name = 'Carlos Monteros'
        self.hiddenlayers = hiddenlayers
        self.episodeStates = []
        self.episodeActions = []
        self.episodeReturns = []
        self.goActionIndex = 14
        self.noActionIndex = 15
        self.currentAction = self.noActionIndex
        # Need to track previous score to calculate rewards
        self.prevScore = 0
        self.cribThrow = []
        self.throwingState = []
        self.filename = filename
        self.filedir = 'BrainsInJars'
        self.fullfilepegging = os.path.join(os.getcwd(), self.filedir,
                                            self.filename + "_peg.brain")
        self.fullfilethrowing = os.path.join(os.getcwd(), self.filedir,
                                             self.filename + "_throw.brain")
        if os.path.exists(self.fullfilepegging):
            self.pegbrain = joblib.load(self.fullfilepegging)
        else:
            self.pegbrain = sknn.MLPRegressor(
                hidden_layer_sizes=self.hiddenlayers,
                activation='relu',
                solver='adam',
                alpha=self.alpha,
                batch_size=4,
                max_iter=200)

        if os.path.exists(self.fullfilethrowing):
            self.throwingBrain = joblib.load(self.fullfilethrowing)
        else:
            self.throwingBrain = sknn.MLPRegressor(hidden_layer_sizes=(25, 10),
                                                   activation='relu',
                                                   solver='adam',
                                                   alpha=self.alpha,
                                                   batch_size=1,
                                                   max_iter=200)
        warnings.filterwarnings("ignore", category=UserWarning)
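
The comment above notes that epsilon goes unused because actions are chosen by softmax over the network's Q-estimates. The agent's actual selection method is not shown; a minimal sketch of that rule (hypothetical helper):

import numpy as np

def softmax_action(q_values, temperature=1.0):
    # Boltzmann/softmax exploration: sample an action with probability
    # proportional to exp(Q / temperature)
    q = np.asarray(q_values, dtype=float) / temperature
    q -= q.max()  # stabilize the exponentials
    probs = np.exp(q) / np.exp(q).sum()
    return np.random.choice(len(probs), p=probs)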
Example #4
def MLPReg(tsize=30, rep=500):
    '''
    Test the MLP (multi-layer perceptron) regressor.
    Use standard scaling of features.
    MLP is said to be sensitive to scaling,
    so this is a separate function for testing MLP with feature scaling.
    '''
    X, y = getNumericXy()
    Xs = StandardScaler().fit_transform(X)
    rmselist = []
    for r in range(rep):
        print(r, end=",")
        if r % 30 == 0: print()
        X_train, X_test, y_train, y_test = train_test_split(Xs,
                                                            y,
                                                            test_size=tsize /
                                                            100.0)
        mdl = neural_network.MLPRegressor(max_iter=1000)
        model = mdl.fit(X_train, y_train)
        predictions = model.predict(X_test)
        rmse = np.sqrt(mean_squared_error(y_test, predictions))
        rmselist.append(rmse)
    plt.figure()
    #plt.hist(rmselist,max(int(np.sqrt(rep)*1.5),10))
    sns.distplot(rmselist)
    return rmselist
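
Note that the scaler above is fit on the full dataset before the split, so test-row statistics leak into training. A leakage-free variant (sketch, same libraries) keeps the scaler inside a Pipeline so it is fit on the training fold only:

from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn import neural_network

X, y = getNumericXy()  # same helper as above
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
# Scaling happens inside fit(), using only X_train statistics
mdl = make_pipeline(StandardScaler(), neural_network.MLPRegressor(max_iter=1000))
mdl.fit(X_train, y_train)
predictions = mdl.predict(X_test)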
Example #5
    def evaluate(self, param_index, is_save, first_time, total_time):
        f1 = {}
        count = 0
        for i in param_index:
            model_path = ""
            evaluate_result = {}
            try:
                param = self.origin_params[i]
                hidden_layer_sizes_list = []
                for j in range(param['n_layers'] - 3):
                    hidden_layer_sizes_list.append(param['hidden_layer_size'])
                n_layers = param.pop('n_layers')
                hidden_layer_size = param.pop('hidden_layer_size')
                param['hidden_layer_sizes'] = tuple(hidden_layer_sizes_list)
                clf = neural_network.MLPRegressor(**param)
                # rf = clf.fit(self.x_train, self.y_train, eval_set=[(self.x_test, self.y_test)],
                #               eval_metric='mlogloss', verbose=True)
                rf = clf.fit(self.x_train, self.y_train)
                y_pred = rf.predict(self.x_test)
                evaluate_result = classifier_evaluate(self.y_test, y_pred, self.log)
                f1[i] = metrics.f1_score(self.y_test, y_pred, average='weighted')
                if is_save:
                    model_path = sys.path[0] + os.sep + str(uuid.uuid1())
                    joblib.dump(rf, model_path)

            except Exception as e:
                result = eva_result(False, str(e), 'sklearn', 'MLPRegressor', param,
                                    evaluate_result, 'classification', model_path, first_time + count, total_time, self.class_name)
            else:
                result = eva_result(True, '', 'sklearn', 'MLPRegressor', param,
                                    evaluate_result, 'classification', model_path, first_time + count, total_time, self.class_name)

            # print(json.dumps(result).decode("unicode-escape"))
            print(json.dumps(result))
            count += 1
Example #6
def nn_sip(x_train_, y_train_, x_test_, y_test_):
    activation_list = ['identity', 'logistic', 'tanh', 'relu']
    colors_list = ['red', 'green', 'blue', 'black']
    marks_list = ['o', '*', '^', 's']
    plt.figure(figsize=(20, 10))
    for act, color, mark in zip(activation_list, colors_list, marks_list):
        print(act)
        nn_reg = neural_network.MLPRegressor(random_state=1, activation=act)
        nn_reg = nn_reg.fit(x_train_, y_train_)
        predict = nn_reg.predict(x_test_)
        # write each activation's predictions to its own file instead of
        # overwriting a single shared nn_result.csv
        pd.DataFrame(predict).to_csv(store_path + '\\nn_result_' + act + '.csv', index=False, sep=',')
        print("score: ", nn_reg.score(x_test_, y_test_))
        count = Counter(predict * y_test_ > 0)
        accuracy = count[True] / (count[True] + count[False])
        print(count)
        print('accuracy:', accuracy)
        plt.scatter(x_test_.index, predict - y_test_, s=5, c=color, marker=mark,
                    label=act + ':' + str(round(accuracy, 2)))

    plt.axhline(c='black')
    plt.legend()
    plt.title('Prediction Error of Neural Network')
    plt.xlabel("Date")
    plt.ylabel("Prediction Error")
    plt.savefig(store_path + '\\nn.png', dpi=600, bbox_inches='tight')
    plt.show()
Example #7
def _get_model(db, logger):
    """
    Create prediction model.

    The model is defined as a two-step pipeline:
     - one-hot encoder for city, hour, day_of_week and country features,
     - and a simple neural network for regression.

    :param gpudb.GPUdb db: Kinetica DB connection
    :rtype: (int, pipeline.Pipeline, int)
    """

    model_records = db.get_records_and_decode(
        table_name='prediction_model', offset=0, limit=1,
        options={'sort_by': 'created_on', 'sort_order': 'descending'})

    if len(model_records['records']) > 0:
        logger.info('Model found in DB')
        model = model_records['records'][0]
        classifier = pickle.loads(model['dump'])
        return model['model_id'], classifier, model['created_on']
    else:
        logger.info('No model found in the DB, creating new one from scratch')
        column_transformer = compose.ColumnTransformer([
            ('oh', preprocessing.OneHotEncoder(handle_unknown='ignore'), ['city', 'hour', 'day_of_week', 'country']),
            ('do_nothing', preprocessing.MinMaxScaler(), ['group_members', 'group_events'])
        ])
        classifier = neural_network.MLPRegressor(hidden_layer_sizes=(1500, 750, 375), max_iter=1000, shuffle=True)
        return 0, (column_transformer, classifier), None
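
The docstring promises a two-step pipeline, but the fresh-model branch returns a bare (column_transformer, classifier) tuple, so the caller presumably assembles it. A minimal sketch of that assumed assembly with sklearn's own Pipeline:

from sklearn import pipeline

model_id, parts, created_on = _get_model(db, logger)
if isinstance(parts, tuple):
    # Fresh model: wire the encoder and the regressor into one estimator
    transformer, regressor = parts
    parts = pipeline.Pipeline([('encode', transformer), ('regress', regressor)])
# parts.fit(train_df, y) / parts.predict(test_df) as usual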
Example #8
def calc_mlp(neuron_count_1, neuron_count_2):
    global iter, hyper_param_list
    clf = neural_network.MLPRegressor(hidden_layer_sizes=(neuron_count_1,
                                                          neuron_count_2),
                                      max_iter=500)
    clf.fit(X_train, y_train)
    train_mae = metrics.mean_absolute_error(y_train, clf.predict(X_train))
    train_mse = metrics.mean_squared_error(y_train, clf.predict(X_train))
    valid_mae = metrics.mean_absolute_error(y_valid, clf.predict(X_valid))
    valid_mse = metrics.mean_squared_error(y_valid, clf.predict(X_valid))

    with hyper_param_list_lock:
        hyper_param_list.append((neuron_count_1, neuron_count_2, train_mae,
                                 train_mse, valid_mae, valid_mse))

    with print_lock:
        iter += 1
        print('Iteration: ' + str(iter))
        print('Neurons 1: ' + str(neuron_count_1))
        print('Neurons 2: ' + str(neuron_count_2))
        print('max RUL: ' + str(max_RUL))
        print('\t' + 'Train Mean Absolute Error: ' + str(train_mae))
        print('\t' + 'Train Mean Squared Error: ' + str(train_mse))
        print('\t' + 'Validation Mean Absolute Error: ' + str(valid_mae))
        print('\t' + 'Validation Mean Squared Error: ' + str(valid_mse))
        print('\n')
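
calc_mlp leans on module-level state (the feature matrices, the two locks, iter, hyper_param_list), which suggests it is driven from a thread pool. A sketch of that assumed harness (grid values and max_RUL are hypothetical; X_train/y_train/X_valid/y_valid assumed already defined):

import threading
from concurrent.futures import ThreadPoolExecutor

hyper_param_list = []
hyper_param_list_lock = threading.Lock()
print_lock = threading.Lock()
iter = 0  # shadows the builtin, as the original code does
max_RUL = 130  # dataset-specific constant referenced by calc_mlp

with ThreadPoolExecutor(max_workers=4) as pool:
    for n1 in (32, 64, 128):
        for n2 in (16, 32):
            pool.submit(calc_mlp, n1, n2)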
Example #9
def crossValidateModel(train_in, train_out, model_name='', n=5):
    '''
    Run n-fold cross-validation for training data with various methods
    '''
    train_in = list(map(lambda x: x.values.T[0], train_in))
    train_out = list(map(lambda x: x.values.T[0], train_out))
    if model_name == 'SVM':
        model = make_pipeline(preprocessing.StandardScaler(), svm.SVR())
    elif model_name == 'ANN':
        model = make_pipeline(preprocessing.StandardScaler(),
                              neural_network.MLPRegressor(max_iter=1))
    elif model_name == 'DT':
        model = make_pipeline(preprocessing.StandardScaler(),
                              tree.DecisionTreeRegressor())
    elif model_name == 'GTB':
        model = make_pipeline(preprocessing.StandardScaler(),
                              ensemble.GradientBoostingRegressor())
    elif model_name == 'Random Forest':
        model = make_pipeline(preprocessing.StandardScaler(),
                              ensemble.RandomForestRegressor())
    elif model_name == 'Extra Trees':
        model = make_pipeline(preprocessing.StandardScaler(),
                              ensemble.ExtraTreesRegressor())
    elif model_name == 'ADABoost':
        model = make_pipeline(preprocessing.StandardScaler(),
                              ensemble.AdaBoostRegressor())
    else:
        raise ValueError('unknown model_name: {}'.format(model_name))

    scores = cross_val_score(model, train_in, train_out, cv=n)
    print('{} averaged {} for {}-fold cross validation'.format(
        model_name, sum(scores) / n, n))
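
cross_val_score(..., cv=n) above uses an unshuffled KFold, so fold composition is fixed by row order. If the model names are to be compared on identical, shuffled folds, a seeded splitter helps (sketch; model, train_in, train_out, and n as in the function above):

from sklearn import model_selection

cv = model_selection.KFold(n_splits=n, shuffle=True, random_state=0)
scores = model_selection.cross_val_score(model, train_in, train_out, cv=cv)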
Example #10
def _get_base_ml_model(method):
    regressor = None
    if method == 'lr':
        regressor = linear_model.LinearRegression()
    if method == 'huber':
        regressor = linear_model.HuberRegressor(max_iter=50)
        regressor = multioutput.MultiOutputRegressor(regressor)
    if method == 'svr':
        regressor = svm.LinearSVR()
        regressor = multioutput.MultiOutputRegressor(regressor)
    if method == 'kr':
        regressor = kernel_ridge.KernelRidge(kernel='rbf')
    if method == 'rf':
        regressor = ensemble.RandomForestRegressor(n_estimators=50, n_jobs=8)
    if method == 'gbm':
        regressor = lgb.LGBMRegressor(max_depth=20,
                                      num_leaves=1000,
                                      n_estimators=100,
                                      min_child_samples=5,
                                      random_state=42)
        regressor = multioutput.MultiOutputRegressor(regressor)
    if method == 'nn':
        regressor = neural_network.MLPRegressor(hidden_layer_sizes=(25, 25),
                                                early_stopping=True,
                                                max_iter=1000000,
                                                alpha=0.01)

    return regressor
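
Unlike the 'huber' and 'svr' branches, the 'nn' branch needs no MultiOutputRegressor wrapper because MLPRegressor fits multi-output targets natively. A quick check (sketch, random data):

import numpy as np
from sklearn import neural_network

X = np.random.rand(100, 4)
Y = np.random.rand(100, 3)  # three targets per sample
reg = neural_network.MLPRegressor(hidden_layer_sizes=(25, 25), max_iter=500)
reg.fit(X, Y)
print(reg.predict(X[:2]).shape)  # (2, 3)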
Example #11
 def run(self):
     reqs = self.requires()
     featurized = reqs['featurized']
     with featurized.output().open('r') as f:
         featurized_df = pd.read_csv(f)
     with reqs['scaler'].output().open('rb') as f:
         scaler = pickle.load(f)
     y = featurized_df['activity']
     X = featurized_df[featurized_df.columns.difference(
         ['activity', 'kmer'])]
     X_train = scaler.transform(X)
     #X_train = X
     if self.model_str == 'GB':
         model = ensemble.GradientBoostingRegressor()
     elif self.model_str == 'RF':
         model = ensemble.RandomForestRegressor()
     elif self.model_str == 'lasso':
         model = linear_model.Lasso()
     elif self.model_str == 'EN':
         model = linear_model.ElasticNet()
     elif self.model_str == 'NN':
         model = neural_network.MLPRegressor()
     grid_search = model_selection.RandomizedSearchCV(
         model,
         dict(self.param_grid),
         cv=self.folds,
         scoring='neg_mean_squared_error',
         n_iter=20,
         n_jobs=1)
     grid_search.fit(X_train, y)
     # Use path because we have to write binary (stack: localTarget pickle)
     with self.output().open('wb') as f:
         pickle.dump(grid_search, f)
Example #12
def build_model(model_type, num_targets = 1):
    if model_type == 'linear_regression':
        base = linear_model.SGDRegressor()
    elif model_type == 'random_forests':
        base = ensemble.RandomForestRegressor()
    elif model_type == 'gradient_boosting':
        base = ensemble.GradientBoostingRegressor()
    elif model_type == 'extra_trees':
        base = ensemble.ExtraTreesRegressor()
    elif model_type == 'bagging':
        base = ensemble.BaggingRegressor()
    elif model_type == 'adaboost':
        base = ensemble.AdaBoostRegressor()
    elif model_type == 'neural_network':
        base = neural_network.MLPRegressor()
    elif model_type == 'svm':
        base = svm.SVR(verbose=1)
    elif model_type == 'constant_mean':
        base = dummy.DummyRegressor(strategy='mean')
    elif model_type == 'constant_median':
        base = dummy.DummyRegressor(strategy='median')
    elif model_type == 'constant_zero':
        base = dummy.DummyRegressor(strategy='constant', constant=0)
    else:
        raise ValueError('invalid model type: {}'.format(model_type))

    # multiple outputs in the dataset => fit a separate regressor to each
    if num_targets > 1:
        return multioutput.MultiOutputRegressor(base)
    else:
        return base
Example #13
def mlp_regression(parameter_array):
    layer_value = parameter_array[0]
    second_layer_value = parameter_array[1]
    learning_rate = parameter_array[2]
    return neural_network.MLPRegressor(hidden_layer_sizes=(layer_value,
                                                           second_layer_value),
                                       activation='identity',
                                       solver='adam',
                                       alpha=1,
                                       batch_size='auto',
                                       learning_rate='constant',
                                       learning_rate_init=learning_rate,
                                       power_t=0.5,
                                       max_iter=200,
                                       shuffle=True,
                                       random_state=None,
                                       tol=0.0001,
                                       verbose=False,
                                       warm_start=False,
                                       momentum=0.9,
                                       nesterovs_momentum=True,
                                       early_stopping=False,
                                       validation_fraction=0.1,
                                       beta_1=0.9,
                                       beta_2=0.999,
                                       epsilon=1e-08)
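
Apart from activation='identity', alpha=1, and the two tuned layer widths plus the learning rate, every argument above is scikit-learn's documented default, so the call reduces to three values. Usage under that reading (hypothetical values and data):

# parameter_array = [first hidden width, second hidden width, learning rate]
model = mlp_regression([64, 32, 0.005])
model.fit(X_train, y_train)  # X_train / y_train assumed available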
Example #14
def main():
    start_date = '2010-01-01'
    end_date = '2020-04-06'

    training_x, training_y = DataGenerator_for_correlation_based.make_features(
        start_date, end_date, is_training=True)

    # TODO: set model parameters
    model = NN.MLPRegressor(hidden_layer_sizes=[70, 50, 30, 10],
                            max_iter=50000,
                            solver='adam',
                            shuffle=True,
                            activation='relu',
                            learning_rate='constant',
                            early_stopping=True,
                            batch_size='auto',
                            random_state=13,
                            learning_rate_init=0.001,
                            n_iter_no_change=20,
                            validation_fraction=0.2,
                            alpha=0.01,
                            epsilon=1e-08,
                            verbose=True)
    model.fit(training_x, training_y)

    # TODO: fix pickle file name
    filename = './model/team08_model.pkl'
    pickle.dump(model, open(filename, 'wb'))
    print('saved {}'.format(filename))
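
The matching load-and-predict step is not shown; a minimal sketch (test features assumed to be built by the same DataGenerator):

import pickle

with open('./model/team08_model.pkl', 'rb') as f:
    model = pickle.load(f)
predictions = model.predict(test_x)  # test_x built like training_x above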
Example #15
def dnn_prediction(X, Y):
    # Extend the series Y by 5 steps with a sliding-window MLP:
    # each window of 5 consecutive values is mapped to the next value.
    predictor = neural_network.MLPRegressor(hidden_layer_sizes=(50,),
                                            activation='relu',
                                            max_iter=100)
    windows = np.array([Y[0][i:i + 5] for i in range(Y.shape[1] - 5)])
    targets = np.array([Y[0][i + 5] for i in range(Y.shape[1] - 5)])
    # Fit once on all windows; calling fit() per sample would retrain the
    # network from scratch each time.
    predictor.fit(windows, targets)

    x = [X[len(X) - 1] + 1]
    for _ in range(5):
        # Feed the last five (observed or predicted) values back in
        window = np.array([Y[0][-5:]])
        prediction = predictor.predict(window)
        Y = np.array([np.append(Y[0], prediction[0])])

    while len(x) != len(Y[0]):
        x.append(x[len(x) - 1] + 1)

    return x, Y[0]
Example #16
    def __init__(self,
                 response_var,
                 covariates,
                 log_covariates,
                 log_correction,
                 log_correction_const,
                 regularization_weight=None,
                 normalize_params=False,
                 t_k=None,
                 add_exposure=False,
                 extra_model_params=None):
        super().__init__(response_var, covariates, log_covariates,
                         log_correction, log_correction_const,
                         regularization_weight, normalize_params, t_k,
                         add_exposure)
        self.inputs = self.covariates + self.log_covariates
        self.variables = [self.response_var] + self.inputs

        if extra_model_params:
            print('Extra Model Args', extra_model_params)
            hidden_layer_sizes = extra_model_params
        else:
            hidden_layer_sizes = (32, )

        self.fit_result = sklearn_nn.MLPRegressor(
            hidden_layer_sizes=hidden_layer_sizes)
Example #17
def get_algolib_regression():
    return [
        linear_model.LinearRegression(),
        # linear_model.LassoLarsIC(criterion='aic'),
        # LinearGAM(),
        linear_model.ElasticNet(),
        # Earth(),
        # linear_model.BayesianRidge(),
        ensemble.GradientBoostingRegressor(),
        neural_network.MLPRegressor(),
        ensemble.BaggingRegressor(),
        # tree.DecisionTreeClassifier(),
        ensemble.RandomForestRegressor(),
        # bart
        # ensemble.GradientBoostingClassifier(),
        # XGBClassifier(),
        # LGBMClassifier(),
    ], [
        'SL.glm',
        # 'SL.stepAIC',
        # 'SL.gam',
        'SL.glmnet',
        # 'SL.polymars',
        # 'SL.bayesglm',
        'SL.gbm',
        'SL.nnet',
        'SL.ipredbagg',
        # 'SL.rpartPrune',
        'SL.randomForest',
        # 'SL.bart',
        # 'GBDT',
        # 'XGBoost',
        # 'LightGBM'
    ]
Example #18
 def test_basic(self, single_chunk_classification):
     X, y = single_chunk_classification
     a = nn.ParitalMLPRegressor(random_state=0)
     b = nn_.MLPRegressor(random_state=0)
     a.fit(X, y)
     b.partial_fit(X, y)
     assert_estimator_equal(a, b)
Example #19
def generate_ratio_result(X_train, X_test, y_train, y_test):
    """
    train and predict the result with the training set and the test set

    :param X_train: the features of the training set
    :param X_test: the features of the test set
    :param y_train: the target values of the training set
    :param y_test: the target values of the test set
    :return: dataframe of the predicted result under different models
    """
    # generate the result for random samples
    ratio_result = pd.DataFrame(y_test, columns=['ratio_baseline'])

    model1 = linear_model.LinearRegression()
    model1.fit(X_train, y_train)
    y_pred = model1.predict(X_test)
    ratio_result['single_linear_regression'] = y_pred

    model2 = svm.SVR()
    model2.fit(X_train, y_train)
    y_pred = model2.predict(X_test)
    ratio_result['single_SVM'] = y_pred

    model3 = neural_network.MLPRegressor(solver='lbfgs', max_iter=1000, learning_rate_init=0.005)
    model3.fit(X_train, y_train)
    y_pred = model3.predict(X_test)
    ratio_result['single_NN'] = y_pred

    kernel = GPy.kern.Matern32(input_dim=6, ARD=True)
    m_full = GPy.models.SparseGPRegression(X_train, y_train.reshape(len(y_train), 1), kernel)
    m_full.optimize('bfgs')
    y_pred, y_var = m_full.predict(X_test)
    ratio_result['single_GP'] = y_pred

    return ratio_result
Example #20
def Models():

    # Tree-ensemble and linear models
    forest = ensemble.RandomForestRegressor(n_estimators=150,
                                            min_samples_split=15,
                                            min_samples_leaf=3)
    extra = ensemble.ExtraTreesRegressor(
        n_estimators=150,
        max_features="auto",
        max_depth=30,
        min_samples_split=15,
        min_samples_leaf=3,
    )
    linear = linear_model.LinearRegression()
    neural_1 = neural_network.MLPRegressor(hidden_layer_sizes=(120, ),
                                           activation='relu',
                                           learning_rate_init=0.0001)
    ridge = linear_model.Ridge()
    KN = neighbors.KNeighborsRegressor(n_neighbors=120, weights='uniform')

    # Polynomial models
    polyness = make_pipeline(PolynomialFeatures(3), neural_1)

    ############ MODIFY TO CHOOSE MODEL ############
    model = ridge
    ############ MODIFY TO CHOOSE MODEL ############

    print("Chosen model: ", model, '\n')

    return model
Example #21
    def __init__(self,
                 hidden_layer_sizes: tuple,
                 batch_size: int,
                 max_iter: int = 10000,
                 activation: str = "logistic",
                 solver: str = "adam",
                 random_state: int = 0,
                 verbose: bool = False,
                 learning_rate_init=1e-6,
                 early_stopping=False,
                 validation_fraction=0.1,
                 tol=1e-4,
                 alpha=0.0001,
                 learning_rate="adaptive",
                 n_iter_no_change=10):

        self._model = neural_network.MLPRegressor(
            hidden_layer_sizes=hidden_layer_sizes,
            batch_size=batch_size,
            max_iter=max_iter,
            tol=tol,
            alpha=alpha,
            learning_rate=learning_rate,
            validation_fraction=validation_fraction,
            activation=activation,
            solver=solver,
            random_state=random_state,
            verbose=verbose,
            learning_rate_init=learning_rate_init,
            early_stopping=early_stopping,
            n_iter_no_change=n_iter_no_change)
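
The wrapper stores the estimator in self._model, but its public surface is not shown; a minimal sketch of the assumed delegation methods:

    def fit(self, X, y):
        # Delegate training to the wrapped scikit-learn estimator
        self._model.fit(X, y)
        return self

    def predict(self, X):
        return self._model.predict(X)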
Example #22
 def __init__(self, num_models, num_features):
     self.models = [neural_network.MLPRegressor()
                    for i in range(num_models)]
     self.num_models = num_models
     # scikit-learn expects a 2-D feature matrix and a 1-D target array
     x_train_single = [[np.random.random() for i in range(num_features)]]
     for i in range(num_models):
         self.models[i].fit(x_train_single, [np.random.random()])
Example #23
def mlp(x_train, y_train, x_test, y_test):
    # re-scale features to the 0->1 range: fit on train only, then apply to test
    mmscaler = MinMaxScaler(feature_range=(0, 1))
    x_train = mmscaler.fit_transform(x_train)
    x_test = mmscaler.transform(x_test)

    #train model
    model = neural_network.MLPRegressor(max_iter=10000)

    #optimize params with grid search
    params = {
        "hidden_layer_sizes": [5, 10],
        "activation": ["identity", "logistic", "tanh", "relu"],
        "solver": ["lbfgs", "sgd", "adam"],
        "alpha": [0.0005, 0.005]
    }
    gsModel = GridSearchCV(estimator=model, param_grid=params)

    #fit the model
    # model_fit = model.fit(x_train, np.ravel(y_train))
    gsModel.fit(x_train, np.ravel(y_train))
    y_pred = gsModel.predict(x_test)
    # print("Params chosen: ", model.get_params())
    error = math.sqrt(mean_squared_error(y_test, y_pred))
    return y_pred, error
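
After gsModel.fit, GridSearchCV exposes the winning configuration and refits it on the full training set by default; a quick way to inspect it (sketch):

print('best params:', gsModel.best_params_)
print('best CV score:', gsModel.best_score_)
best_model = gsModel.best_estimator_  # refit on all of x_train by default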
Example #24
    def trainAndTest(self):
        T,Q,Tinf0 = np.loadtxt( '1DGS_surfT_train.dat')
        Tchop0 = T[9:]
        Qchop0 = Q[9:]
        Tinfchop0 = Tinf0[9:]
#        np.savetxt('SKLearnTestWithTrain.dat',(Tinfchop0,Qchop0))

        scaler = skl.StandardScaler( copy=False )
        scaler.fit( Tchop0.reshape(-1,1) )
        scaler.transform( Tchop0.reshape(-1,1) )
        # Do the previous time step magic
        T_train, Q_train = self.makeDelT( Tchop0, Qchop0 )
        
        # Define and train the NN
        mlp = NN.MLPRegressor( hidden_layer_sizes=(10,), max_iter=100000 ) #2,10,1
        mlp.fit( T_train, Q_train )
        
        ## Verify that the parameters actually give back the original training set
        yhat_train = mlp.predict( T_train )

        '''This begins the testing part.  This could be put into separate 
        functions in the future'''
        T1, Q1, Tinf1 = np.loadtxt('1DGS_surfT_test.dat')
        T2chop = T1[9:]
        Q2chop = Q1[9:]
        
        scaler = skl.StandardScaler( copy=False )
        scaler.fit( T2chop.reshape(-1,1) )
        scaler.transform( T2chop.reshape(-1,1) )
        T_test, Q_test = self.makeDelT(T2chop,Q2chop)
        yhat_test = mlp.predict(T_test)
        return yhat_train, yhat_test
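
Note that a second StandardScaler is fit on the test series above, so train and test are standardized against different statistics. If the two series are meant to share a scale, reusing the training scaler avoids the mismatch (sketch, same data files):

import numpy as np
from sklearn.preprocessing import StandardScaler

T_train_raw = np.loadtxt('1DGS_surfT_train.dat')[0][9:]
T_test_raw = np.loadtxt('1DGS_surfT_test.dat')[0][9:]

scaler = StandardScaler().fit(T_train_raw.reshape(-1, 1))  # training stats only
T_train_s = scaler.transform(T_train_raw.reshape(-1, 1))
T_test_s = scaler.transform(T_test_raw.reshape(-1, 1))     # same scale at test time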
Example #25
def non_bayesian_model(name, task):
    if name == 'linear' and task == 'regression':
        return regression_model(linear_model.LinearRegression())

    elif name == 'linear' and task == 'classification':
        return classification_model(linear_model.LogisticRegression())

    if name == 'svm' and task == 'regression':
        return regression_model(svm.SVR())

    elif name == 'svm' and task == 'classification':
        return classification_model(svm.SVC(probability=True))

    if name == 'knn' and task == 'regression':
        return regression_model(
            neighbors.KNeighborsRegressor())  # default is K=5

    elif name == 'knn' and task == 'classification':
        return classification_model(
            neighbors.KNeighborsClassifier())  # default is K=5

    elif name == 'naive_bayes' and task == 'classification':
        return classification_model(naive_bayes.GaussianNB())

    if name == 'decision_tree' and task == 'regression':
        return regression_model(tree.DecisionTreeRegressor())

    elif name == 'decision_tree' and task == 'classification':
        return classification_model(tree.DecisionTreeClassifier())

    if name == 'random_forest' and task == 'regression':
        return regression_model(ensemble.RandomForestRegressor())

    elif name == 'random_forest' and task == 'classification':
        return classification_model(
            ensemble.RandomForestClassifier())  # default is 100 estimators

    if name == 'gradient_boosting_machine' and task == 'regression':
        return regression_model(ensemble.GradientBoostingRegressor())

    elif name == 'gradient_boosting_machine' and task == 'classification':
        return classification_model(
            ensemble.GradientBoostingClassifier())  # default is 100 estimators

    if name == 'adaboost' and task == 'regression':
        return regression_model(ensemble.AdaBoostRegressor())

    elif name == 'adaboost' and task == 'classification':
        return classification_model(
            ensemble.AdaBoostClassifier())  # default is 50 estimators

    if name == 'mlp' and task == 'regression':
        return regression_model(neural_network.MLPRegressor())

    elif name == 'mlp' and task == 'classification':
        return classification_model(neural_network.MLPClassifier())

    else:
        return None
Example #26
    def train_FC90net(trainx, trainy, testx=None, testy=None, results=None):
        print('\nTraining FC90Net...')

        FC90_kwargs = dict(max_iter=500,
                           solver='sgd',
                           learning_rate='adaptive',
                           momentum=params.momentum,
                           activation='relu',
                           verbose=params.verbose,
                           early_stopping=False,
                           random_state=seed)
        if Y.multioutcome:
            fl = Y.n_outcomes
        elif Y.multiclass:
            fl = Y.n_classes
        else:
            fl = 1

        if Y.multiclass:
            outcome = Y.outcome_names[0]
            hl_sizes = (5, 6, 7, fl)
            net = neural_network.MLPClassifier(hidden_layer_sizes=hl_sizes,
                                               **FC90_kwargs)

            net.fit(trainx, trainy)
            testp = net.predict(testx)
            trainp = net.predict(trainx)
            t_bacc = balanced_accuracy_score(testy, testp)

            results['test_balanced_accuracy'][outcome].append(t_bacc)

        else:
            hl_sizes = (5, 6, 7)
            net = neural_network.MLPRegressor(hidden_layer_sizes=hl_sizes,
                                              **FC90_kwargs)

            net.fit(trainx, trainy)
            testp = net.predict(testx)
            trainp = net.predict(trainx)

            for i, outcome in enumerate(Y.outcome_names):
                if Y.multioutcome:
                    t_r2 = r2_score(testy[:, i], testp[:, i])
                    t_mae = mean_absolute_error(testy[:, i], testp[:, i])
                else:
                    t_r2 = r2_score(testy, testp)
                    t_mae = mean_absolute_error(testy, testp)

                results['test_r2_sklearn'][outcome].append(t_r2)
                results['test_mean_absolute_error'][outcome].append(t_mae)

        best_output = [trainp, trainy, testp, testy]
        output_names = ['trainp', 'trainy', 'testp', 'testy']

        return results, net, best_output, output_names
Example #27
def mlp_regression(parameter_index_keeper):
    parameter_str = str(parameter_index_keeper)
    alpha_value = MLP_PARAMETER_VALUE_ARRAY[int(
        parameter_str[len(parameter_str) -
                      1])]  # alpha value index is the last digit
    # layer_value = MLP_LAYER_ARRAY[int(parameter_str[len(parameter_str) - 1])]
    return neural_network.MLPRegressor(alpha=alpha_value,
                                       hidden_layer_sizes=(100, 10))
Example #28
    def build(self):
        self.model = neural_network.MLPRegressor(
            self.config.hidden_layer_sizes)
        self.model.fit(np.reshape(np.zeros(self.observation_dim), (1, -1)),
                       np.reshape(np.zeros(self.action_dim), (1, -1)))
        self.model2 = neural_network.MLPRegressor(
            self.config.hidden_layer_sizes)
        self.model2.fit(np.reshape(np.zeros(self.observation_dim), (1, -1)),
                        np.reshape(np.zeros(self.action_dim), (1, -1)))

        self.lr = self.config.lr
        self.gamma = self.config.gamma
        self.epsilon = self.config.epsilon
        self.rev_action_map = {
            tuple(v): k
            for k, v in self.env.action_map.items()
        }
Example #29
 def __init__(self, state_size, action_size, learn_rate):
     self.state_size = state_size
     self.action_size = action_size
     self.nn = neural_network.MLPRegressor(hidden_layer_sizes=(20, ),
                                           activation='relu',
                                           solver='sgd',
                                           learning_rate_init=learn_rate,
                                           max_iter=1)
Example #30
 def test_basic(self, single_chunk_classification):
     X, y = single_chunk_classification
     a = nn.BigMLPRegressor(random_state=0)
     b = nn_.MLPRegressor(random_state=0)
     a.fit(X, y)
     b.partial_fit(X, y)
     for a_, b_ in zip(a.coefs_, b.coefs_):
         assert_eq(a_, b_)