예제 #1
0
def deserialize_mlp_regressor(model_dict):
    """Rebuild an MLPRegressor from a dict produced by a matching serializer.

    `model_dict` must carry the constructor kwargs under 'params' plus the
    fitted attributes listed below, so the returned model can predict()
    without being refit.
    """
    regressor = MLPRegressor(**model_dict['params'])

    # Restore the fitted state attribute by attribute.
    fitted_attrs = (
        'coefs_',
        'loss_',
        'intercepts_',
        'n_iter_',
        'n_layers_',
        'n_outputs_',
        'out_activation_',
    )
    for attr in fitted_attrs:
        setattr(regressor, attr, model_dict[attr])

    return regressor
예제 #2
0
# Inclusive range of indel sizes under consideration.
INDEL_RANGE: List[int] = list(range(MIN_INDEL, MAX_INDEL + 1))

# Function used to map a sequence string onto numeric features.
SEQUENCE_MAPPER: Callable[[str], Any] = sequence_to_ordinals

# Fixed seed for reproducible model runs.
RANDOM_STATE: int = 42

# Model with method 'fit(X, y)'
Model = NamedTuple('Model', [('name', str), ('model', Any), ('shorthand', str),
                             ('preprocess_X', Optional[Callable])])

mlp = MLPRegressor(hidden_layer_sizes=(100, 100, 100, 100, 100, 100),
                   solver='lbfgs',
                   max_iter=50,
                   verbose=True)
# NOTE(review): n_layers_ is a *fitted* attribute that fit() recomputes from
# hidden_layer_sizes, so this assignment is discarded on training — confirm it
# is intentional (to widen/deepen the net, change hidden_layer_sizes instead).
mlp.n_layers_ = 8

# feature_map_nystroem = Nystroem(gamma=.2, random_state=1, n_components=10000)
# NOTE(review): this redefinition shadows the 4-field Model NamedTuple above
# (it drops 'preprocess_X') — likely only one of the two should remain.
Model = NamedTuple('Model', [('name', str), ('model', Any),
                             ('shorthand', str)])

# Registry of configured models keyed by name.
asdf: Dict[str, Model] = {}

MODELS: Dict[str, Model] = {
    'RandomForest':
    Model(name='Random Forest',
          model=RandomForestRegressor(n_jobs=-1,
                                      bootstrap=True,
                                      random_state=RANDOM_STATE,
                                      verbose=1),
          shorthand='rf',
예제 #3
0
#%%

# Train the regressor to reproduce its own input (target == input), i.e.
# an autoencoder-style fit.
reg.fit(train, train)
#%% Plot result on training data
output_eval_train = reg.predict(train)
fig = px.scatter(x=train[:,0], y=train[:,1],
    labels={'x': 'x[0]', 'y': 'x[1]'})
fig.add_scatter(x=output_eval_train[:,0], y=output_eval_train[:,1], mode='markers')
#%% Plot result on test data
output_eval_test = reg.predict(test)
fig = px.scatter(x=test[:,0], y=test[:,1],
    labels={'x': 'x[0]', 'y': 'x[1]'})
fig.add_scatter(x=output_eval_test[:,0], y=output_eval_test[:,1], mode='markers')
# %% Cut the network in half at the hidden 1D layer
#    to get values of the hidden parameter
# HACK: shrinking the fitted n_layers_ attribute makes the forward pass stop
# early, so predict() returns the bottleneck layer's activation instead of the
# final output.  NOTE(review): this relies on sklearn internals and assumes a
# symmetric architecture with the 1-D bottleneck at the exact middle — confirm
# against the model definition, and note it leaves `reg` permanently truncated.
reg.n_layers_ = ((reg.n_layers_ - 2)+1) // 2 + 1
#%%
# After the cut, predict() yields the hidden curve parameter t per sample.
ae_parm = reg.predict(train)
fig = go.Figure()
# Overlay raw vs. reconstructed coordinates against the hidden parameter t.
fig.add_scatter(x=ae_parm, y=train[:,0], 
    mode='markers', name='x[0]')
fig.add_scatter(x=ae_parm, y=output_eval_train[:,0], 
    mode='markers', name='x[0] reduced')
fig.add_scatter(x=ae_parm, y=train[:,1], 
    mode='markers', name='x[1]')
fig.add_scatter(x=ae_parm, y=output_eval_train[:,1], 
    mode='markers', name='x[1] reduced')
fig.update_layout(
    xaxis_title = 't (hidden curve parameter)',
    yaxis_title = 'x[0], x[1]')
# %%
예제 #4
0
# Fix: the original used Python 2 `print` statements, which are a SyntaxError
# on Python 3; converted to print() calls throughout.
print('R2 score:', score)
print('MAE:', mean_absolute_error(testing_labels, preds), '\n')

# PCA + Gradient Boosting Regression
gbr = GradientBoostingRegressor(min_samples_split=4)
gbr.fit(reduced_training_features, training_labels)
preds = gbr.predict(reduced_testing_features)
score = gbr.score(reduced_testing_features, testing_labels)
print('PCA + Gradient Boosting Regression Results:')
print('R2 score:', score)
print('MAE:', mean_absolute_error(testing_labels, preds))

# Multi-Layer Perceptron Regression
from sklearn.neural_network import MLPRegressor
mlp = MLPRegressor(max_iter=2500)
# NOTE(review): the original set `mlp.n_layers_ = 75` here and below.
# n_layers_ is a *fitted* attribute that fit() recomputes from
# hidden_layer_sizes, so the assignment had no effect and was removed; to
# actually deepen the network, pass hidden_layer_sizes=(...) to the
# constructor instead.
mlp.fit(training_features, training_labels)
preds = mlp.predict(testing_features)
score = mlp.score(testing_features, testing_labels)
print('Multi-Layer Perceptron Regression Results:')
print('R2 score:', score)
print('MAE:', mean_absolute_error(testing_labels, preds), '\n')

# PCA + Multi-Layer Perceptron Regression
mlp = MLPRegressor(max_iter=2500)
mlp.fit(reduced_training_features, training_labels)
preds = mlp.predict(reduced_testing_features)
score = mlp.score(reduced_testing_features, testing_labels)
print('PCA + Multi-Layer Perceptron Regression Results:')
print('R2 score:', score)
예제 #5
0
    list([[7.61211153e-01], [5.29023058e-01], [-6.76783513e-01],
          [-1.23527535e-01], [1.04599422e-01], [1.06178562e+00],
          [-1.09977597e-43], [-1.22990539e-90], [-3.14851814e-21],
          [7.33380751e-01]])
]

# Hard-coded bias vectors for the hand-restored network: one list per layer
# transition (10 hidden units, then the single output unit).
net.intercepts_ = [
    list([
        1.44847648, 1.47542637, 0.51003163, 0.45278632, -0.0056204, 1.53020242,
        -0.23453891, -0.00187764, -0.21982535, 1.69397764
    ]),
    list([1.9355952])
]

# Fitted-attribute bookkeeping: one output, input + hidden + output layers,
# linear ("identity") output activation.
# NOTE(review): presumably `net` is an sklearn MLPRegressor being rebuilt
# without fit() — verify against where `net` is constructed.
net.n_outputs_ = 1
net.n_layers_ = 3
net.out_activation_ = "identity"


def dotProd(a, b):
    """Return the dot product of the equal-length sequences *a* and *b*.

    Fix: the original iterated with `xrange`, which does not exist on
    Python 3 (NameError); `zip` works on both Python 2 and 3.  Assumes the
    sequences have the same length (zip stops at the shorter one).
    """
    return sum(x * y for x, y in zip(a, b))


def arrToTuple(arr):
    """Convert a sequence of sequences into a tuple of tuples."""
    return tuple(tuple(row) for row in arr)


def isEndState(state):
    """A state is terminal once its second component has reached zero."""
    remaining = state[1]
    return remaining == 0