Example #1
# Assumed imports for this example (nolearn + Lasagne).  VERBOSE, EarlyStopping and AcceptLoss
# are defined elsewhere in the source file; a sketch of the two callbacks follows the function.
import types

from lasagne.layers import DenseLayer, InputLayer
from lasagne.nonlinearities import rectify
from lasagne.updates import nesterov_momentum
from nolearn.lasagne import NeuralNet


def neural_network_regression(data):
    # Layer stack: an input layer, five dense hidden (ReLU) layers and a single linear output unit.
    layers0 = [('input', InputLayer),
               ('dense0', DenseLayer),
               ('dense1', DenseLayer),
               ('dense2', DenseLayer),
               ('dense3', DenseLayer),
               ('dense4', DenseLayer),
               ('output', DenseLayer)]

    net0 = NeuralNet(layers=layers0,
                     input_shape=(None, len(data.X_train[0])),
                     dense0_num_units=400,
                     dense0_nonlinearity=rectify,
                     dense1_num_units=200,
                     dense1_nonlinearity=rectify,
                     dense2_num_units=100,
                     dense2_nonlinearity=rectify,
                     dense3_num_units=50,
                     dense3_nonlinearity=rectify,
                     dense4_num_units=25,
                     dense4_nonlinearity=rectify,
                     output_num_units=1,
                     output_nonlinearity=None,

                     update=nesterov_momentum,
                     update_learning_rate=0.00001,
                     update_momentum=0.9,
                     regression=True,

                     on_epoch_finished=[
                         EarlyStopping(patience=20),
                         AcceptLoss(min=0.01)
                     ],

                     verbose=VERBOSE,
                     max_epochs=100000)

    # Provide our own validation set
    def my_split(self, X, y, eval_size):
        return data.X_train, data.X_validate, data.y_train_nn, data.y_validate_nn

    net0.train_split = types.MethodType(my_split, net0)

    return net0
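
# EarlyStopping and AcceptLoss are referenced above but not shown in this example; they are
# on_epoch_finished callbacks defined elsewhere in the source file.  The classes below are a
# hypothetical sketch following the usual nolearn callback pattern (raise StopIteration to end
# training); the real implementations may differ.
import numpy as np


class EarlyStopping(object):
    """Stop training when the validation loss has not improved for `patience` epochs."""

    def __init__(self, patience=20):
        self.patience = patience
        self.best_valid = np.inf
        self.best_valid_epoch = 0
        self.best_weights = None

    def __call__(self, nn, train_history):
        current_valid = train_history[-1]['valid_loss']
        current_epoch = train_history[-1]['epoch']
        if current_valid < self.best_valid:
            # Remember the best validation loss and the weights that produced it.
            self.best_valid = current_valid
            self.best_valid_epoch = current_epoch
            self.best_weights = nn.get_all_params_values()
        elif self.best_valid_epoch + self.patience < current_epoch:
            # No improvement for `patience` epochs: restore the best weights and stop.
            nn.load_params_from(self.best_weights)
            raise StopIteration()


class AcceptLoss(object):
    """Presumed to stop training once the validation loss drops below an acceptable minimum."""

    def __init__(self, min=0.01):
        self.min = min

    def __call__(self, nn, train_history):
        if train_history[-1]['valid_loss'] < self.min:
            raise StopIteration()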
# Compute the z-scores for both the training and validation sets, but use the training set's mean and
# standard deviation for both.  This is customary because the network is trained against that scaling;
# additionally, the prediction set might be too small to yield a meaningful mean and standard deviation of its own.
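# The zscore helper used below is not part of this snippet.  A minimal sketch of what it is assumed
# to do: standardize each column with the supplied training mean and standard deviation.
import numpy as np

def zscore(X, mean, sdev):
    # Column-wise (x - mean) / sdev, using statistics computed on the training set.
    return (np.asarray(X) - mean) / sdev
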
X_train_z = zscore(X_train, train_mean, train_sdev) #scipy.stats.mstats.zscore(X_train)
X_validate_z = zscore(X_validate, train_mean, train_sdev)  #scipy.stats.mstats.zscore(X_validate)

# These can be used to compare the zscore calculation above against scipy's implementation
#print(X_train_z)
#print(scipy.stats.mstats.zscore(X_train))

# Provide our own validation set
def my_split(self, X, y, eval_size):
    return X_train_z, X_validate_z, y_train, y_validate

net0.train_split = types.MethodType(my_split, net0)

# Train the network
net0.fit(X_train_z, y_train)

# Predict the validation set
pred_y = net0.predict(X_validate_z)
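
# The network is a regression with a single linear output unit, so pred_y holds continuous values
# rather than class labels.  A minimal sketch of mapping them to species indices, assuming the three
# species were encoded as 0, 1 and 2 in y_train: round to the nearest integer and clip to the valid range.
import numpy as np

pred_class = np.clip(np.rint(pred_y).astype(int), 0, 2).flatten()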

# Display predictions and count the number of incorrect predictions.
species_names = ['setosa', 'versicolour', 'virginica']

count = 0
wrong = 0
for element in zip(X_validate, y_validate, pred_y):
    print("Input: sepal length: {}, sepal width: {}, petal length: {}, petal width: {}; Expected: {}; Actual: {}".format(
        element[0][0],element[0][1],element[0][2],element[0][3],