Code Example #1
def testNN():
    train_x, train_y, dev_x, dev_y, test_x, test_y, x_max, y_max = util.loadLinRegData(
        pad=False)
    NN = NeuralNet(eps=10**-8, layer_sizes=[9, 7, 5, 3, 1])
    NN.fit(train_x, train_y, y_max[0], test_x, test_y)
    # Multiplying by y_max[0] rescales the RMSE to the original target units
    preds = NN.predict(train_x)
    train_nn_rmse = util.findRMSE(preds, train_y) * y_max[0]
    preds = NN.predict(test_x)
    test_nn_rmse = util.findRMSE(preds, test_y) * y_max[0]
    print('NN RMSE:', test_nn_rmse)
    return train_nn_rmse, test_nn_rmse
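The RMSE here is computed on scaled targets and then multiplied by y_max[0] to express the error in the original units. For reference, a minimal sketch of what a findRMSE helper compatible with these calls might look like; the name and signature come from the calls above, the implementation is an assumption:

import numpy as np

def findRMSE(preds, targets):
    # Root-mean-square error; callers rescale the result
    # (e.g. findRMSE(...) * y_max[0]) to recover original units.
    preds = np.asarray(preds).ravel()
    targets = np.asarray(targets).ravel()
    return np.sqrt(np.mean((preds - targets) ** 2))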
Code Example #2
File: train.py  Project: mlomb/halite2-bot
def main():
    parser = argparse.ArgumentParser(description="Halite II training")
    parser.add_argument("--model_name", help="Name of the model")
    parser.add_argument("--minibatch_size", type=int, help="Size of the minibatch", default=100)
    parser.add_argument("--steps", type=int, help="Number of steps in the training", default=100)
    parser.add_argument("--data", help="Data directory or zip file containing uncompressed games")
    parser.add_argument("--cache", help="Location of the model we should continue to train")
    parser.add_argument("--games_limit", type=int, help="Train on up to games_limit games", default=1000)
    parser.add_argument("--seed", type=int, help="Random seed to make the training deterministic")
    parser.add_argument("--bot_to_imitate", help="Name of the bot whose strategy we want to learn")
    parser.add_argument("--dump_features_location", help="Location of hdf file where the features should be stored")

    args = parser.parse_args()

    # Make deterministic if needed
    if args.seed is not None:
        np.random.seed(args.seed)
    nn = NeuralNet(cached_model=args.cache, seed=args.seed)

    if args.data.endswith('.zip'):
        raw_data = fetch_data_zip(args.data, args.games_limit)
    else:
        raw_data = fetch_data_dir(args.data, args.games_limit)

    data_input, data_output = parse(raw_data, args.bot_to_imitate, args.dump_features_location)
    data_size = len(data_input)
    # Hold out the last 15% of the parsed data for cross validation
    training_input, training_output = data_input[:int(0.85 * data_size)], data_output[:int(0.85 * data_size)]
    validation_input, validation_output = data_input[int(0.85 * data_size):], data_output[int(0.85 * data_size):]

    training_data_size = len(training_input)

    # randomly permute the data
    permutation = np.random.permutation(training_data_size)
    training_input, training_output = training_input[permutation], training_output[permutation]

    print("Initial, cross validation loss: {}".format(nn.compute_loss(validation_input, validation_output)))

    curves = []

    for s in range(args.steps):
        start = (s * args.minibatch_size) % training_data_size
        end = start + args.minibatch_size
        training_loss = nn.fit(training_input[start:end], training_output[start:end])
        if s % 25 == 0 or s == args.steps - 1:
            validation_loss = nn.compute_loss(validation_input, validation_output)
            print("Step: {}, cross validation loss: {}, training_loss: {}".format(s, validation_loss, training_loss))
            curves.append((s, training_loss, validation_loss))

    cf = pd.DataFrame(curves, columns=['step', 'training_loss', 'cv_loss'])
    fig = cf.plot(x='step', y=['training_loss', 'cv_loss']).get_figure()

    # Save the trained model, so it can be used by the bot
    current_directory = os.path.dirname(os.path.abspath(__file__))
    model_path = os.path.join(current_directory, os.path.pardir, "models", args.model_name + ".ckpt")
    print("Training finished, serializing model to {}".format(model_path))
    nn.save(model_path)
    print("Model serialized")

    curve_path = os.path.join(current_directory, os.path.pardir, "models", args.model_name + "_training_plot.png")
    fig.savefig(curve_path)
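One subtlety of the minibatch loop above: the modulo keeps start within bounds, but end = start + minibatch_size can still run past the end of the array, and NumPy slicing then silently returns a short batch instead of wrapping around. A tiny standalone illustration with made-up sizes:

import numpy as np

data = np.arange(10)              # pretend training_data_size = 10
minibatch_size = 4
for s in range(4):
    start = (s * minibatch_size) % len(data)
    end = start + minibatch_size
    print(s, data[start:end])     # step 2 yields a short batch: [8 9]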
Code Example #3
train_x, train_y, dev_x, dev_y, test_x, test_y, x_max, y_max = util.loadLinRegData()
# Play with neural-net hyperparameters

layer_architectures = [[15, 3, 1], [19, 7, 3, 1], [9, 7, 5, 3, 1]]

results = []

for arch in layer_architectures:
    cur_result = []
    cur_result.append(arch)

    print("Fitting NN with architecture", arch)
    NN = NeuralNet(layer_sizes=arch)
    NN.fit(train_x, train_y, y_max[0])

    preds = NN.predict(train_x)
    rmse = util.findRMSE(preds, train_y) * y_max[0]
    cur_result.append(rmse)

    preds = NN.predict(dev_x)
    rmse = util.findRMSE(preds, dev_y) * y_max[0]
    cur_result.append(rmse)
    results.append(cur_result)
    print('Train rmse', cur_result[1], 'Dev rmse', cur_result[2])

# Rank architectures by dev-set RMSE, best first
results.sort(key=lambda x: x[2])
for result in results:
    print('Architecture: \t', result[0], 'Train RMSE: \t', result[1],
          'Dev RMSE: \t', result[2])
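Because results is sorted by dev RMSE in ascending order, results[0][0] is the winning architecture. A natural follow-up, sketched with the same NeuralNet and util API used above, is to refit it and report the held-out test error:

best_arch = results[0][0]
NN = NeuralNet(layer_sizes=best_arch)
NN.fit(train_x, train_y, y_max[0])
preds = NN.predict(test_x)
test_rmse = util.findRMSE(preds, test_y) * y_max[0]
print('Best architecture:', best_arch, 'Test RMSE:', test_rmse)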
Code Example #4
        with gzip.open(os.path.join('..', 'data', 'mnist.pkl.gz'), 'rb') as f:
            train_set, valid_set, test_set = pickle.load(f, encoding="latin1")
        X, y = train_set
        Xtest, ytest = test_set

        print(X.shape)

        binarizer = LabelBinarizer()
        Y = binarizer.fit_transform(y)

        hidden_layer_sizes = [50]
        model = NeuralNet(hidden_layer_sizes, sgd=0)

        t = time.time()
        model.fit(X, Y)
        print("Fitting took %d seconds" % (time.time() - t))

        # Compute training error
        yhat = model.predict(X)
        trainError = np.mean(yhat != y)
        print("Training error = ", trainError)

        # Compute test error
        yhat = model.predict(Xtest)
        testError = np.mean(yhat != ytest)
        print("Test error     = ", testError)

    elif task == "2.1":
        W = np.array([[-2, 2, -1], [1, -2, 0]])
        x = np.array([-3, 2, 2])
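The excerpt ends with a 2x3 weight matrix and a 3-dimensional input for task 2.1, shapes that support a single-layer forward pass. A sketch of that computation; the tanh nonlinearity is an assumption, since the excerpt stops before any activation is applied:

import numpy as np

W = np.array([[-2, 2, -1], [1, -2, 0]])  # 2x3 weights
x = np.array([-3, 2, 2])                 # 3-dimensional input

z = W @ x         # pre-activation: [8, -7]
a = np.tanh(z)    # assumed activation
print(z, a)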
Code Example #5
    print('Total time elapsed: %.2f s' % t_tot)

    # Dump data or load
    joblib.dump(
        grid_search, pickle + 'grid_search_%s_seed%d_nbins%d_pca%d.pkl' %
        (param_string, seed, n_bins, n_pca))
else:
    grid_search = joblib.load(pickle +
                              'grid_search_%s_seed%d_nbins%d_pca%d.pkl' %
                              (param_string, seed, n_bins, n_pca))

best_params = grid_search.best_params_
print(best_params)
best_estimator = NeuralNet()
best_estimator.set_params(**best_params)
best_estimator.fit(X_train, y_train)

best_index = grid_search.best_index_

i_, j_, k_, l_, m_, n_ = np.unravel_index(best_index,
                                          n_params)  # indices of best params

# Results matrix for all parameter combinations
cv_results = grid_search.cv_results_
test_score = cv_results['mean_test_score']
test_score = test_score.reshape(n_params)

print('Mean fit time = %.2e s' % np.mean(cv_results['mean_fit_time']))

print('### GRID SEARCH RESULTS ###')
print('Best params:')
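The six unravel indices imply that n_params is a 6-tuple of grid dimensions, one axis per hyperparameter. It would be derived from the grid handed to the search, along these lines (the parameter names are hypothetical; note that scikit-learn enumerates a dict grid in sorted parameter-name order, so the reshape axes must follow that order):

param_grid = {
    'alpha': [1e-4, 1e-3, 1e-2],
    'batch_size': [32, 128],
    'hidden_layer_sizes': [(50,), (100,)],
    'learning_rate_init': [1e-3, 1e-2],
    'max_iter': [200, 500],
    'tol': [1e-4, 1e-3],
}
# One axis per hyperparameter, keys listed in sorted order to match
# the layout of cv_results_['mean_test_score']
n_params = tuple(len(param_grid[k]) for k in sorted(param_grid))  # (3, 2, 2, 2, 2, 2)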
Code Example #6
File: main.py  Project: jvpoulos/cs289-hw6
# Initialize output file for results
output_file = open("./results/results_" + cost + ".txt", "w")  # text mode: the writes below pass str, not bytes
dash = 25

# Initialize NN classifier
network = NeuralNet(n_inputs, n_hidden, n_outputs, cost=cost, gamma=gamma)

output_file.write(dash * "-" + "\n")
output_file.write("Gamma:" + str(gamma) + "\n")
output_file.write("Cost:" + cost + "\n")
output_file.write(dash * "-" + "\n")

# Fit classifier on training data
start = time.time()
network.fit(x_train, y_train, pred_epochs=pred_epochs, max_epoch=max_epoch)
output_file.write("Training time:" + str(np.around((time.time() - start) / 60., 1)) + "minutes\n")
output_file.write(dash * "-" + "\n")

# Plot training error and error rate

plt.plot(network.pred_errors, '-g', label='Error Rate')
plt.plot(network.costs, '-r', label='Training Error')
plt.legend()
plt.xlabel('Epoch')
plt.ylabel('Value')
plt.savefig(img_file)

# Evaluate on the held-out set (x_test, y_test)
pred_labels = network.predict(x_test)
accuracy = 100 * np.sum(pred_labels == np.argmax(y_test, axis=1)) / float(len(y_test))
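The accuracy computed on the last line is not used in the excerpt; a natural continuation, following the snippet's own output_file conventions, records it and closes the file:

output_file.write("Accuracy:" + str(np.around(accuracy, 2)) + "%\n")
output_file.write(dash * "-" + "\n")
output_file.close()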
Code Example #7
train_loss_list = []
train_acc_list = []
test_acc_list = []

model = NeuralNet()

model.add_affine(784, 50)
model.add_active("relu")
model.add_affine(50, 10)

for i in range(iters_num):
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]

    model.fit(x_batch, t_batch, learning_rate)

    loss = model.loss(x_batch, t_batch)
    train_loss_list.append(loss)

    if i % iter_per_epoch == 0:
        train_acc = model.accuracy(x_train, t_train)
        test_acc = model.accuracy(x_test, t_test)
        train_acc_list.append(train_acc)
        test_acc_list.append(test_acc)
        print("Iteration " + str(i))
        print(train_acc, test_acc)

x = np.arange(len(train_loss_list))
plt.plot(x, train_loss_list)
plt.show()
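train_acc_list and test_acc_list are collected once per epoch but never plotted in the excerpt. A short sketch that visualizes them the same way the loss curve is shown above:

epochs = np.arange(len(train_acc_list))
plt.plot(epochs, train_acc_list, label='train accuracy')
plt.plot(epochs, test_acc_list, label='test accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()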
Code Example #8
                # select 2 non-agent vehicles
                elif x < len(X_test_dataframes) and other_car_limit_test > 0:
                    x_positions_test = np.append(
                        x_positions_test,
                        X_test_dataframes[x].iloc[:, 6 * i + 2 + 2].to_numpy())
                    y_positions_test = np.append(
                        y_positions_test,
                        X_test_dataframes[x].iloc[:, 6 * i + 2 + 3].to_numpy())

                    other_car_limit_test -= 1

                elif (x < len(X_test_dataframes) and other_car_limit_test == 0
                        and agent_found_flag_test):
                    X_test_x[x] = x_positions_test
                    X_test_y[x] = y_positions_test

        model1 = NeuralNet([50], max_iter=10000)
        model1.fit(X_x, y_x)

        y_hat_x = model1.predict(X_test_x).flatten()

        model2 = NeuralNet([30], max_iter=10000)
        model2.fit(X_y, y_y)

        y_hat_y = model2.predict(X_test_y).flatten()

        y_hat = np.insert(y_hat_y, np.arange(len(y_hat_x)), y_hat_x)
        pd.DataFrame(y_hat).to_csv("output.csv")
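The np.insert call interleaves the two prediction vectors so the output alternates x, y, x, y, and so on. A tiny standalone check with made-up values:

import numpy as np

y_hat_x = np.array([1.0, 2.0, 3.0])
y_hat_y = np.array([10.0, 20.0, 30.0])
# Insert each x-prediction before position 0, 1, 2, ... of the y-predictions
interleaved = np.insert(y_hat_y, np.arange(len(y_hat_x)), y_hat_x)
print(interleaved)  # [ 1. 10.  2. 20.  3. 30.]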