def main(neural_net_func, data_sets, rate=1.0, max_iterations=10000):
    verbose = True
    for name, training_data, test_data in data_sets:
        print("-" * 40)
        print("Training on %s data" % (name))
        nn = neural_net_func()
        train(nn,
              training_data,
              rate=rate,
              max_iterations=max_iterations,
              verbose=verbose)
        print("Trained weights:")
        for w in nn.weights:
            print("Weight '%s': %f" % (w.get_name(), w.get_value()))
        print("Testing on %s test-data" % (name))
        result = test(nn, test_data, verbose=verbose)
        print("Accuracy: %f" % (result))
        if verbose:
            print("Finite Difference Check:", finite_difference(nn))
            print("Decision Boundary on Training Data:")
            plot_decision_boundary(nn, -PLOT_SIZE, PLOT_SIZE, -PLOT_SIZE,
                                   PLOT_SIZE, training_data)
            print("Decision Boundary on Test Data:")
            plot_decision_boundary(nn, -PLOT_SIZE, PLOT_SIZE, -PLOT_SIZE,
                                   PLOT_SIZE, test_data)
            test_result = test(nn, test_data, verbose=verbose)
            print("Test Data Accuracy: %f" % (test_result))
            train_result = test(nn, training_data, verbose=verbose)
            print("Train Data Accuracy: %f" % (train_result))
Example #2
def get_Qmax(state, minimal_actions, nn):
    # Q-values for the legal ("minimal") actions only.
    Qvalues = nn.test(state)[0, minimal_actions]
    index = tc.argmax(Qvalues)
    # Map the index within the legal subset back to the action id.
    Amax = tc.Tensor([minimal_actions[index]]).long()
    Qmax = Qvalues[index]
    return Amax, Qmax
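The pattern above indexes the network's Q-value row with the list of legal ("minimal") actions, then maps the argmax back to an action id. A self-contained sketch of the same idea, with illustrative values that are not from the repository:

import torch as tc

q_all = tc.tensor([[0.1, 0.9, -0.3, 0.4]])  # Q-values for every action
minimal_actions = [0, 2, 3]                 # legal actions in this state
q_legal = q_all[0, minimal_actions]         # gather legal Q-values only
best = tc.argmax(q_legal)                   # index within the legal subset
amax = tc.tensor([minimal_actions[best]])   # map back to the action id
print(amax.item(), q_legal[best].item())    # prints 3 and ~0.4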
Example #3
def main(neural_net_func, data_sets, rate=1.0, max_iterations=10000):
    verbose = True
    for name, training_data, test_data in data_sets:
        print("-" * 40)
        print("Training on %s data" % (name))
        nn = neural_net_func()
        train(nn,
              training_data,
              rate=rate,
              max_iterations=max_iterations,
              verbose=verbose)
        print("Trained weights:")
        for w in nn.weights:
            print("Weight '%s': %f" % (w.get_name(), w.get_value()))
        print("Testing on %s train-data" % (name))
        result = test(nn, training_data, verbose=verbose)
        print("Accuracy on train data: %f" % (result))
        print("Testing on %s test-data" % (name))
        result = test(nn, test_data, verbose=verbose)
        print("Accuracy on test data: %f" % (result))
Example #4
def main(neural_net_func, data_sets, max_iterations=10000):
    verbose = True
    for name, training_data, test_data in data_sets:
        print "-" * 40
        print "Training on %s data" % (name)
        nn = neural_net_func()
        train(nn, training_data, max_iterations=max_iterations, verbose=verbose)
        print "Trained weights:"
        for w in nn.weights:
            print "Weight '%s': %f" % (w.get_name(), w.get_value())
        print "Testing on %s test-data" % (name)
        result = test(nn, test_data, verbose=verbose)
        print "Accuracy: %f" % (result)
Example #5
def main(neural_net_func, data_sets, max_iterations=10000):
    verbose = True
    for name, training_data, test_data in data_sets:
        print "-"*40
        print "Training on %s data" %(name)
        nn = neural_net_func()
        train(nn, training_data, max_iterations=max_iterations,
              verbose=verbose)
        print "Trained weights:"
        for w in nn.weights:
            print "Weight '%s': %f"%(w.get_name(),w.get_value())
        print "Testing on %s test-data" %(name)
        result = test(nn, test_data, verbose=verbose)
        print "Accuracy: %f"%(result)
Example #6
def main(neural_net_func, data_sets, rate=1.0, max_iterations=10000):
    verbose = True
    for name, training_data, test_data in data_sets:
        print("-" * 40)
        print("Training on %s data" % (name))
        nn = neural_net_func()
        train(nn, training_data, rate=rate, max_iterations=max_iterations,
              verbose=verbose)
        finite_difference(nn)
        print("Trained weights:")
        for w in nn.weights:
            print("Weight '%s': %f" % (w.get_name(), w.get_value()))
        print("Testing on %s test-data" % (name))
        # plot_decision_boundary(nn, 0, 4, 0, 4)
        result = test(nn, test_data, verbose=verbose)
        print("Accuracy: %f" % (result))
Example #7
def ep_greedy(ep, state, minimal_actions, nn):
    # Q-values for the legal ("minimal") actions only.
    Qvalues = nn.test(state)[0, minimal_actions]
    num = random.random()

    index = tc.argmax(Qvalues).type(tc.long)
    Qmax = Qvalues[index]
    Amax = tc.Tensor([minimal_actions[index]]).long()

    # Guard against randrange(0, 0) when only one action is legal.
    if len(minimal_actions) > 1 and num < ep + (1 - ep) / len(minimal_actions):
        # Explore: pick uniformly among the non-greedy actions; randrange
        # yields n-1 slots, and shifting past `index` skips the greedy one.
        action_index = random.randrange(0, len(minimal_actions) - 1)
        if action_index >= index:
            action_index = action_index + 1
        return tc.Tensor([minimal_actions[action_index]]).long(), Qvalues[action_index]
    else:
        # Exploit: return the greedy action and its Q-value.
        return Amax, Qmax
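Note that this exploration branch samples uniformly from the non-greedy legal actions only. For comparison, a conventional epsilon-greedy rule, where exploration samples uniformly over all legal actions, can be sketched as follows (names and values are illustrative):

import random
import torch as tc

def epsilon_greedy(ep, q_row, legal_actions):
    # With probability ep, explore uniformly over all legal actions;
    # otherwise exploit the greedy (highest-Q) legal action.
    if random.random() < ep:
        return random.choice(legal_actions)
    return legal_actions[tc.argmax(q_row[0, legal_actions])]

q = tc.tensor([[0.1, 0.9, -0.3]])
print(epsilon_greedy(0.1, q, [0, 1, 2]))  # usually 1, occasionally random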
Example #8
    # K-Nearest neighbors
    if model == 'nearest' and phase == 'train':
        model = knn.train(image_list)
        serialize_to_file(model, model_file)
    elif model == 'nearest' and phase == 'test':
        model = deserialize_from_file(model_file)
        knn.test(image_list, model)

    # ADA boost
    elif model == "adaboost" and phase == "train":
        params = Adaboost(image_list).adaboost()
        serialize_to_file(params, model_file)
    elif model == "adaboost" and phase == "test":
        params = deserialize_from_file(model_file)
        Adaboost(image_list).adaboost_test(image_list, params)

    # Neural net
    elif model == 'nnet' and phase == 'train':
        net = neural_net.train(image_list)
        serialize_to_file(net, model_file)
    elif model == 'nnet' and phase == 'test':
        net = deserialize_from_file(model_file)
        neural_net.test(net, image_list)

    print('End time', time())

    if phase == 'test':
        accuracy = get_accuracy(image_list)
        save_output(image_list, 'output.txt')
        print('Accuracy ->', accuracy)
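serialize_to_file and deserialize_from_file are not shown in this example; a typical implementation, assuming plain pickle is acceptable for the model objects, would be:

import pickle

# Assumed pickle-based implementations of the helpers used above:
def serialize_to_file(obj, path):
    with open(path, 'wb') as f:
        pickle.dump(obj, f)

def deserialize_from_file(path):
    with open(path, 'rb') as f:
        return pickle.load(f)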
Example #9
def run_algo(ticker, spy):
    # `model` is assumed to be defined in an enclosing scope (not shown).
    df = neural_net.test(ticker, model)
    df, beta, risk, sharpe, ret, profit = backtest.trade(df)
    visualization.algo_vs_market(df, spy)
    visualization.visualize_dollar_profits(df)
Example #10
    if len(sys.argv) != 5:
        raise Exception("Error: expected 4 arguments")

    if sys.argv[1] == 'train':
        # Train your model
        # train train_file.txt model_file.txt [model]
        train_image_ids, train_data = read_data(sys.argv[2])
        if sys.argv[4] == 'nearest':
            # KNN
            KNN.train_data(sys.argv[2], sys.argv[3])
        elif sys.argv[4] == 'tree':
            # Decision tree
            DecisionTree.train(train_data, sys.argv[3])
        elif sys.argv[4] == 'nnet' or sys.argv[4] == 'best':
            # Neural nets is the best case
            neural_net.train(train_data, sys.argv[3])

    elif sys.argv[1] == 'test':
        # Test against your model_file.txt
        # test test_file.txt model_file.txt [model]
        test_image_ids, test_data = read_data(sys.argv[2])
        if sys.argv[4] == 'nearest':
            # KNN
            train_image_ids, train_data = read_data(sys.argv[3])
            KNN.start(train_data, test_data, test_image_ids)
        elif sys.argv[4] == 'tree':
            # Decision tree
            DecisionTree.test(test_data, sys.argv[3], test_image_ids)
        elif sys.argv[4] == 'nnet' or sys.argv[4] == 'best':
            # Neural Nets is the best case
            neural_net.test(sys.argv[3], test_data, test_image_ids)
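Per the inline comments, the expected command lines take the shape "[phase] file model_file [model]"; hypothetical invocations (the script name is an assumption):

#   python main.py train train_file.txt model_file.txt nnet
#   python main.py test test_file.txt model_file.txt nearest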
Example #11
x_train = proc.st_scale(x_train)

# train_data = proc.ZCA(train_data)
# x_train = proc.PCA_reduction(x_train, n_pca)

# Training process using K-Fold
if training:
    if model_type == "net":
        models = net.kfold(model_params, x_train, y_train, n_folds, verbose,
                           grad_check)
    else:
        models = logistic.kfold(model_params, x_train, y_train, n_folds,
                                verbose)

if not training:
    # Pre-processing test data
    x_test = proc.normalize_l2(x_test)
    x_test = proc.st_scale(x_test)

    if model_type == "net":
        # Testing process on neural net
        net.test(model_params, x_train, y_train, x_test, y_test)
    else:
        print("Testing Logistic Regression on Test dataset")
        # Testing process on logistic regression
        model = logistic.create_model(*model_params)
        model.fit(x_train, y_train.ravel())
        score = model.score(x_test, y_test.ravel())
        metrics.print_acc(score)
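net.kfold and logistic.kfold are not shown here; as a self-contained sketch of the K-Fold pattern this snippet relies on (synthetic data, with scikit-learn standing in for the repo's own models):

import numpy as np
from sklearn.model_selection import KFold
from sklearn.linear_model import LogisticRegression

# Synthetic data; each fold trains on the rest and scores on the held-out part.
X = np.random.randn(100, 5)
y = (X[:, 0] > 0).astype(int)

scores = []
for train_idx, val_idx in KFold(n_splits=5, shuffle=True).split(X):
    clf = LogisticRegression().fit(X[train_idx], y[train_idx])
    scores.append(clf.score(X[val_idx], y[val_idx]))
print("mean CV accuracy: %.3f" % np.mean(scores))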
Example #12
if kind == 'epistemic':
    model = epistemic
elif kind == 'aleatoric':
    model = aleatoric
elif kind == 'combined':
    model = combined
else:
    print('kind can be epistemic, aleatoric or combined')
    exit()

net = model.Net(28 * 28, 10, 1024, 2)
net.apply(neural_net.init_weights)

criterion = model.Loss()

predict = model.predict

kwargs = dict(lr=1e-4, weight_decay=1e-4) if kind == 'aleatoric' else dict(lr=1e-4)
optimizer = torch.optim.Adam(net.parameters(), **kwargs)

scheduler = ExponentialLR(optimizer, gamma=0.9999)

net.train()
for epoch in range(10):
    train_losses = neural_net.train(train_loader, net, criterion, optimizer,
                                    scheduler)
    print('Train loss = %s' % (sum(train_losses) / len(train_losses)))

    score, loss = neural_net.test(test_loader, predict, net, criterion)
    print('Testing: Accuracy = %.2f%%, Loss %.4f' % (score * 100, loss))

torch.save(net, '%s.pt' % kind)
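torch.save(net, ...) here pickles the entire module, so loading it back requires the model class to be importable. A minimal loading sketch (file name assumes kind == 'epistemic'):

import torch

# Load the full pickled module saved above; the Net class definition must
# be importable at load time (newer PyTorch versions may also require
# passing weights_only=False when loading a whole module).
net = torch.load('epistemic.pt')
net.eval()  # switch to inference mode before evaluating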