def main(neural_net_func, data_sets, rate=1.0, max_iterations=10000):
    """Train and evaluate a freshly-built network on each named data set.

    neural_net_func -- zero-arg factory returning a new, untrained network
    data_sets       -- iterable of (name, training_data, test_data) triples
    rate            -- learning rate forwarded to train()
    max_iterations  -- iteration cap forwarded to train()
    """
    verbose = True
    for name, training_data, test_data in data_sets:
        print("-" * 40)
        print("Training on %s data" % name)
        net = neural_net_func()
        train(net, training_data, rate=rate,
              max_iterations=max_iterations, verbose=verbose)
        print("Trained weights:")
        for weight in net.weights:
            print("Weight '%s': %f" % (weight.get_name(), weight.get_value()))
        print("Testing on %s test-data" % name)
        accuracy = test(net, test_data, verbose=verbose)
        print("Accuracy: %f" % accuracy)
        if verbose:
            # Sanity-check the gradients and visualize the learned boundary
            # on both splits.
            print("Finite Difference Check:", finite_difference(net))
            print("Decision Boundary on Training Data:")
            plot_decision_boundary(net, -PLOT_SIZE, PLOT_SIZE,
                                   -PLOT_SIZE, PLOT_SIZE, training_data)
            print("Decision Boundary on Test Data:")
            plot_decision_boundary(net, -PLOT_SIZE, PLOT_SIZE,
                                   -PLOT_SIZE, PLOT_SIZE, test_data)
        # Final accuracy report on both splits.
        test_result = test(net, test_data, verbose=verbose)
        print("Test Data Accuracy: %f" % test_result)
        train_result = test(net, training_data, verbose=verbose)
        print("Train Data Accuracy: %f" % train_result)
def main(neural_net_func, data_sets, max_iterations=10000):
    """Train a fresh network on each named data set, dump its weights,
    and report test accuracy.

    Fix: converted Python 2 print statements to the print() function so the
    module is valid Python 3 (each print takes a single argument, so this
    form is equally valid under Python 2).

    neural_net_func -- zero-arg factory returning a new, untrained network
    data_sets       -- iterable of (name, training_data, test_data) triples
    max_iterations  -- iteration cap forwarded to train()
    """
    verbose = True
    for name, training_data, test_data in data_sets:
        print("-" * 40)
        print("Training on %s data" % (name))
        nn = neural_net_func()
        train(nn, training_data, max_iterations=max_iterations, verbose=verbose)
        print("Trained weights:")
        for w in nn.weights:
            print("Weight '%s': %f" % (w.get_name(), w.get_value()))
        print("Testing on %s test-data" % (name))
        result = test(nn, test_data, verbose=verbose)
        print("Accuracy: %f" % (result))
def main(neural_net_func, data_sets, max_iterations=10000):
    """Train a fresh network on each named data set, dump its weights,
    and report test accuracy.

    Fix: converted Python 2 print statements to the print() function so the
    module is valid Python 3 (single-argument prints, so the parenthesized
    form behaves identically under Python 2 as well).

    neural_net_func -- zero-arg factory returning a new, untrained network
    data_sets       -- iterable of (name, training_data, test_data) triples
    max_iterations  -- iteration cap forwarded to train()
    """
    verbose = True
    for name, training_data, test_data in data_sets:
        print("-"*40)
        print("Training on %s data" %(name))
        nn = neural_net_func()
        train(nn, training_data, max_iterations=max_iterations, verbose=verbose)
        print("Trained weights:")
        for w in nn.weights:
            print("Weight '%s': %f"%(w.get_name(),w.get_value()))
        print("Testing on %s test-data" %(name))
        result = test(nn, test_data, verbose=verbose)
        print("Accuracy: %f"%(result))
def main(neural_net_func, data_sets, rate=1.0, max_iterations=10000):
    """For each (name, training, test) triple: train a fresh net, run a
    gradient sanity check, dump the weights, and report test accuracy."""
    verbose = True
    for name, training_data, test_data in data_sets:
        print("-"*40)
        print("Training on %s data" %(name))
        net = neural_net_func()
        train(net, training_data, rate=rate,
              max_iterations=max_iterations, verbose=verbose)
        # NOTE(review): finite_difference's return value is unused here —
        # presumably it is run for its own output/side effects; confirm.
        finite_difference(net)
        print("Trained weights:")
        for weight in net.weights:
            print("Weight '%s': %f"%(weight.get_name(),weight.get_value()))
        print("Testing on %s test-data" %(name))
        accuracy = test(net, test_data, verbose=verbose)
        print("Accuracy: %f"%(accuracy))
def main(neural_net_func, data_sets, rate=1.0, max_iterations=10000):
    """Train a new network per data set and report accuracy on both the
    training split and the held-out test split."""
    verbose = True
    for name, training_data, test_data in data_sets:
        print("-" * 40)
        print("Training on %s data" % name)
        net = neural_net_func()
        train(net, training_data, rate=rate,
              max_iterations=max_iterations, verbose=verbose)
        print("Trained weights:")
        for weight in net.weights:
            print("Weight '%s': %f" % (weight.get_name(), weight.get_value()))
        # Accuracy on the data the net was fit to...
        print("Testing on %s train-data" % name)
        train_accuracy = test(net, training_data, verbose=verbose)
        print("Accuracy on train data: %f" % train_accuracy)
        # ...and on the held-out split.
        print("Testing on %s test-data" % name)
        test_accuracy = test(net, test_data, verbose=verbose)
        print("Accuracy on test data: %f" % test_accuracy)
def analyze_portfolio():
    """Build a portfolio fitness score and recommendation for the user
    identified by the 'session' request header; 400 on a bad session."""
    user_id = get_id_from_session(request.headers.get('session'))
    if not user_id:
        return jsonify({"message": "Bad session :("}), 400

    # Fetch this user's transaction and holding documents (projections drop
    # _id). As in the original, an empty result raises IndexError.
    transaction_doc = list(transactions.find(
        {'_id': user_id}, {'transaction': 1, '_id': 0}))[0]
    holding_doc = list(holdings.find(
        {'_id': user_id}, {'holding': 1, '_id': 0}))[0]
    # NOTE(review): the meaning of the constant 25 isn't visible here —
    # confirm against Parser's signature.
    parser = Parser(transaction_doc, holding_doc, 25)

    # Score the portfolio with a freshly trained model, then turn the
    # predicted direction into a concrete recommendation.
    pv, direction = predict(parser.get_neural_net_attrs(), train())
    recommendation = Recommender(direction.tolist()).get_recommendation()
    return jsonify({
        'fitness': pv[0],
        'recommendation': recommendation[0],
        'link': recommendation[1],
    })
import neural_net as nn
import corpus_parser as corpus
import performAction as bot

if __name__ == '__main__':
    # Train the response model once, then serve an interactive loop.
    nn.train()
    print('Conversation Started!')
    # Read-eval-print loop: exit when the user types 'exit'.
    while True:
        query = input(">>> ")
        if query == 'exit':
            break
        print(bot.performBotAction(nn.response(query)))
# Dispatch on (model, phase): train serializes a fitted model to model_file;
# test deserializes it and scores image_list.
# Fix: converted Python 2 print statements to print() calls (Python 3).

# K-Nearest neighbors
if model == 'nearest' and phase == 'train':
    model = knn.train(image_list)
    serialize_to_file(model, model_file)
elif model == 'nearest' and phase == 'test':
    model = deserialize_from_file(model_file)
    knn.test(image_list, model)
# ADA boost
elif model == "adaboost" and phase == "train":
    params = Adaboost(image_list).adaboost()
    serialize_to_file(params, model_file)
elif model == "adaboost" and phase == "test":
    params = deserialize_from_file(model_file)
    Adaboost(image_list).adaboost_test(image_list, params)
# Neural net
elif model == 'nnet' and phase == 'train':
    net = neural_net.train(image_list)
    serialize_to_file(net, model_file)
elif model == 'nnet' and phase == 'test':
    net = deserialize_from_file(model_file)
    neural_net.test(net, image_list)

print('End time', time())
if phase == 'test':
    # Score the (now labeled) images and persist the predictions.
    accuracy = get_accuracy(image_list)
    save_output(image_list, 'output.txt')
    print('Accuracy ->', accuracy)
'''
train.py

Interactive driver: load a network definition, train it on a data file with
user-supplied hyper-parameters, and write the trained weights back out.
'''
import neural_net as nn

# Network topology and initial weights come from an init file chosen at the prompt.
NEURAL_NET_FILENAME = input('Neural network init filename: ')
NUM_INPUT, NUM_HIDDEN, NUM_OUTPUT, WEIGHTS = \
    nn.read_neural_net_file(NEURAL_NET_FILENAME)

# Training examples plus learning rate and epoch count are also prompted for.
TRAIN_FILENAME = input('Neural network training data filename: ')
TRAINING_DATA = nn.read_data_file(TRAIN_FILENAME)
LEARNING_RATE = float(input('Learning rate: '))
EPOCHS = int(input('Number of epochs: '))

# Train, then persist the updated weights alongside the original topology.
WEIGHTS = nn.train(WEIGHTS, TRAINING_DATA, LEARNING_RATE, EPOCHS)
TRAINED_FILENAME = input('Neural network trained filename: ')
nn.write_trained_file(TRAINED_FILENAME, NUM_INPUT, NUM_HIDDEN,
                      NUM_OUTPUT, WEIGHTS)
import neural_net as nn
import numpy as np
from random import random
# Libraries necessary to perform the task.

##################################################
# MLP with 2 inputs, one hidden layer of 5 units, 1 output.
mlp = nn.MLP(2, [5], 1)
# [weights, activations, derivatives]

# 3000 random pairs drawn from [0, 0.5); the target is their sum, so
# targets stay within [0, 1).
inputs = np.array([[random() / 2 for _ in range(2)] for _ in range(3000)])
target = np.array([[i[0] + i[1]] for i in inputs])
##################################################

print(50 * "-")
print("Training The Neural Network!")
print()
nn.train(mlp, inputs, target, 50, 1)
print()
print(50 * "-")
print("Testing The Neural Network!")
print()
# Fix: renamed this variable from `input`, which shadowed the builtin of
# the same name.
sample = np.array([0.3, 0.2])
output = nn.forward_propagate(sample, mlp[0], mlp[1])
print()
print("Prediction of {} + {} is {}".format(sample[0], sample[1], output[0]))
def init():
    """Train the neural network (one-time setup before handling queries)."""
    nn.train()
# Usage: (train|test) data_file.txt model_file.txt [nearest|tree|nnet|best]
if len(sys.argv) != 5:
    raise Exception("Error: expected 4 arguments")

phase, data_file, model_file, model_name = sys.argv[1:5]

if phase == 'train':
    # Train your model: train train_file.txt model_file.txt [model]
    train_image_ids, train_data = read_data(data_file)
    if model_name == 'nearest':
        # KNN just memorizes the training file.
        KNN.train_data(data_file, model_file)
    elif model_name == 'tree':
        # Decision tree
        DecisionTree.train(train_data, model_file)
    elif model_name in ('nnet', 'best'):
        # Neural nets is the best case
        neural_net.train(train_data, model_file)
elif phase == 'test':
    # Test against your model_file.txt: test test_file.txt model_file.txt [model]
    test_image_ids, test_data = read_data(data_file)
    if model_name == 'nearest':
        # KNN needs the raw training data again at test time.
        train_image_ids, train_data = read_data(model_file)
        KNN.start(train_data, test_data, test_image_ids)
    elif model_name == 'tree':
        # Decision tree
        DecisionTree.test(test_data, model_file, test_image_ids)
    elif model_name in ('nnet', 'best'):
        # Neural Nets is the best case
        neural_net.test(model_file, test_data, test_image_ids)
# NOTE(review): this first statement is the tail of a target-building loop
# whose header lies above this chunk — confirm indentation against the
# original file.
targets.append(x)

targets = np.array(targets)
inputs = np.array(p[0])

# Splitting the data for training and testing (80/20, stratified so the
# class mix is preserved in both splits; fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(
    inputs, targets, test_size=0.2, random_state=42, stratify=targets)

print()
print(50 * "-")
# NOTE(review): "Neural Neural" typo in the banner string below — left
# untouched here since it is runtime output.
print("Training The Neural Neural Network")
nn.train(MLP, X_train, y_train, 25, 0.1)
print(50 * "-")
print("Testing the Neural Network")
outputs = nn.forward_propagate(X_test, MLP[0], MLP[1])
print()
print()

# Convert each raw output vector into a one-hot prediction over 3 classes
# (index of the maximum activation wins).
pred_y = []
for output in outputs:
    output = list(output)
    pred_labels = [0, 0, 0]
    pred_labels[output.index(max(output))] = 1.0
    pred_y.append(pred_labels)
# NOTE(review): this elif chain continues an `if kind == ...` branch (the
# 'epistemic' case, per the error message) that lies above this chunk.
elif kind == 'aleatoric':
    model = aleatoric
elif kind == 'combined':
    model = combined
else:
    print('kind can be epistemic, aleatoric or combined')
    exit()

# Build the chosen model variant. Assumes Net(in_features, classes, hidden,
# depth) given the 28*28 input and 10 outputs — TODO confirm against
# model.Net's signature.
net = model.Net(28 * 28, 10, 1024, 2)
net.apply(neural_net.init_weights)
criterion = model.Loss()
predict = model.predict

# Only the aleatoric variant uses weight decay; learning rate is shared.
kwargs = dict(lr=1e-4, weight_decay=0.0001) if kind == 'aleatoric' else dict(
    lr=1e-4)
optimizer = torch.optim.Adam(net.parameters(), **kwargs)
scheduler = ExponentialLR(optimizer, gamma=0.9999)

net.train()  # put the module in training mode (standard torch semantics)
for epoch in range(10):
    # Each epoch: one pass over train_loader, then evaluate on test_loader.
    train_losses = neural_net.train(train_loader, net, criterion, optimizer,
                                    scheduler)
    print('Train loss = %s' % (sum(train_losses) / len(train_losses)))
    score, loss = neural_net.test(test_loader, predict, net, criterion)
    print('Testing: Accuracy = %.2f%%, Loss %.4f' % (score * 100, loss))

# Persist the whole model object, named after the uncertainty kind.
torch.save(net, '%s.pt' % kind)