Example #1
def train_v0():
    print('\n<<<<<<<TRAIN_V0>>>>>>>>\n')
    # import data
    # create neural nets
    # sort data into appropriate neural net
    big_data = np.loadtxt(open(
        "/Users/conradmilhaupt/Documents/ScientistSimulation2/data/nn/V0_data.csv",
        "rb"),
                          delimiter=",",
                          skiprows=1)  # age, q, T, max, mean, sds, impact_left
    net_list = []

    for i in range(TP_ALIVE):
        print(
            '\n\n\n--------------------------NEURAL NET V0_{}----------------------------\n\n'
            .format(i))
        net = nn.NeuralNet("V0_{}".format(i))
        data = big_data[np.where(big_data[:, 0] == i)]

        net.set_train_data(data[:, 1:])  # slice out the first column, which is the scientist's age
        net.build_model()
        net.train_model()
        net.save_model()

        net.check1()
        # net.check2()

        net_list.append(net)

    print("\n\n\n\n\nDONE TRAINING V0's\n\n\n\n\n")
Example #2
def main():
    alpha = 0.5
    data = np.matrix("0. 1. 1.; 0. 0. 0.; 1. 0. 1.; 1. 1. 0.")
    theta = .01
    iter_limit = 50000
    mynet = nn.NeuralNet((2, 2, 1), sf.binary, sf.binary_prime, alpha)
    n = data.shape[0]
    total_count = 0
    k = -1
    fh = open('nn_xor.csv', 'w')
    plot_data = np.matrix("0,0")
    while True:
        total_count += 1
        k = (k+1) % n
        pattern = np.asarray(data[k,1:]).squeeze()
        teacher = [data[k,0]]
        error = mynet.train(pattern, teacher, theta)
        if (total_count % 10) == 0:
            plot_data = np.vstack((plot_data, np.matrix((total_count, error))))
            msg = ','.join([str(total_count), str(error)])
            fh.write(msg)
            fh.write('\n')
        if (total_count > iter_limit) or (error < theta):
            print("Stopped at iter count=>", total_count)
            break

    mynet.dt.tofile('bub_xor.txt')
    fh.close()
    plt.plot(plot_data[0:,0], plot_data[0:,1])
    plt.show()
Example #3
def performForLearningRate(eta):
    epochsPerformance = []

    print("START : Starting to train, eta:", eta)
    net = neural_net.NeuralNet(nnSizes,
                               seed=seed,
                               debug=False,
                               activationFunc=activationFunc,
                               costFunc=costFunc,
                               useSoftMax=useSoftMax)
    for epoch in range(numOfEpochs2Calc):
        net.SGD(trainingData, 1, mini_batch_size=miniBatchSize, eta=eta)
        performance = net.evaluate(testData)
        if debug:
            print("PERFORMANCE : Learning rate:", eta, "epoch:",
                  str(epoch + 1), "performance:", performance)
        epochsPerformance.append(performance)
        if debugWB:
            addNetworkSettings2Json(net.weights, net.biases, eta,
                                    activationFunc, nnSizes, epoch,
                                    miniBatchSize)
        print("EPOCH : Learning rate:", eta, "epoch:", str(epoch + 1),
              "finished")

    print("FINISH : Learning rate:", eta, "finished")

    return epochsPerformance
Example #4
    def test_build(self):

        network = neural_net.NeuralNet(3, 1, [2, 4])

        # Checks if layers sizes are correct
        self.assertEqual(len(network.neurons[0]), 2 + 1)
        self.assertEqual(len(network.neurons[1]), 4 + 1)

        # Checks if connections sizes are correct
        self.assertEqual(len(network.connections[0]), 2 + 1)
        self.assertEqual(len(network.connections[1]), 4 + 1)
        self.assertEqual(len(network.connections[2]), 1)

        # Check connections from Hidden Layer 1
        self.assertEqual(len(network.connections[0][0]), 3 + 1)
        self.assertEqual(len(network.connections[0][1]), 3 + 1)
        self.assertEqual(len(network.connections[0][2]), 3 + 1)

        # Check connections from Hidden Layer 2
        self.assertEqual(len(network.connections[1][0]), 2 + 1)
        self.assertEqual(len(network.connections[1][1]), 2 + 1)
        self.assertEqual(len(network.connections[1][2]), 2 + 1)
        self.assertEqual(len(network.connections[1][3]), 2 + 1)

        # Check connections from output layer
        self.assertEqual(len(network.connections[2][0]), 4 + 1)

Example #5
def test_mnist():
    images = utility.read_idx_images(
        '../../mnist_data/train-images.idx3-ubyte')
    labels = utility.read_idx_labels(
        '../../mnist_data/train-labels.idx1-ubyte')
    labels = utility.make_onehot(labels, np.arange(10))
    return train.train_regular(nn.NeuralNet((784, 30, 10)), images, labels)
Example #6
    def train(self):
        """
        Train the LL_Bayesian_NN model using the available training dataset
        This will consist of roughly two important tasks
        First, NN will perform the task of feature extraction 
        Second, Bayesian Linear Regressor will condition the above obtained features.
        This BLR layer is going to help us in making predictions and quantify the epistemic uncertainity
        associated with our predictions.
        """
        # Train the neural network for feature extraction
        NN = neural_net.NeuralNet(self.X, self.y, self.rng,
                                  self.normalize_input, self.normalize_output)
        NN.train_nn(self.num_epochs, self.batch_size,
                    self.n_units_1, self.n_units_2, self.n_units_3)
        ll_out = NN.extract_last_layer_nn_out(self.X)

        self.NN = NN

        # Bayesian Linear Regression (with Automatic Relevance determination to infer hyperparameters)
        LinearRegressor = bayesian_linear_regressor.BayesianARD(ll_out, self.y,
                                                                self.normalize_output_lr,
                                                                self.lr_intercept)
        m, S, sigma, alpha = LinearRegressor.train()

        #self.m = m
        #self.S = S
        #self.sigma = sigma
        #self.alpha = alpha

        self.LinearRegressor = LinearRegressor
        
        np.savetxt('../nn_results/ll_out_E' + str(self.num_epochs) + 'N' + str(self.X.shape[0]) + '.txt', ll_out)
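The train() method above stores m, S, and sigma only implicitly (the assignments are commented out) and never shows prediction. Below is a minimal sketch of how the BLR posterior could be used downstream, following the standard Bayesian linear regression predictive equations; the helper is hypothetical, and it assumes sigma is the noise standard deviation:

import numpy as np

def predict_with_uncertainty(NN, m, S, sigma, X_new):
    """Hypothetical helper: predictive mean and variance of the BLR layer.

    For extracted features phi, standard BLR gives mean = phi @ m and
    variance = diag(phi @ S @ phi.T) (epistemic) + sigma**2 (noise).
    """
    phi = NN.extract_last_layer_nn_out(X_new)  # features from the trained NN
    mean = phi @ m  # posterior predictive mean
    epistemic = np.einsum('ij,jk,ik->i', phi, S, phi)  # phi_i^T S phi_i per row
    return mean, epistemic + sigma ** 2  # total predictive variance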
Example #7
def main():
    alpha = 0.5
    mynet = nn.NeuralNet((2, 3, 2), sf.binary, sf.binary_prime, alpha)
    mynet.dt.set_test_weights()
    myin = np.array((1., 2.))
    myteach = np.array((2., 1.))
    error = mynet.train(myin, myteach, 1e-3)
    mynet.dt.prettyprint()
    print("Error=>", error)
Example #8
    def test_gradient(self):
        network = neural_net.NeuralNet(3, 1, [2, 4])
        network.back_propagation([1, 2, 3], [1])

        for layer_no, layer in enumerate(network.old_gradient_list):
            for neuron_no, gradients in enumerate(layer):
                for gradient_no, _ in enumerate(gradients):
                    self.assertTrue(
                        isclose(*network.gradient_verification(
                            layer_no, neuron_no, gradient_no, [1, 2, 3], [1]),
                                rel_tol=0.01))
Example #9
    def SetupTesting(self, test_examples, test_labels, skeleton):
        """Set up the tensorflow graph for testing."""
        # Builds the graph.
        predictions, _ = NN.NeuralNet(skeleton,
                                      self.learning_params,
                                      test_examples,
                                      tf_params=self.trainable)
        self.test_loss, self.test_accuracy = self.SetupLoss(
            predictions, test_labels)
        # make summary writers for tensorboard
        tf.summary.scalar('test_loss', self.test_loss)
        tf.summary.scalar('test_accuracy', self.test_accuracy)
Example #10
    def __init__(self, board=None, nnet=None, mcts=None, batch_size=10):
        """Pass the baord class (defaults to GameBoard), the neural net instance
        and the MCTS instance
        """
        logging.basicConfig(filename="dojo.log", level=logging.DEBUG,
            filemode='w')

        self.board = board or GameBoard()
        self.nnet = nnet or neural_net.NeuralNet()
        self.tree = mcts or MCTS(3)

        self.training_examples = []
        self.batch_size = batch_size
Example #11
def test_1in_1out():
    def target_function_1D(x):
        # inputs from 0 to 1, outputs from 0 to 1
        return np.abs(np.sin(np.multiply(x, 4)))

    NUM_BATCHES = 4000
    NUM_CASES = 10
    SIZES = (1, 10, 1)
    net = nn.NeuralNet(SIZES)
    inputs = np.divide(np.arange(NUM_CASES * NUM_BATCHES),
                       NUM_CASES * NUM_BATCHES)
    labels = target_function_1D(inputs)
    indexes = np.arange(NUM_CASES * NUM_BATCHES)
    np.random.shuffle(indexes)
    inputs = xr.DataArray(inputs[indexes].reshape(
        (NUM_BATCHES * NUM_CASES, 1)),
                          dims=('cases', 'inputs'))
    labels = xr.DataArray(labels[indexes].reshape(
        (NUM_BATCHES * NUM_CASES, 1)),
                          dims=('cases', 'labels'))
    # i and l are the per-step batch inputs/labels (assumed yield order),
    # used by the scatter plots below
    for new_net, count, i, l, _ in net.train_yield(inputs,
                                                   labels,
                                                   training_rate=3.0):
        if count % 500 == 0:
            x = np.linspace(0, 1, num=50)
            xgrid = xr.DataArray(x, dims=('cases'), coords={
                'cases': x
            }).expand_dims('inputs')
            net_outputs = new_net.output_only(
                new_net.pass_forward(xgrid)).squeeze()
            goal_outputs = target_function_1D(xgrid)
            plt.scatter(x, net_outputs)
            plt.scatter(i.squeeze().rename(cases='inputs'),
                        l.squeeze().rename(cases='outputs'))
            goal_outputs.plot()
            net_outputs.plot()
            plt.show()
    NUM_TESTS = 100
    test_inputs = np.divide(np.arange(NUM_TESTS), NUM_TESTS)
    xarray_inputs = xr.DataArray(test_inputs.reshape((NUM_TESTS, 1)),
                                 dims=('cases', 'inputs'))
    goal_outputs = target_function_1D(xarray_inputs).squeeze()
    net_outputs = net.pass_forward(xarray_inputs)[nn.mkey(
        len(SIZES) - 1, 'post_activation')].squeeze()
    goal_outputs.assign_coords(cases=test_inputs)
    net_outputs.assign_coords(cases=test_inputs)
    goal_outputs.plot()
    net_outputs.plot()
Example #12
def test_2in_1out():
    def func_probability(xarray_input):
        STEEPNESS = 100
        W = 1 * STEEPNESS
        B = 0.7 * STEEPNESS
        # logistic sigmoid of (W * x1 - B)
        return 1 / (1 + np.exp(-(xarray_input.isel(inputs=0) * W - B)))

    def plot_tensor_2D(tensor):
        NUM = 50
        x = np.linspace(0, 1, num=NUM)
        y = np.linspace(0, 1, num=NUM)
        xgrid, ygrid = np.meshgrid(x, y)
        input_grid = xr.DataArray(
            xr.concat([
                xr.DataArray(xgrid, dims=('input1', 'input2')),
                xr.DataArray(ygrid, dims=('input1', 'input2'))
            ],
                      dim='inputs'))
        input_grid = input_grid.assign_coords(input1=x, input2=y)
        outputs = tensor.pass_forward_output_only(input_grid)
        outputs.plot()

    NUM_BATCHES = 100
    NUM_CASES = 100
    NUM_DIMS = 2
    inputs = xr.DataArray(np.random.rand(NUM_BATCHES * NUM_CASES, NUM_DIMS),
                          dims=('cases', 'inputs'))
    labels = xr.DataArray(np.greater(func_probability(inputs),
                                     np.random.rand(NUM_BATCHES * NUM_CASES)),
                          dims=('cases'))
    labels = labels.expand_dims('labels')
    net = nn.NeuralNet((2, 1))
    # as in test_1in_1out, unpack the batch inputs/labels as i/l for plotting
    for new_net, count, i, l, _ in net.train_yield(inputs,
                                                   labels,
                                                   training_rate=5.0):
        if count % 20 == 0:
            plot_tensor_2D(new_net)
            plt.scatter(i.isel(inputs=0), i.isel(inputs=1), c=l.isel(labels=0))
            plt.show()
Example #13
def main():
    training_data = md.circle_data(40)
    training_data = md.convert_data(training_data)

    # Make a test data set
    test_data = md.read_data('homework1_data.csv')
    test_data = md.convert_data(test_data)

    alpha = 0.5
    theta = .0001
    iter_limit = 50000
    mynet = nn.NeuralNet((2, 2, 1), sf.bipolar, sf.bipolar_prime, alpha)
    n = training_data.shape[0]
    total_count = 0
    k = -1
    plot_data = np.matrix("0 0")
    fh = open('nn_quad.csv', 'w')
    while True:
        total_count += 1
        k = (k + 1) % n
        #k = np.random.randint(0, n)
        pattern = np.asarray(training_data[k, 1:]).squeeze()
        teacher = [training_data[k, 0]]
        error = mynet.train(pattern, teacher, theta)
        if (total_count % 10) == 0:
            plot_data = np.vstack((plot_data, np.matrix((total_count, error))))
            msg = ','.join([str(total_count), str(error)])
            fh.write(msg)
            fh.write('\n')
        if (total_count > iter_limit) or (error < theta):
            print("Stopped at iter count ", total_count,
                  "with a final error of ", error)
            break

    mynet.dt.tofile('bub_quad.txt')
    fh.close()
    plt.plot(plot_data[0:, 0], plot_data[0:, 1])
    plt.show()

    counter = check_net(test_data, mynet.fwd)
    print('Assigned', counter, 'patterns of', test_data.shape[0], 'correctly.')
Example #14
    def train(self):
        """ Using the stored dataset and architecture, trains the neural net to 
        perform feature extraction, and the linear regressor to perform prediction
        and confidence interval computation.
        """
        neural_net = nn.NeuralNet(self.__architecture, self.__dataset)
        neural_net.train()
        self.__W, self.__B = neural_net.extract_params()
        self.__nn_pred = neural_net.e.network(self.__domain)

        # Extract features
        train_X = self.__dataset[:, :-1]
        train_Y = self.__dataset[:, -1:]
        train_features = self.extract_features(train_X)
        domain_features = self.extract_features(self.__domain)
        lm_dataset = np.concatenate((train_features, train_Y), axis=1)

        # Train and predict with linear_regressor
        linear_regressor = lm.LinearRegressor(lm_dataset, intercept=False)
        linear_regressor.train()
        self.__pred, self.__hi_ci, self.__lo_ci = linear_regressor.predict(
            domain_features)
Example #15
    def SetupTraining(self, examples, labels, skeleton):
        """Set tensorflow graph for training."""
        # Builds the graph.
        predictions, self.trainable = NN.NeuralNet(skeleton,
                                                   self.learning_params,
                                                   examples)

        self.loss, self.accuracy = self.SetupLoss(predictions, labels)

        # Set trained variables.
        var_list = self.trainable
        if self.learning_params.trained_layers:
            var_list = []
            for i in self.learning_params.trained_layers.split(','):
                var_list.append(self.trainable[2 * int(i)])
                var_list.append(self.trainable[2 * int(i) + 1])

        # Set the optimizer.
        self.global_step = tf.Variable(0, trainable=False)
        num_batches_per_epoch = (self.learning_params.number_of_examples /
                                 self.learning_params.batch_size)
        decay_steps = int(num_batches_per_epoch *
                          self.learning_params.epochs_per_decay)
        self.lr = tf.train.exponential_decay(
            self.learning_params.learning_rate,
            self.global_step,
            decay_steps,
            self.learning_params.decay_rate,
            staircase=True)
        optimizer = utils.GetOptimizer(self.learning_params, self.lr)
        self.update = optimizer.minimize(self.loss,
                                         var_list=var_list,
                                         gate_gradients=True,
                                         global_step=self.global_step)

        # make summary writers for tensorboard
        tf.summary.scalar('loss', self.loss)
        tf.summary.scalar('accuracy', self.accuracy)
Example #16
def test_compare():
    SIZES = (3, 2, 1)
    net = nn.NeuralNet(SIZES, func_fill=np.ones)
    inputs = xr.DataArray(np.ones((3, )), dims=('inputs'))
    activations = net.pass_forward(inputs)
    pre_activations = [
        activations[nn.mkey(i, 'pre_activation')].values.reshape((1, SIZES[i]))
        for i in range(len(SIZES))
    ]
    gradient = np.subtract(
        np.ones((1, )),
        activations[nn.mkey(len(SIZES) - 1,
                            'post_activation')]).values.reshape((1, 1))
    old_output = nn_old.pass_back(
        pre_activations, gradient,
        nn_old.create_tensor(SIZES, fillFunction=np.ones))
    output = net.pass_back(activations, xr.DataArray([1], dims=('labels')))
    for layer in range(len(SIZES) - 1):
        print('LAYER', layer, 'WEIGHTS:\nold:',
              old_output['w'][layer].tolist(), '\nnew:',
              output[nn.mkey(layer, 'weights')].values.tolist(), '\nLAYER',
              layer, 'BIASES:\nold:', old_output['b'][layer].tolist(),
              '\nnew:', output[nn.mkey(layer, 'biases')].values.tolist(), '\n')
Example #17
def train_v1():
    print('\n<<<<<<<TRAIN_V1>>>>>>>>\n')
    # load saved V0 neural net models
    print('loading Brains...')
    V0_net_list = []
    for i in range(TP_ALIVE):
        net = brain.Brain.load_brain("V0_{}".format(i))
        V0_net_list.append(net)

    big_data = np.loadtxt(open(
        "/Users/conradmilhaupt/Documents/ScientistSimulation2/data/nn/V1_data.csv",
        "rb"),
                          delimiter=",",
                          skiprows=1
                          )  # sci_id, tp, sci_age, actual_returns, ideas...
    idea_data = np.loadtxt(open(
        "/Users/conradmilhaupt/Documents/ScientistSimulation2/data/model/perceived_ideas.csv",
        "rb"),
                           delimiter=",",
                           skiprows=1)  # sci, idea, max, mean, sds
    q_data = np.loadtxt(open(
        "/Users/conradmilhaupt/Documents/ScientistSimulation2/data/model/num_k_total_idea_tp.csv",
        "rb"),
                        delimiter=",",
                        skiprows=1)[:, 1:]
    T_data = np.loadtxt(open(
        "/Users/conradmilhaupt/Documents/ScientistSimulation2/data/model/T_total_idea_tp.csv",
        "rb"),
                        delimiter=",",
                        skiprows=1)[:, 1:]

    # initialize a list of placeholder neural nets, one per age
    V1_net_list = [brain.Brain(None) for _ in range(TP_ALIVE)]

    age = TP_ALIVE - 1
    while age >= 0:
        print(
            '\n\n\n--------------------------NEURAL NET V1_{}----------------------------\n\n'
            .format(age))
        net = nn.NeuralNet("V1_{}".format(age))
        data = big_data[np.where(big_data[:, 2] == age)]
        V1_train_data = []

        for row in data:  # process each action space for this scientist age
            V1_in_vec = []
            sci_id = int(row[0])
            tp = int(row[1])
            for idea_idx in row[4:]:  # get V0 for each idea in the action space
                idea_idx = int(idea_idx)
                if idea_idx == -1:
                    break  # -1 marks the end of the idea action space vector

                q = q_data[idea_idx, tp]
                T = T_data[idea_idx, tp]

                query1 = idea_data[np.where(idea_data[:, 0] == sci_id)]
                # fetch the single matching row as a 1d array
                idea_row = query1[np.where(query1[:, 1] == idea_idx)][0]
                maxx = idea_row[2]
                mean = idea_row[3]
                sds = idea_row[4]

                input_data = [q, T, maxx, mean, sds]
                # get V0 based on the state space of each idea
                V0 = V0_net_list[age].predict(np.asarray(input_data))
                V1_in_vec.append(V0)

            # zero-pad the input layer: nonexistent ideas (the -1's) get potential impact 0
            while len(V1_in_vec) < MAX_IDEAS:
                V1_in_vec.append(0)

            # calculate the true valuation of the scientist's action space to train against
            U_e = row[3]  # actual returns U_e used to train V_a, unlike Smart_Optimize.java
            if age == TP_ALIVE - 1:
                # backwards recursion: the last age before scientist "death" has no NPV,
                # just the present returns (V7 = U(e7) if TP_ALIVE == 7)
                true_out = U_e
            else:
                # the scientist accounts for present returns plus discounted future
                # returns, predicting V_(alpha+1) from the current action space:
                # V6 = U(e6) + BETA * V7, for alpha from 0 to TP_ALIVE - 2
                true_out = U_e + BETA * V1_net_list[age + 1].predict(
                    np.asarray(V1_in_vec))

            # append the output as the last element of the input vector, matching
            # the training format of the neural net class
            V1_in_vec.append(true_out)
            # add this action space to the set of all action spaces used to train V1
            V1_train_data.append(np.asarray(V1_in_vec))

        print(np.asarray(V1_train_data))
        net.set_train_data(np.asarray(V1_train_data))
        net.build_model()
        net.train_model()
        net.save_model()
        net.check1()
        # net.check2()

        # NOTE: store a Brain instance as the element, not the NeuralNet class
        load_net = brain.Brain.load_brain("V1_{}".format(age))
        V1_net_list[age] = load_net  # place V_age at its index in V1_net_list
        age -= 1  # train the next neural net per the backwards recursion

    print("\n\n\n\n\nDONE TRAINING V1's\n\n\n\n\n")
Example #18
import datetime
import pandas as pd

import google_quote
import neural_net
import neural_net_trainer
import data_entry

data = google_quote.GoogleIntradayQuote('tsla', 300, 30)
data.write_csv("data.csv")
data = pd.read_csv(
    "data.csv",
    names=["SYMBOL", "DATE", "TIME", "OPEN", "HIGH", "LOW", "CLOSE", "VOLUME"])

# Creates a neural net with i input nodes, j hidden nodes, and k output nodes
i = 10
j = 10
k = 1
net = neural_net.NeuralNet(i, j, k)
trainer = neural_net_trainer.NeuralNetTrainer(net)

# We now need to prep the data into training sets and target sets.
# Each training set will consist of ten stock values and an indicator of whether
# holding the stock from time 10 to 11 results in a gain or a loss; a super
# simplistic classification (a sketch of this windowing follows).
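A minimal sketch of what that windowing might look like, assuming the CLOSE prices are used and a "gain" means the 11th price exceeds the 10th; the helper below is hypothetical, not part of the original script:

import numpy as np

def make_training_sets(closes, window=10):
    """Build (inputs, targets) pairs from a 1-D array of closing prices."""
    inputs, targets = [], []
    for t in range(len(closes) - window):
        inputs.append(closes[t:t + window])  # ten consecutive stock values
        # 1.0 if holding from time 10 to 11 results in a gain, else 0.0
        targets.append(1.0 if closes[t + window] > closes[t + window - 1] else 0.0)
    return np.array(inputs), np.array(targets)

# e.g.: inputs, targets = make_training_sets(data["CLOSE"].to_numpy())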
Example #19
import random

import matrix_io  # local helper modules used below; imports were
import plotter    # missing from the original excerpt
import neural_net


def stochastic_epoch(network, X, y):
    order = list(range(X.shape[0]))  # list() so random.shuffle can mutate it
    random.shuffle(order)
    for c in order:
        network.backpropagate(X[c], y[c])


def SGD(network, X, y, epochs):
    for c in range(epochs):
        stochastic_epoch(network, X, y)


def train_nn(network, X, y, Xtest, ytest):
    for c in range(10):
        SGD(network, X, y, 50)
        print "training error: " + str(network.classification_error(X, y))
        print "test error    : " + str(
            network.classification_error(Xtest, ytest))
        plotter.plot_2D_model_predictions(network, X, y,
                                          "plots/foo/run_" + str(c) + ".png")


X, y = matrix_io.load_dataset("data/xor")
Xt, yt = matrix_io.load_dataset("data/xor_test")
network = neural_net.NeuralNet([2, 4, 4, 1], 3)
train_nn(network, X, y, Xt, yt)
Example #20
    print("Cuda availability: "+str(torch.cuda.is_available()))
    if torch.cuda.is_available():
        print("Using GPU")
        dev = "cuda:0"
    else:
        print("Using CPU")
        dev = "cpu"

    if torch.cuda.is_available():
        torch.cuda.set_device(dev)  # set_device only accepts CUDA devices
        print('Current cuda device ', torch.cuda.current_device())

    #get train and valid loaders used to train and validate
    train_loader, valid_loader = data_provider.get_train_and_validation_data_loader(data_path, test_train_ratio, seed)
    print("Created Datasets.")
    #init model
    model = neural_net.NeuralNet()
    model.to(dev)  # move the model to the selected device (cuda or cpu)
    # init start params
    lossFunction = nn.NLLLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

    num_epochs = 30
    for epoch in range(num_epochs):
        loss_ = 0
        for images, labels in train_loader:
            images, labels = images.to(dev), labels.to(dev)
            # Forward Pass
            output = model(images)
            # Loss at each iteration by comparing to target(label)
            loss = lossFunction(output, labels)
            # Standard completion of the truncated excerpt: backpropagate and step
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            loss_ += loss.item()
        print("Epoch {}: average loss {:.4f}".format(epoch + 1,
                                                     loss_ / len(train_loader)))
Example #21
import numpy as np
import neural_net as nn
import data_manipulation as dm
import relu

train_x = np.loadtxt("train_x", max_rows=20000) / 255
train_y = np.loadtxt("train_y", max_rows=20000)

activation = relu.relu()
net = nn.NeuralNet(train_x, activation, 0.01, 50, 10)
accuracy = net.train(train_x, train_y, 10, 2)
print(accuracy)

test_x = np.loadtxt("test_x") / 255
test_y = np.zeros(test_x.shape[0])

for i in range(test_x.shape[0]):
    test_y[i] = (net.predict_no_batch(test_x[i], 0, 0))

output = ""
for i in range(test_y.shape[0]):
    output = output + str(test_y[i].astype(int)) + "\n"

output_file = open("test_y", "w+")
output_file.write(output)
output_file.close()


Example #22
    def retrain_NN(self):
        neural_net = nn.NeuralNet(self.__architecture, self.__dataset)
        neural_net.train()
        self.__W, self.__B = neural_net.extract_params()
Example #23
    def DoOneRun(self,
                 run_id,
                 rf_number,
                 nn_replication,
                 prefix='',
                 seed=0,
                 batch_count=1):
        batch_size = self.config.batch_size

        self.config.rf_number = rf_number
        self.config.rf_file_name = ('features_' + prefix + '_' +
                                    str(rf_number) + '_' + str(run_id) +
                                    '.pkl')
        srf = rf.GenerateOrLoadRF(self.config, seed=run_id + 2718281828 + seed)

        if isinstance(nn_replication, (list, tuple)):
            self.skeleton.SetReplication(nn_replication)
        else:
            self.skeleton.SetReplication(
                [int(x * nn_replication) for x in self.original_replication])
        with tf.Graph().as_default(), tf.Session('') as sess:
            examples = self.get_inputs(batch_size)

            # Calculate the exact gram matrix for the batch
            gram = tf.reshape(kf.Kernel(self.skeleton, examples, examples),
                              [batch_size, batch_size])

            # Calculate the approximate gram matrix using a neural net
            rep, _ = NN.NeuralNet(self.skeleton, self.config, examples)
            srep = tf.squeeze(rep)
            approx_gram = tf.matmul(srep, tf.transpose(srep))

            # Normalize the approximate gram matrix so that the norm of
            # each element is 1.
            norms = tf.reshape(tf.sqrt(tf.diag_part(approx_gram)), [-1, 1])
            nn_gram = tf.div(approx_gram, tf.matmul(norms,
                                                    tf.transpose(norms)))

            # Compute the approximate gram matrix using random features
            parameters = tf.constant(
                np.zeros((rf_number,
                          self.config.number_of_classes)).astype(np.float32))
            rand_features = tf.SparseTensor(srf.features[0], srf.features[1],
                                            srf.features[2])
            _, rf_vectors = rf.RandomFeaturesGraph(
                self.skeleton, self.config.number_of_classes, examples,
                rf_number, rand_features, parameters, srf.weights)
            rf_gram = tf.matmul(rf_vectors, rf_vectors, transpose_b=True)
            sess.run(tf.global_variables_initializer())
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess, coord)
            RF_K_stat = Stat()
            NN_K_stat = Stat()
            for i in range(batch_count):  # xrange in the Python 2 original
                gram_np, nn_gram_np, rf_gram_np, approx_gram_np = sess.run(
                    [gram, nn_gram, rf_gram, approx_gram])
                RF_K_stat.AddToStat(gram_np, rf_gram_np)
                NN_K_stat.AddToStat(gram_np, nn_gram_np)
            coord.request_stop()
            coord.join(threads)
            return NN_K_stat, RF_K_stat
Example #24
import os
import chess
from dataset import *
from game import *
import math
import neural_net

if __name__ == '__main__':
    moves = 20
    nn = neural_net.NeuralNet(moves)

    # nn.summary()

    df = load_dataset()
    games = parse_games(df, None)
    train, test = split_games(games, 0.8, moves, True)

    x_train, y_train = transform(train, moves)
    x_test, y_test = transform(test, moves)

    nn.fit(x_train, y_train, 10)

    nn.summary()

    acc = nn.eval(x_test, y_test)
    print("Acc: ", acc)
Example #25
out_num = np.max(train_label) + 1
# Data preprocessing: scale features with min-max normalization and convert the
# output labels to one-hot vectors.
train_image = train_image.reshape(train_num, 28 * 28) / 255 - 0.5
train_sparse_label = np.zeros((train_num, out_num))
train_sparse_label[np.arange(0, train_num), train_label] = 1
test_image = test_image.reshape(test_num, 28 * 28) / 255 - 0.5
test_sparse_label = np.zeros((test_num, out_num))
test_sparse_label[np.arange(0, test_num), test_label] = 1
print(type(train_image))
select = 1
if select > 0:
    # Start training
    a = NN.NeuralNet(hidden_layers=(100, ),
                     batch_size=200,
                     learning_rate=1e-3,
                     max_iter=10,
                     tol=1e-4,
                     alpha=1e-5,
                     activation='relu',
                     solver='adam')
    a.fit(train_image, train_sparse_label)
    y = a.predict(test_image)
    print(
        'accuracy:', 1 -
        np.sum(np.abs(y - test_sparse_label)) / 2 / test_sparse_label.shape[0])
else:
    # Compare against the MLP from sklearn
    from sklearn.neural_network import MLPClassifier

    mlp = MLPClassifier(hidden_layer_sizes=(100, ),
                        max_iter=30,
                        alpha=1e-4)
    # minimal completion of the truncated excerpt
    mlp.fit(train_image, train_sparse_label)
Example #26
####
# Train and save a model
####

import numpy as np
import data_processing as dp
import neural_net

if __name__ == "__main__":

    epochs = 50
    batch_size = 64
    ngames = 10000
    NAME = 'test'

    X, y = dp.load_neural_net_data(name='', ngames=ngames)
    print("X shape: ", X.shape)
    print("Y shape: ", y.shape)

    nn_mdl = neural_net.NeuralNet(input_dim=(773, ))

    nn_mdl.fit(X[:, 0, :],
               X[:, 1, :],
               y,
               batch_size=batch_size,
               epochs=epochs,
               validate=True)

    nn_mdl.model.save(
        f'trained_models/{NAME}_{ngames}_bsize{batch_size}_epochs{epochs}')
Example #27
                      format(minibatch[m], alpha[a], avg_val_error))

                if avg_val_error < min_val_error:
                    min_val_error = avg_val_error
                    best_batch = minibatch[m]
                    best_alpha = alpha[a]

        print("When batch size is {0}, alpha is {1}, test error is {2}".format(
            best_batch, best_alpha,
            utils.classification_error(model.predict(Xtest), ytest)))

    elif Model == 'mlp':

        # MLP
        hidden_layer_sizes = [50]
        model = neural_net.NeuralNet(hidden_layer_sizes)

        model.fitWithSGD(X, Y, epoch=100, minibatch=1000, alpha=0.001)

        print("When using SGD, training error %.3f" %
              utils.classification_error(model.predict(X), y))
        print("When using SGD, validation error %.3f" %
              utils.classification_error(model.predict(Xtest), ytest))

        model.fit(X, Y)
        print("When using GD, training error %.3f" %
              utils.classification_error(model.predict(X), y))
        print("When using GD, validation error %.3f" %
              utils.classification_error(model.predict(Xtest), ytest))

    else:
Example #28
####
# Train and save a model
####

import numpy as np
import data_processing as dp
import neural_net

if __name__ == "__main__":

    ngames = int(5 * 1e5)
    NAME = 'all_ratings'
    X, y, weights = dp.load_data(ngames=ngames, use_cache=True, name=NAME)
    nn_mdl = neural_net.NeuralNet(input_dim=(8, 8, 7))

    batch_size = 512
    epochs = 5
    val_split = 0.2

    nn_mdl.model.fit(X,
                     y,
                     batch_size=batch_size,
                     epochs=epochs,
                     verbose=1,
                     validation_split=val_split,
                     sample_weight=weights)

    nn_mdl.model.save(
        f'trained_models/{NAME}_{ngames}_bsize{batch_size}_epochs{epochs}')
Example #29
import os   # used below but missing from the original excerpt
import cv2  # used below but missing from the original excerpt

from image_data_set import ImageDataSet

import neural_net

# import use_model
# import neural_net

# read config.ini
import configparser  # Python 2's ConfigParser.SafeConfigParser in the original
inifile = configparser.ConfigParser()
inifile.read('./config.ini')
NUM_CLASSES = int(inifile.get("settings", "num_classes"))
DOWNLOAD_LIMIT = int(inifile.get("settings", "download_limit"))

if __name__ == "__main__":
    # result=cmd.getstatusoutput("touch test.txt")
    app_root_path = os.getcwd() + "/"
    # Generate a label file for each image class
    ImageDataSet.create_train_labels(app_root_path, NUM_CLASSES)
    ImageDataSet.create_test_labels(app_root_path, NUM_CLASSES)
    # Merge the label files
    ImageDataSet.joint_train_labels(app_root_path, NUM_CLASSES)
    ImageDataSet.joint_test_labels(app_root_path, NUM_CLASSES)
    # Run use_model.py
    # os.system("python use_model.py")
    img = cv2.imread("tmp/147.jpg", 1)
    net = neural_net.NeuralNet()
    # net.classificate_face()
    print(net.classificate_one_face(img))
    net.classificate_face()