def __init__(self, game, numData):
        self.n = game.n
        self.K = game.K
        self.K2 = game.K2
        self.K3 = game.K3
        self.seed = game.seed
        self.numData = numData

        self.Ntest = 500000
        self.lr = 1e-1
        self.nepoch = 30
        self.nstep = 30
        self.lb = -1
        self.ub = 1

        np.random.seed(self.seed)
        torch.manual_seed(self.seed)
        self.target = copy.deepcopy(game.f)
        self.target.train = True
        self.learn_model = NN(game.K, game.K2, game.K3, True)
        self.history = np.zeros(4)

        # Sample test labels from the target network: softmax the raw scores,
        # then draw one class per row via multinomial sampling.
        self.xTest = torch.randn(self.Ntest, self.n, self.K)
        testScore = self.target.forward(self.xTest)
        testProb = torch.nn.functional.softmax(testScore, dim=1)
        self.yTest = torch.squeeze(torch.multinomial(testProb, 1))
Example #2
    def __init__(self, size=64, gray=False, gen_depth=16):
        NN.__init__(self, gray=gray)

        # input image size
        self.size = size
        self.gen_depth = gen_depth
        self.G = None
Example #3
def study_ppal_components(n_training_img, k_ppal_components):
    """
    Show the principal components of the NN
    :param n_training_img: Number of training images per person to use
    :param k_ppal_components: Number of principal components to use in the NN
    """
    train_img, train_labels, test_img, test_labels = load_images(
        n_training_img)
    nearest_neighbor = NN()
    nearest_neighbor.train(train_img, train_labels, k_ppal_components)

    sqrt = math.sqrt(k_ppal_components)
    rows = int(sqrt) if sqrt == int(sqrt) else int(sqrt) + 1  # plt.subplot needs an int
    i = 0
    for eigenface in nearest_neighbor.eigenfaces:
        i += 1
        if i > rows * int(sqrt):
            break
        plt.subplot(int(sqrt), rows, i)
        plt.imshow(shape_image(eigenface), cmap="gray")
        plt.xticks([])
        plt.yticks([])

    plt.subplots_adjust(wspace=0, hspace=0)
    # plt.suptitle(f'Eigenvectors. {n_training_img} training images, {k_ppal_components} eigenfaces')

    # plt.title("Eigenfaces used")
    plt.show()
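A minimal driver for the helper above; the argument values are only illustrative:

study_ppal_components(n_training_img=5, k_ppal_components=16)  # 16 eigenfaces, 5 images per person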
Example #4
    def __init__(self, seed, n, K, K2, K3, target, numData):
        self.n = n
        self.K = K
        self.K2 = K2
        self.K3 = K3
        self.seed = seed
        self.numData = numData

        self.Nstrat = self.K
        self.Ntest = 500000
        self.lr = 1e-1
        self.nepoch = 20
        self.nstep = 10
        self.lb = -1
        self.ub = 1


        np.random.seed(self.seed)
        torch.manual_seed(self.seed)
        self.target = target

        self.CF_model = NN(K, K2, K3, True)
        self.GD_model = NN(K, K2, K3, True)
        self.historyCF = np.zeros((self.numData.size, 4))
        self.historyGD = np.zeros((self.numData.size, 4))

        self.xTest = torch.randn(self.Ntest, self.n, self.K)
        testScore = self.target.forward(self.xTest)
        testProb = torch.nn.functional.softmax(testScore, dim=1)
        self.yTest = torch.squeeze(torch.multinomial(testProb, 1))
Example #5
    def __init__(self):
        QMainWindow.__init__(self)
        loadUi('mainwindow.ui', self)
        self.inputLetter.clicked.connect(self.showInputWidget)
        # Number of rows and columns in the input field
        self.inputSize = 5

        self.hiddenLayerSize = 12

        self.outputSize = 5

        self.iterations = 1000
        self.lr = 0.3
        self.data = Data()
        self.data.generate_symbols(4, 1)

        self.inputtedLetter = [0 for i in range(self.inputSize ** 2)]

        self.nn = NN(self.inputSize ** 2,
                     self.outputSize,
                     self.hiddenLayerSize,
                     self.iterations,
                     self.data,
                     self.lr
                     )
Example #6
 def __init__(self):
     self.env = gym.make("BreakoutNoFrameskip-v4")
     self.env = wrap_deepmind(self.env, frame_stack=True, scale=True)
     self.replay_size = 40
     # setup baseline
     # baseline will determine which replay pack the replay will be put into
     self.baseline = RecentAvg(size=HydrAI.HEADS_N *
                                    self.replay_size, init=0)
     self.baseline_mid = 0
     self.baseline_range = 0
     self.replays = {
         "good": ReplayPack(self.replay_size),
         "normal": ReplayPack(self.replay_size),
         "bad": ReplayPack(self.replay_size)
     }
     feature_size = self.env.observation_space.shape
     action_size = self.env.action_space.n
     self.nns = {
         "good": NN(feature_size, action_size,
                    [partial(self.replays["good"].sample, 32)],
                    "good_"),
         "normal": NN(feature_size, action_size,
                      [partial(self.replays["normal"].sample, 32)],
                      "normal_"),
         "bad": NN(feature_size, action_size,
                   [partial(self.replays["bad"].sample, 32)],
                   "bad_")
     }
     self.a = list(range(action_size))
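The comments above say the baseline decides which replay pack a replay lands in; a sketch of that routing step, assuming `RecentAvg` exposes an `update` method and `ReplayPack` an `add` method (neither is shown in this example):

 def route_replay(self, replay, total_reward):
     # hypothetical: refresh the running baseline, then bucket the replay
     self.baseline.update(total_reward)            # assumed RecentAvg API
     if total_reward > self.baseline_mid + self.baseline_range:
         self.replays["good"].add(replay)          # assumed ReplayPack API
     elif total_reward < self.baseline_mid - self.baseline_range:
         self.replays["bad"].add(replay)
     else:
         self.replays["normal"].add(replay)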
Example #7
def GridSearch(epochs, trainloader, testloader, num_sample, input_dim,
               OUTPUT_DIM, HIDDEN_DIMS, LRS, L2_LAMBD):
    best_params = {}
    best_acc = -1
    for hidden_dim in HIDDEN_DIMS:
        for LR in LRS:
            for lambd in L2_LAMBD:
                model = NN(num_sample,
                           input_dim,
                           hidden_dim,
                           OUTPUT_DIM,
                           init_method='He')
                costs = model.train(trainloader, LR, lambd, epochs)
                acc = Accuracy(model.predict(testloader['X']), testloader['Y'])
                if acc > best_acc:
                    best_acc = acc
                    best_params['hidden_dim'] = hidden_dim
                    best_params['learning_rate'] = LR
                    best_params['L2_lambd'] = lambd
                    best_params['costs'] = costs
                    best_params['params'] = model.params
                print(
                    'GridSearching: Hidden_dim: {:d}, Learning Rate: {:f}, L2 lambda: {:f} ---> Accuracy: {:f}.'
                    .format(hidden_dim, LR, lambd, acc))
    return best_params
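A sketch of driving the search; the loader objects are assumed to be the dict-of-arrays structure the function body indexes (`testloader['X']`, `testloader['Y']`), and the grid values are only illustrative:

best = GridSearch(epochs=50,
                  trainloader=trainloader,   # assumed {'X': ..., 'Y': ...} dicts
                  testloader=testloader,
                  num_sample=60000, input_dim=784, OUTPUT_DIM=10,
                  HIDDEN_DIMS=[64, 128],
                  LRS=[0.1, 0.01],
                  L2_LAMBD=[0.0, 0.1])
print(best['hidden_dim'], best['learning_rate'], best['L2_lambd'])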
Example #8
    def __init__(self, game, numData):
        self.n = game.n
        self.K = game.K
        self.seed = game.seed
        self.numData = numData

        self.Nstrat = self.K
        self.Ntest = 500000
        self.lr = 1e-1
        self.nepoch = 20
        self.nstep = 10
        self.lb = -1
        self.ub = 1

        np.random.seed(self.seed)
        torch.manual_seed(self.seed)

        self.target = game.f
        self.CF_model = NN(self.K, 10, 20, True)
        self.GD_model = NN(self.K, 10, 20, True)
        self.historyCF = np.zeros(4)
        self.historyGD = np.zeros(4)

        self.xTest = torch.randn(self.Ntest, self.n, self.K)
        testScore = self.target.forward(self.xTest)
        testProb = torch.nn.functional.softmax(testScore, dim=1)
        self.yTest = torch.squeeze(torch.multinomial(testProb, 1))
Example #9
    def __init__(self, nn=None):
        """ 
        Initialize blob by inheriting ParentSprite and assigning attributes

        Args:
            nn (class): can pass in the neural net from another blob
        """
        super(Blob, self).__init__()  #values are not needed
        self.int_center = int(self.center_x), int(self.center_y)
        self.radius = 10
        self.angle = random.uniform(0, 2 * np.pi)
        self.energy = MAX_ENERGY
        self.alive = True
        self.food_eaten = 0
        self.score_int = 0

        self.sight_angle = 10 * (np.pi / 180.)
        self.sight_radius = 1000

        self.target_blob = self
        self.target_food = self

        self.last_angle = .01

        #scoring related
        self.dist_moved = 0
        self.color = int(self.energy / 4 + 5)

        # Neural Network stuff here:
        if nn is not None:
            self.nn = NN(((1, nn), ))
        else:
            self.nn = NN()
Example #10
 def __init__(self, nn):
     if isinstance(nn, OrderedDict):
         self._nn = NN(nn['player'])
     else:
         self._nn = nn
     self._x = np.zeros(shape=(NX, ))
     self._epsSame = 1e-2
     self._rand = random.Random()
Example #11
    def __init__(self):
        self.maxdepth = 4
        self.moves = ["up", "right", "down", "left"]
        self.nn = NN([16, 4])

        # "wb+" truncates an existing file, so saved weights could never be
        # read back; open for append+read and rewind before loading instead
        self.weightFile = open("2048_nn_weights.pysave", "ab+")
        if os.stat("2048_nn_weights.pysave").st_size > 0:
            self.weightFile.seek(0)
            self.nn.loadWeights(self.weightFile)

        signal.signal(signal.SIGINT, self.signal_handler)
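The registered `signal_handler` is not shown in this example; a minimal sketch of what it presumably does, assuming `NN` exposes a `saveWeights` counterpart to `loadWeights` and that `sys` is imported:

    def signal_handler(self, signum, frame):
        # hypothetical: persist weights on Ctrl-C, then exit cleanly
        self.weightFile.seek(0)
        self.nn.saveWeights(self.weightFile)  # assumed counterpart to loadWeights
        self.weightFile.close()
        sys.exit(0)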
Example #12
    def test_another_rbmtrain(self, n):
        _dbn = DBN([784, 1000, 500, 250, 30], learning_rate=0.01, cd_k=1)
        print(len(mnist.train.images))
        for j in range(5):
            for i in range(10):
                _dbn.pretrain(mnist.train.images[i * 5500:i * 5500 + 5500],
                              128, 5)

        _nnet = NN([784, 1000, 500, 250, 30, 250, 500, 1000, 784], 0.01, 128,
                   50)
        _nnet.load_from_dbn_to_reconstructNN(_dbn)
        _nnet.train(mnist.train.images, mnist.train.images)
        _nnet.test_linear(mnist.test.images, mnist.test.images)

        x_in = mnist.test.images[:30]
        _predict = _nnet.predict(x_in)
        _predict_img = np.concatenate(np.reshape(_predict, [-1, 28, 28]),
                                      axis=1)
        x_in = np.concatenate(np.reshape(x_in, [-1, 28, 28]), axis=1)
        img = Image.fromarray((1.0 - np.concatenate(
            (_predict_img, x_in), axis=0)) * 255.0)
        img = img.convert('L')
        img.save(str(n) + '_.jpg')
        img2 = Image.fromarray((np.concatenate(
            (_predict_img, x_in), axis=0)) * 255.0)
        img2 = img2.convert('L')
        img2.save(str(n) + '.jpg')
Example #13
def trainEj1(inputLayers, lr, filenameInput, epocs, saveIn=None):
	X, Z, layers, activationFunctions = getTrainingParamsEj1(inputLayers, filenameInput, saveIn)
	nn = NN(layers, activationFunctions, lr)
	for i in range(epocs):
		e = nn.mini_batch(X,Z)
		print "Epoc %d error: %f" % (i, e)
		if (i % 50 == 0):
			Zhat = nn.predict(X[0:100,:])
			calcAuc(Z[0:100], Zhat)
	if (saveIn):
		saveAs(saveIn, nn)
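A hypothetical invocation; the layer sizes, learning rate, file name, and save path are placeholders:

trainEj1(inputLayers=[8, 4], lr=0.05, filenameInput="ej1_train.csv", epocs=200, saveIn="ej1_model")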
Example #14
def main():

    training_data, test_data, output_rsts = load_data()

#######################################
#Training
#######################################
    input_layer_size = len(training_data[0][0])
    net = NN((input_layer_size, 30, 2), output_rsts)

    net.SGD(training_data, mini_batch_size=10, epochs=30, eta=3.0, test_data=test_data)
Example #15
def createNN(typeOfNN, costf, inputsize, outputsize, hiddensize, hiddenlayers,
             hiddenactivation):
    neuralNet = NN(typeOfNN, costf)
    neuralNet.createLayer(0, inputsize, "input")
    for i in range(hiddenlayers):
        neuralNet.createLayer(i + 1,
                              hiddensize,
                              activationMethod=hiddenactivation)
    neuralNet.createLayer(hiddenlayers + 1, outputsize, "output", "Sigmoid")
    for i in range(hiddenlayers + 1):
        neuralNet.linkLayer(i + 1, i)
    return neuralNet
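A sketch of assembling a network with the helper above; the `typeOfNN`, cost-function, and activation tokens are placeholders, since the accepted values live in the `NN` class, which is not shown:

net = createNN(typeOfNN="feedforward",       # placeholder token
               costf="quadratic",            # placeholder token
               inputsize=4, outputsize=1,
               hiddensize=8, hiddenlayers=2,
               hiddenactivation="ReLU")      # placeholder token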
Example #16
def test_calc_output_grad():
    config = {
        "learning rate": .0001,
        "debug": False,
        "layer node count": [2, 2, 2],
        "first moment decay rate": .9,
        "second moment decay rate": .999,
        "experience replay size": 8,
        "replay batches": 4,
        "model size": 50000,
        "report frequency": 10,
        "number episodes": 300000000,
        "initial temp": 1,
        "adam epsilon": 10**-8
    }

    num_inputs = config["layer node count"][0]
    num_hidden = config["layer node count"][1]
    num_output = config["layer node count"][2]
    config["weights"] = [
        np.ones((num_inputs, num_hidden)),
        np.ones((num_hidden, num_output))
    ]
    config["bias"] = [np.ones((num_hidden)), np.ones((num_output))]
    states = []
    action_indices = []
    targets = []
    finals = []
    for i in range(2):
        state = np.array([i, i])
        action_index = i
        target = i
        states.append(state)
        action_indices.append(action_index)
        targets.append(target)
        finals.append(i == 1)

    nn = NN(config)
    bg, wg = nn.calc_gradients(states, action_indices, states, np.ones(2),
                               finals, 1, 1)
    test_weight_g = [
        np.array([[3., 3.], [3., 3.]]),
        np.array([[-.5, 9.], [-.5, 9.]])
    ]
    test_bias_g = [np.array([[2.5, 2.5]]), np.array([[-.5, 3.]])]

    for i in range(2):
        if not np.allclose(wg[i], test_weight_g[i]):
            raise ValueError("Error in calc weight grad")

        if not np.allclose(bg[i], test_bias_g[i]):
            raise ValueError("Error in calc bias grad")
Example #17
def print_mean_faces():
    """ Print the mean faces for 3, 5 and 7 per person training images """
    for i, n_training in enumerate([3, 5, 7]):
        train_img, train_labels, test_img, test_labels = load_images(
            n_training)
        nearest_neighbor = NN()
        nearest_neighbor.train(train_img, train_labels, 10)
        plt.subplot(1, 3, i + 1)
        plt.imshow(shape_image(nearest_neighbor.mean_face), cmap='gray')
        plt.title(f"{n_training} training images")
        plt.axis('off')

    plt.show()
Example #18
 def learn(self):
     self.bound = self.getCFBound()
     for datasizei in range(self.numData.size):
         self.Ntrain = self.numData[datasizei]
         self.batch_size = int(self.Ntrain / self.nepoch)
         self.GD_model = NN(self.K, self.K2, self.K3, True)
         self.learnGD(datasizei)
         np.savetxt("historyGD"+str(self.n)+"_"+str(self.K)+"_"+str(self.seed)+".csv", self.historyGD, delimiter=',')
     for datasizei in range(self.numData.size):
         self.Ntrain = self.numData[datasizei]
         self.CF_model = NN(self.K, self.K2, self.K3, True)
         self.learnCF(datasizei)
         np.savetxt("historyCF"+str(self.n)+"_"+str(self.K)+"_"+str(self.seed)+".csv", self.historyCF, delimiter=',')
 def learn(self):
     self.Ntrain = self.numData
     self.batch_size = int(self.Ntrain / self.nepoch)
     if self.Ntrain >= 100000:
         self.batch_size = 5000
     self.learn_model = NN(self.K, self.K2, self.K3, True)
     self.learnGD()
     np.savetxt("history" + str(self.n) + "_" + str(self.K) + "_" +
                str(self.K2) + "_" + str(self.K3) + "_" + str(self.seed) +
                ".csv",
                self.history,
                delimiter=',')
     return self.learn_model
Example #20
    def __init__(self, game, residual_layers=5):
        """
        Args:
            game: A Game object
            residual_layers(int): number of residual layers. Default is 5
        """
        self.game = game
        input_shape = game.layers().shape
        policy_shape = len(game.action_space)

        self.nnet = NN(input_shape, residual_layers, policy_shape, True)
        self.path_1 = 'model/checkpoint/old/'
        self.path_2 = 'model/checkpoint/new/'
Example #21
class NetTrainer():
    """
    manages the two neural networks (older and newest)
    """
    def __init__(self, game, residual_layers=5):
        """
        Args:
            game: A Game object
            residual_layers(int): number of residual layers. Default is 5
        """
        self.game = game
        input_shape = game.layers().shape
        policy_shape = len(game.action_space)

        self.nnet = NN(input_shape, residual_layers, policy_shape, True)
        self.path_1 = 'model/checkpoint/old/'
        self.path_2 = 'model/checkpoint/new/'

    def train(self, name):
        """
        Args:
            name(string): 'new' or 'old'

        trains a specified neural network
        """
        if name == 'old':
            training_nn(self.game, self.nnet, self.path_1)
        elif name == 'new':
            training_nn(self.game, self.nnet, self.path_2)
        else:
            print("invalid name.")

    def prepare(self, name):
        """
        load a specified model which was previously saved
        """
        if name == 'old':
            self.nnet.pre_run(self.path_1)
        elif name == 'new':
            self.nnet.pre_run(self.path_2)
        else:
            print("invalid name.")

    def pred(self, new_input):
        """
        Args:
            new_input: a layers representation

        returns the prediction generated by the neural network
        """
        return self.nnet.pred(new_input)
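A typical round trip with the trainer (a sketch; `game` must provide the `layers()` and `action_space` members used in `__init__`, and the `(policy, value)` unpacking is an assumption about `NN.pred`):

trainer = NetTrainer(game, residual_layers=5)
trainer.train('new')                  # checkpoints under model/checkpoint/new/
trainer.prepare('new')                # reload the freshly trained weights
policy, value = trainer.pred(game.layers())  # assumed return shape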
Example #22
def compute_rms(source, target, nn=None):
    '''
    Make a single call to FLANN rms.

    If a flannobject with prebuilt index is given, use that,
    otherwise, do a full search.
    '''
    if nn:
        results, dists = nn.match(target)
    else:
        nn = NN()
        nn.add(source)
        results, dists = nn.match(target)
    return math.sqrt(sum(dists) / float(len(dists)))
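Building the index once and reusing it avoids re-adding the source cloud on every call (a sketch; `source` and the `target_*` names stand for row-major point arrays):

nn = NN()
nn.add(source)                                 # prebuild the FLANN index
rms_a = compute_rms(source, target_a, nn=nn)   # reuses the index
rms_b = compute_rms(source, target_b, nn=nn)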
Example #24
def main():
    # Parameters
    sizes = [784, 16, 16, 10]
    eta = 3
    mini_batch_size = 50
    epochs = 30

    nn = NN(sizes)
    emnistTrainData, validation_data, test_data = mnist_loader.load_data_wrapper()
    results = nn.stochastic_gradient_descent(training_data=emnistTrainData,
                                             mini_batch_size=mini_batch_size,
                                             epochs=epochs,
                                             learning_rate=eta)

    epochResults = [item[0] for item in results]

    # Outputs image and what the trained network thinks it should be
    count = 0
    stop = 0  # int(input("How many images would you like to see?: "))
    while count < stop:
        example = emnistTrainData[count]
        pixels = example[0].reshape((28, 28))
        expectedLabel = np.argmax(example[1])
        predictedLabel = np.argmax(nn.feed_forward(example[0]))
        # Plot
        plt.title(
            'Expected: {expectedLabel}, Predicted: {predictedLabel}: '.format(
                expectedLabel=expectedLabel, predictedLabel=predictedLabel))
        plt.imshow(pixels, cmap='gray')
        plt.show()
        count += 1

    # Plot learning progress
    plt.plot(epochResults)
    plt.ylabel("# Correct")
    plt.xlabel("Epoch")
    plt.title("Learning Rate = " + str(eta))
    plt.show()

    # test network on test data set
    total = len(test_data)
    correct = 0
    for example in test_data:
        inputLayer = example[0]
        output = np.argmax((nn.feed_forward(inputLayer)))
        if output == example[1]:
            correct += 1
    print("Model Correctly Identified %d out of %d examples" %
          (correct, total))
Example #25
def test_feed_forward():
    config = {
        "learning rate": .0001,
        "debug": False,
        "layer node count": [2, 2, 2],
        "first moment decay rate": .9,
        "second moment decay rate": .999,
        "experience replay size": 8,
        "replay batches": 4,
        "model size": 50000,
        "report frequency": 10,
        "number episodes": 300000000,
        "initial temp": 1,
        "adam epsilon": 10**-8
    }
    num_inputs = config["layer node count"][0]
    num_hidden = config["layer node count"][1]
    num_output = config["layer node count"][2]
    config["weights"] = [
        np.ones((num_inputs, num_hidden)),
        np.ones((num_hidden, num_output)) * 2
    ]
    config["bias"] = [np.ones((num_hidden)), np.ones((num_output)) * 2]

    nn = NN(config)

    state = np.ones(num_inputs) * 1
    state2 = np.ones(num_inputs) * 2
    state3 = np.ones(num_inputs) * 3
    states = np.row_stack((state, state2, state3))
    zs, activations = nn.feed_forward(states)
    # With all-ones weights, each hidden unit sees sum(state) + bias,
    # i.e. num_inputs * (i + 1) + 1 for the (i + 1)-scaled input row.
    for i in range(3):
        if not np.allclose(activations[-2][i], num_inputs * (i + 1) + 1):
            raise ValueError("hidden outputs are off")
    # The output layer doubles and sums the hidden activations, plus bias 2.
    for j in range(3):
        if not np.allclose(activations[-1][j],
                           ((num_inputs * (j + 1) + 1) * num_hidden * 2) + 2):
            raise ValueError("output values are off")

    # Test RELU

    state = np.ones(num_inputs) * -2
    states = state.reshape((1, 2))
    zs, activations = nn.feed_forward(states)
    if not np.allclose(activations[-2][:], 0):
        raise ValueError("hidden outputs are off")

    if not np.allclose(activations[-1][:], 2):  # ReLU zeroed the hidden layer; only bias remains
        raise ValueError("output values are off")
 def __init__(self, health, speed, coords, dna):
     Creature.__init__(self, health, speed, coords)
     self.actions = []
     self.lifespan = 0
     self.score = 0
     self.action_nn = NN(2, 2, 2)
     self.move_nn = NN(2, 1, 2)
     if dna is None:
         self.move_nn.set_random_NN_weights()
         self.dna = self.move_nn.get_weights()
     else:
         # the inherited dna presumably seeds the weights here
         self.move_nn.set_NN_weights(dna)
         self.dna = dna
Example #27
    def __init__(self, doc2vec):
        super(NNClassifier, self).__init__(doc2vec)

        self.nn_des = {
            'layer_description': [
                {
                    'name': 'input',
                    'unit_size': 100,
                },
                {
                    'name': 'hidden1',
                    'active_fun': tf.nn.relu,
                    'unit_size': 400,
                },
                {
                    'name': 'output',
                    'active_fun': None,
                    'unit_size': 59,
                },
            ],
        }
        self.max_pass = 5000
        self.batch_size = 10000
        self.step_to_report_loss = 5
        self.step_to_eval = 10
        self.nn_model = NN(self.nn_des)
        self.learning_rate = 0.01
Example #28
    def __init__(self, seed, n, K):
        self.n = n
        self.K = K
        self.Kd = int(K * 2 / 3)
        self.Kc = int(K * 1 / 3)
        self.seed = seed
        np.random.seed(seed)
        self.eps = 1e-8
        self.CfeatureWeights = np.random.rand(self.Kc) - 0.5
        self.DfeatureWeights = np.random.rand(self.Kd) - 0.5
        for k in range(self.Kc):
            if self.CfeatureWeights[k] == 0:
                self.CfeatureWeights[k] = self.eps
        for k in range(self.Kd):
            if self.DfeatureWeights[k] == 0:
                self.DfeatureWeights[k] = self.eps
        self.nodes = [
            Node(K, self.CfeatureWeights, self.DfeatureWeights, seed)
            for i in range(n)
        ]
        self.us = np.array([node.u for node in self.nodes])
        maxCost = 0
        for i in range(n):
            maxCost += self.nodes[i].getMaxCost()
        self.budget = np.random.rand() * maxCost * 0.2

        self.f = NN(K, 10, 20, False)
        self.allWeights = np.concatenate(
            [self.CfeatureWeights, self.DfeatureWeights])
        self.f.input_linear.weight = torch.nn.Parameter(
            torch.unsqueeze(torch.tensor(self.allWeights, dtype=torch.float),
                            dim=0))
Example #29
def helper_test_coupling(my_activation, tf_activation, loss, inputs, y_true,
                         units):
    tf.random.set_seed(42)
    tf_layer = Dense(units, activation=tf_activation)
    tf_layer.build(inputs.shape)

    with tf.GradientTape(persistent=True) as tape:
        tape.watch([inputs, *tf_layer.trainable_weights])
        pred_tf = tf_layer(inputs)
        loss_value = loss(y_true, pred_tf)  # avoid shadowing the loss callable

    *grads_tf, dY = tape.gradient(
        loss_value, [inputs, *tf_layer.trainable_weights, pred_tf])

    tf.random.set_seed(42)
    my_layer = NN.Layer(units, my_activation)
    my_layer.build(inputs.shape)

    pred_my = my_layer(inputs)

    dX, [dW, dB] = my_layer.backprop(dY)
    grads_my = [dX, dW, dB]

    assert np.allclose(pred_my, pred_tf)

    assert all(
        np.allclose(grad_my, grad_tf)
        for grad_my, grad_tf in zip(grads_my, grads_tf))
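A sketch of invoking the coupling test; the shapes are illustrative, and the string 'relu' assumes `NN.Layer` accepts an activation token of that form:

inputs = tf.constant(np.random.randn(8, 4), dtype=tf.float32)
y_true = tf.constant(np.random.randn(8, 3), dtype=tf.float32)
helper_test_coupling(my_activation="relu",  # assumed NN.Layer token
                     tf_activation=tf.nn.relu,
                     loss=tf.keras.losses.MeanSquaredError(),
                     inputs=inputs, y_true=y_true, units=3)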
Example #30
    def eat_food(self, model):
        """ 
        tests whether or not a blob eats food on a given frame. If a blob 
        eats food, remove the food, increase the blob's energy, asexually 
        reproduce based on its neural net dna, and do some population control.

        Args:
            model (object): contains attributes of the environment

        """
        for i in range(len(model.foods) - 1, -1, -1):
            f = model.foods[i]
            if self.intersect(f):
                self.food_eaten += 1
                self.energy += 500

                if self.energy > MAX_ENERGY:
                    self.energy = MAX_ENERGY

                del model.foods[i]

                model.foods.append(Food())

                model.blobs.append(Blob(NN([(1, self.nn)])))

                if len(model.blobs) > BLOB_NUM:
                    energy_list = []
                    for blob in model.blobs:
                        energy_list.append(blob.energy)
                    del model.blobs[np.argmin(energy_list)]
Example #31
class NNPlayer:
    def __init__(self, nn):
        if isinstance(nn, OrderedDict):
            self._nn = NN(nn['player'])
        else:
            self._nn = nn
        self._x = np.zeros(shape=(NX, ))
        self._epsSame = 1e-2
        self._rand = random.Random()

    def setName(self, n):
        self._name = n

    def setSeed(self, seed):
        self._rand.seed(seed)

    def name(self):
        return self._name

    def sDict(self):
        return {'player': self._nn.sDict()}

    def _encodeBoard(self, ttt):
        marker = ttt.whoseTurn()
        for i, b in enumerate(ttt.board()):
            if b == game.Empty:
                self._x[i] = 0
                self._x[i + 9] = 0
            elif b == marker:
                self._x[i] = 1
                self._x[i + 9] = 0
            else:
                self._x[i] = 0
                self._x[i + 9] = 1

    def move(self, ttt):
        return self.moveAndValue(ttt)[0]

    def moveAndValue(self, ttt):
        self._encodeBoard(ttt)
        bestQ = -1e99
        qs = []
        vm = ttt.validMoves()
        for m in vm:
            ttt.add(m)
            self._encodeBoard(ttt)
            q = self._nn(self._x)[0]
            ttt.undo()
            self._encodeBoard(ttt)
            qs.append(q)
            if q > bestQ:
                bestQ = q

        bestMoves = []
        for iMove, q in enumerate(qs):
            if abs(q - bestQ) < self._epsSame:
                bestMoves.append(vm[iMove])

        return (self._rand.choice(bestMoves), bestQ, qs)
Example #32
def main(config):
    net = NN(config)

    model = config.get("Input", "loadmodel")
    if model:
        net.loadmodel(model)
    else:
        dataTrain = Data(config.get("Input","train"), config.get("Input","format"), net.verbosity)
        try:
            trpr = net.train( dataTrain )
        except KeyboardInterrupt:
            sys.stderr.write("Aborting the training procedure...\n")
            pass
    
    ctest = config.get("Input","test")
    if ctest:
        dataTest = Data(ctest, config.get("Input","format"), net.verbosity)
        conf, err, tepr = net.test( dataTest )

        output = net.metrics.obtain( dataTest, tepr, conf, err )
        print()
        print("Test statistics:")
        print(conf)
        print("\n".join(map(str.strip, filter(len, output.values()))))

    ftr = config.get("Output", "probstrain")
    fte = config.get("Output", "probstest")
    if ftr: Data.writeProbs(trpr, ftr)  # note: trpr exists only if training ran above
    if fte: Data.writeProbs(tepr, fte)

    model = config.get("Output", "savemodel")
    if model:
        net.savemodel(model)
Example #33
from nn import NN

nn = NN()

train = [[[0, 0], [0]], [[0, 1], [1]], [[1, 0], [1]], [[1, 1], [0]]]

for x in train:
    print(x)

nn.backPropagation(train, 0.5, 2, 3, 1)

print(nn.evaluate([0, 0]))
print(nn.evaluate([0, 1]))
print(nn.evaluate([1, 0]))
print(nn.evaluate([1, 1]))
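Since this trains XOR, a quick sanity check is to compare each output against the truth table (the four outputs should approach 0, 1, 1, 0):

for inputs, target in train:
    print(inputs, "->", nn.evaluate(inputs), "expected", target)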
Example #34
model8.prune(trainM2, ncMonks)
print("18. -----------prune----------")
model8.test(testM2, ncMonks, 1)
print("--------------------------------")
print("19. DT, Monks3, IGR--------------------------------")
model9 = DT()
model9.discreteTrain(trainM3, ncMonks, 0)
model9.test(testM3, ncMonks, 1)
# Prune
model9.prune(trainM3, ncMonks)
print("20. -----------prune----------")
model9.test(testM3, ncMonks, 1)
print("--------------------------------")
# Neural Network
print("21. NN, Iris, alternate weight, momentum--------------------------------")
model21 = NN(arch=[5, 6, ncIris])
model21.initializeWeight(trainSetIris)
model21.train(trainSetIris, ncIris)
model21.test(testSetIris, ncIris)
print("--------------------------------")
print("22. NN, Iris, not alternate weight, momentum--------------------------------")
model22 = NN(arch=[5, 6, ncIris])
model22.train(trainSetIris, ncIris)
model22.test(testSetIris, ncIris)
print("--------------------------------")
print("23. NN, Iris, not alternate weight, no momentum--------------------------------")
model23 = NN(arch=[5, 6, ncIris])
model23.turnOffMomentum()
model23.train(trainSetIris, ncIris)
model23.test(testSetIris, ncIris)
print("24. NN, Iris, alternate weight, no momentum--------------------------------")
from nn import NN

import scipy.io as sio

if __name__ == "__main__":

    # Create Training data
    train_data = sio.loadmat("ex4data1.mat")
    first_network = NN([400, 25, 10])
    input_size = len(train_data["X"])
    train_parameters = {
        "train_data": train_data,
        "step_size": 0.2,
        "input_size": input_size,
        "lambda": 1.7,
        "num_of_iterations": 400,
    }

    # print(first_network.predict(test_data=train_data))
    first_network.train(**train_parameters)
    print(first_network.predict(test_data=train_data))
Example #36
import sys
from nn import NN
import numpy
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

def sumaVector(a,b):
	return [x+y for x,y in zip(a,b)]



nn = NN()
trainingSet = []
testSet = []
if len(sys.argv) == 3:
	nn.loadWeights(sys.argv[1])
	testFile = open(sys.argv[2])

	#TEST SET	
	for line in testFile:
		string = line.split(" ", 3)
		x = [[float(string[0])/20.0,float(string[1])/20.0]]
		if int(string[2]) == 1 :
			x += [[1]]
		else:
			x += [[0]]
		testSet += [x]


Example #37
def icp(source, target, D=3, verbosity=0, epsilon=0.000001):
    '''
    Perform ICP for two arrays containing points. Note that these
    arrays must be row-major!

    NOTE:
    This function returns the rotation matrix, translation for a
    transition FROM target TO source. This approach was chosen because
    of computational efficiency: it is now possible to index the source
    points beforehand, and query the index for matches from the target.

    In other words: Each new point gets matched to an old point. This is
    quite intuitive, as the set of source points may be (much) larger.
    '''
    nn = NN()

    # init R as identity, t as zero
    R = np.eye(D, dtype='float64')
    t = np.zeros((1, D), dtype='float64')
    T = homogenize_transformation(R, t)
    transformed_target = target

    # Build index beforehand for faster querying
    nn.add(source)

    # Initialize RMS to dummy values so the loop body runs at least once
    rms = 2
    rms_new = 1

    while True:
        # Update root mean squared error
        rms = rms_new

        # Rotate and translate the target using homogeneous coordinates
        # unused: transformed_target = np.dot(T, target_h).T[:, :D]
        transformed_target = np.dot(R, transformed_target.T).T + t
        centroid_transformed_target = np.mean(transformed_target, axis=0)

        # Use flann to find nearest neighbours. Note that because of index it
        # means 'for each transformed_target find the corresponding source'
        results, dists = nn.match(transformed_target)

        # Compute new RMS
        rms_new = math.sqrt(sum(dists) / float(len(dists)))

        # Give feedback if necessary
        if verbosity > 0:
            sys.stdout.write("\rRMS: {}".format(rms_new))
            sys.stdout.flush()

        # We encounter this case sometimes, but are not sure if it should be
        # possible. Is it possible for the RMS of a (sub)set of points to
        # increase?
        # assert rms > rms_new, "RMS was not minimized?"

        # Check threshold
        if rms - rms_new < epsilon:
            break

        # Use array slicing to get the correct targets
        selected_source = nn.get(results)
        centroid_selected_source = np.mean(selected_source, axis=0)

        # Compute covariance, perform SVD using Kabsch algorithm
        correlation = np.dot(
            (transformed_target - centroid_transformed_target).T,
            (selected_source - centroid_selected_source))
        u, s, v = np.linalg.svd(correlation)

        # u . S . v = correlation =
        # V . S . W.T

        # Ensure a right-handed coordinate system and calculate R
        d = np.linalg.det(np.dot(v, u.T))
        sign_matrix = np.eye(D)
        sign_matrix[D-1, D-1] = d
        R = np.dot(np.dot(v.T, sign_matrix), u.T)
        t[0, :] = np.dot(R, -centroid_transformed_target) + \
            centroid_selected_source

        # Combine transformations so far with new found R and t
        # Note: Latest transformation should be on inside (r) of the equation
        T = np.dot(T, homogenize_transformation(R, t))

        if verbosity > 2:
            try:
                if input("Enter 'q' to quit, or anything else to "
                         "continue") == "q":
                    sys.exit(0)
            except EOFError:
                print("")
                sys.exit(0)
            except KeyboardInterrupt:
                print("")
                sys.exit(0)

    # Unpack the built transformation matrix
    R, t = dehomogenize_transformation(T)
    return R, t, T, rms_new, nn
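Aligning two clouds with the routine above (a sketch; `source_pts` and `target_pts` stand for row-major N x D point arrays):

R, t, T, rms, index = icp(source_pts, target_pts, D=3, verbosity=1)
aligned = np.dot(R, target_pts.T).T + t  # map target points onto the source frame

Example #38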
def run():
    np.random.seed(142)
    
    # build a dataset from the word2vec features
    logging.info('Loading the word2vec model...')
    w2v = load_word2vec()
    
    vec_function = lambda sentence: get_word2vec_features(w2v, sentence)
    num_features = w2v.layer1_size
    
    logging.info('Building the word2vec sentence features...')
    w2v_X_train, w2v_Y_train, w2v_X_test , w2v_Y_test = load_data(vec_function, num_features)
    
    # del w2v # don't need it anymore
    
    # now train a neural net on this dataset
    logging.info('Training a neural net on the word2vec features...')
    w2v_nn = NN(400, 1000, 300)
    w2v_nn.train(w2v_X_train, w2v_Y_train)
    
    # threshold the results and show some evaluations to see if we can improve on this
    w2v_train_predictions = w2v_nn.predict_classes(w2v_X_train)
    w2v_test_predictions  = w2v_nn.predict_classes(w2v_X_test)
    
    print_evaluations(w2v_Y_test, w2v_test_predictions)
    
    # Now let's get the linguistic features
    logging.info('Building the linguistic features...')
    
    vec_function = lambda sentence: get_linguistic_features(sentence)
    num_features = NUM_LINGUISTIC_FEATURES
    ling_X_train, ling_Y_train, ling_X_test , ling_Y_test = load_data(vec_function, num_features)
    
    # Combine the net's output with the linguistic features: binarize the
    # predictions into one indicator (binary) feature per class, e.g.
    # fit_transform([[0], [2], [1]]) -> [[1, 0, 0], [0, 0, 1], [0, 1, 0]]
    mlb = MultiLabelBinarizer()
    w2v_train_predictions_binarized = mlb.fit_transform(w2v_train_predictions.reshape(-1, 1))
    # use transform (not fit_transform) so the column order learned on the
    # training predictions is kept even if a class is absent from the test set
    w2v_test_predictions_binarized = mlb.transform(w2v_test_predictions.reshape(-1, 1))
    
    # now stack these with the ling features
    # now combine the features and train a new classifier
    X_train = np.hstack((
        w2v_train_predictions_binarized,
        ling_X_train
    ))
    X_test = np.hstack((
        w2v_test_predictions_binarized,
        ling_X_test
    ))
    
    logging.info('Normalising the final dataset to unit length')
    lengths = np.linalg.norm(X_train, axis=1)
    X_train = X_train / lengths[:, None] # divides each row by the corresponding element
    lengths = np.linalg.norm(X_test, axis=1)
    X_test = X_test / lengths[:, None]
    
    logging.info('Training a neural net on the final dataset...')
    nn = NN(X_train.shape[1], 3000, 600)
    nn.train(X_train, w2v_Y_train)

    predictions = nn.predict_classes(X_test)
    print_evaluations(w2v_Y_test, predictions)
    
    predictions = nn.predict_continuous(X_test)
    print_evaluations(w2v_Y_test, predictions, classification=False)
    
    # logging.info('Training a logistic regression model on the final dataset...')
    # lr = LogisticRegression(C=1e5, class_weight='auto', random_state=33)
    # lr.fit(X_train, w2v_Y_train)
    #
    # predictions = lr.predict(X_test)
    # print_evaluations(w2v_Y_test, predictions)
    
    logging.info('Done.')
Example #39
import sys
from nn import NN

nn = NN()

if (len(sys.argv) < 5):
	print("Usage:\n   python Iris-binario.py training_file learning_rate number_hidden test_file")
	quit()

#ARGUMENTS
trainingFile = open(sys.argv[1])
n = float(sys.argv[2])
nhidden = int(sys.argv[3])
testFile = open(sys.argv[4])

#TRAINING SET
trainingSet = []
testSet = []
for line in trainingFile:
	string = line.split(",", 5)
	x = [[float(string[0])/10.0,float(string[1])/10.0,float(string[2])/10.0,float(string[3])/10.0]]
	if string[4].rstrip() == "Iris-setosa":
		x += [[1]]
	else:
		x += [[0]]
	trainingSet += [x]

#TEST SET
for line in testFile:
	string = line.split(",", 5)
	x = [[float(string[0])/10.0,float(string[1])/10.0,float(string[2])/10.0,float(string[3])/10.0]]