Example #1
    def __init__(self, height, parameters):
        # draw.circle is not anti-aliased and looks rather ugly.
        # pygame.draw.circle(ATOM_IMG, (0, 255, 0), (15, 15), 15)
        # gfxdraw.aacircle looks a bit better.
        ATOM_IMG = pygame.Surface((21, 21), pygame.SRCALPHA)
        transparency = random.randint(150, 230)
        pygame.gfxdraw.aacircle(ATOM_IMG, 10, 10, 10,
                                (0, 120, 150, transparency))
        # pygame.gfxdraw.filled_circle(ATOM_IMG, 10, 10, 11, WHITE)
        pygame.gfxdraw.filled_circle(ATOM_IMG, 10, 10, 10,
                                     (0, 120, 150, transparency))

        pygame.sprite.Sprite.__init__(self)
        # self.picture = pygame.image.load("flappy_bird.png")
        # self.image = pygame.transform.scale(self.picture, (45, 45))

        self.image = ATOM_IMG  # pygame surface you can draw on, 21x21 pixels
        # self.image = self.image.convert_alpha()
        # self.image.fill(WHITE)
        self.rect = self.image.get_rect()  # builds rectangle around surface
        # start the bird at x = 100, vertically centered on the screen
        self.rect.center = (100, height / 2)
        self.WINDOW_HEIGHT = height
        self.y_before_jump = 35  # max
        self.score = 0  # the min. score (not getting through first pipe)
        self.fitness = 0
        self.velocity = 0

        # the explicit branch on parameters is unnecessary: passing None
        # through requests a randomly initialized network either way
        self.brain = nnet.NeuralNetwork(brain_dims, parameters)
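
A minimal usage sketch, assuming this __init__ belongs to a pygame sprite
class (the class name Bird, the brain_dims global, and the pygame, random,
and nnet imports are assumptions, not shown in the snippet):

all_sprites = pygame.sprite.Group()
bird = Bird(height=600, parameters=None)  # None -> randomly initialized brain
all_sprites.add(bird)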
Example #2
File: mlp.py Project: ChristophRaab/cpn_ba
import numpy as np
import sklearn.datasets

import nnet


def run():
    # Fetch data
    digits = sklearn.datasets.load_digits()
    X_train = digits.data
    X_train /= np.max(X_train)
    y_train = digits.target
    n_classes = np.unique(y_train).size

    # Setup multi-layer perceptron
    nn = nnet.NeuralNetwork(layers=[
        nnet.Linear(
            n_out=50,
            weight_scale=0.1,
            weight_decay=0.002,
        ),
        nnet.Activation('relu'),
        nnet.Linear(
            n_out=n_classes,
            weight_scale=0.1,
            weight_decay=0.002,
        ),
        nnet.LogRegression(),
    ])

    # Verify network for correct back-propagation of parameter gradients
    print('Checking gradients')
    nn.check_gradients(X_train[:100], y_train[:100])

    # Train neural network
    print('Training neural network')
    nn.fit(X_train, y_train, learning_rate=0.1, max_iter=25, batch_size=32)

    # Evaluate on training data
    error = nn.error(X_train, y_train)
    print('Training error rate: %.4f' % error)
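
A minimal entry point for running this example as a script (standard Python
idiom, not part of the original snippet):

if __name__ == '__main__':
    run()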
Example #3
import os

import numpy as np

import nnet


def mk_samples(Nx, Ny, nb_samples, rect=False):
    filename = '%s/nnet/ACASXU_run2a_%d_%d_batch_2000.nnet' % (
        os.path.dirname(__file__) or '.', Nx, Ny)
    nn = nnet.NeuralNetwork(filename, True, True)

    # Sample inputs uniformly within the network's valid input ranges
    # (ACASXU networks have 5 inputs and 5 outputs, so output_dims happens
    # to equal the input dimension here)
    X = np.random.uniform(nn.input_min, nn.input_max,
                          [nb_samples, nn.output_dims])

    Y = np.zeros(nb_samples)

    ind = 0
    while ind < nb_samples:
        outputs = nn.evaluate(X[ind])
        if test_properties(Nx, Ny, X[ind], outputs) is False:
            # resample this point, otherwise the loop would never advance
            # past an input that fails the property check
            X[ind] = np.random.uniform(nn.input_min, nn.input_max)
            continue

        Y[ind] = np.argmin(outputs)
        if rect:
            rho, theta, psi, v_own, v_int = X[ind]
            X[ind][0] = rho * np.cos(theta)
            X[ind][1] = rho * np.sin(theta)

        ind += 1

    # ensure all labels are present in the data (note: this overwrites the
    # labels of the first five samples without changing X, so those samples
    # may end up mislabelled)
    s = set(Y)
    for lbl in range(5):
        if lbl not in s:
            Y[lbl] = lbl

    return X, Y
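
A hypothetical invocation, assuming the matching .nnet file sits under
nnet/ next to the script and test_properties is defined in the same project:

X, Y = mk_samples(1, 1, 1000)                       # polar inputs
X_rect, Y_rect = mk_samples(1, 1, 1000, rect=True)  # x/y in place of rho/theta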
Example #4
import time

import numpy as np
import sklearn.datasets

import nnet


def run():
    # Fetch data. fetch_mldata was removed from scikit-learn; fetch_openml
    # serves the same 70000-sample MNIST set in the standard order.
    mnist = sklearn.datasets.fetch_openml('mnist_784', as_frame=False,
                                          data_home='./data')
    split = 60000
    X_train = np.reshape(mnist.data[:split], (-1, 1, 28, 28)) / 255.0
    y_train = mnist.target[:split].astype(int)
    X_test = np.reshape(mnist.data[split:], (-1, 1, 28, 28)) / 255.0
    y_test = mnist.target[split:].astype(int)
    n_classes = np.unique(y_train).size

    # Downsample training data (randint's upper bound is exclusive, unlike
    # the deprecated random_integers)
    n_train_samples = 3000
    train_idxs = np.random.randint(0, split, n_train_samples)
    X_train = X_train[train_idxs, ...]
    y_train = y_train[train_idxs, ...]

    # Setup convolutional neural network
    nn = nnet.NeuralNetwork(layers=[
        nnet.Conv(
            n_feats=12,
            filter_shape=(5, 5),
            strides=(1, 1),
            weight_scale=0.1,
            weight_decay=0.001,
        ),
        nnet.Activation('relu'),
        nnet.Pool(
            pool_shape=(2, 2),
            strides=(2, 2),
            mode='max',
        ),
        nnet.Conv(
            n_feats=16,
            filter_shape=(5, 5),
            strides=(1, 1),
            weight_scale=0.1,
            weight_decay=0.001,
        ),
        nnet.Activation('relu'),
        nnet.Flatten(),
        nnet.Linear(
            n_out=n_classes,
            weight_scale=0.1,
            weight_decay=0.02,
        ),
        nnet.LogRegression(),
    ])

    # Train neural network
    t0 = time.time()
    nn.fit(X_train, y_train, learning_rate=0.05, max_iter=3, batch_size=32)
    t1 = time.time()
    print('Duration: %.1fs' % (t1 - t0))

    # Evaluate on test data
    error = nn.error(X_test, y_test)
    print('Test error rate: %.4f' % error)
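
For reference, assuming 'valid' convolutions, the feature maps shrink from
28x28 to 24x24 after the first 5x5 convolution, to 12x12 after 2x2
max-pooling, and to 8x8 after the second convolution, so Flatten hands
16 * 8 * 8 = 1024 values to the final Linear layer.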
Example #5
File: run_tests.py Project: RedFT/nnet
import numpy as np

import nnet
import test  # project-local helpers (get_cifar10_dataset, normalize, ...)


def three_layer_test():
    Xtr, Ytr, Xte, Yte, label_names = test.get_cifar10_dataset()

    # Reshape each data point to be a 1-dimensional array, for a plain neural network.
    Xtr = Xtr.reshape(50000, 32 * 32 * 3)
    Xte = Xte.reshape(10000, 32 * 32 * 3)

    # PRE-PROCESSING
    Xtr = test.normalize(Xtr)
    Xte = test.normalize(Xte)

    mean = np.mean(np.concatenate([Xtr, Xte]), axis=0)
    Xtr = Xtr - mean
    Xte = Xte - mean

    Xtr = test.append_zeros(Xtr)
    Xte = test.append_zeros(Xte)

    # Neural Net
    nn = nnet.NeuralNetwork(Xtr.shape[1])
    nn.batch_size = 512

    nn.set_training_set(Xtr.T, Ytr)
    nn.set_testing_set(Xte.T, Yte)

    nn.add_layer(
        nnet.FullyConnectedLayer(pass_type="test|train",
                                 output_size=100,
                                 initialization_type='xavier'))
    nn.add_layer(nnet.BatchNormalizationLayer(pass_type="test|train"))
    nn.add_layer(
        nnet.ActivationLayer(pass_type="test|train",
                             activation_type="leaky_relu"))

    nn.add_layer(
        nnet.FullyConnectedLayer(pass_type="test|train",
                                 output_size=50,
                                 initialization_type='xavier'))
    nn.add_layer(nnet.BatchNormalizationLayer(pass_type="test|train"))
    nn.add_layer(
        nnet.ActivationLayer(pass_type="test|train",
                             activation_type="leaky_relu"))

    nn.add_layer(
        nnet.FullyConnectedLayer(pass_type="test|train",
                                 output_size=10,
                                 initialization_type='xavier'))
    nn.add_layer(nnet.SoftmaxLayer(pass_type="test"))
    nn.add_layer(nnet.LossLayer(pass_type="train"))

    # Print out each layer's information in order, then train.
    nn.print_info()
    final_loss, final_accuracy = nn.train(iterations=300)
    assert final_loss < 1
    assert final_accuracy > 0.30
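
Judging from the pass_type strings, layers tagged "test|train" participate
in both passes, while the SoftmaxLayer runs only at test time and the
LossLayer only during training; that split is how the network alternates
between producing class probabilities and computing a training loss.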
Example #6
import time

import numpy as np
import sklearn.datasets

import nnet


def run():
    # Fetch data (fetch_openml replaces the removed fetch_mldata)
    mnist = sklearn.datasets.fetch_openml('mnist_784', as_frame=False,
                                          data_home='./data')
    split = 60000
    X_train = mnist.data[:split] / 255.0
    y_train = mnist.target[:split].astype(int)
    X_test = mnist.data[split:] / 255.0
    y_test = mnist.target[split:].astype(int)
    n_classes = np.unique(y_train).size

    # Downsample training data (randint's upper bound is exclusive, unlike
    # the deprecated random_integers)
    n_train_samples = 10000
    train_idxs = np.random.randint(0, split, n_train_samples)
    X_train = X_train[train_idxs, ...]
    y_train = y_train[train_idxs, ...]

    # Setup multi-layer perceptron
    nn = nnet.NeuralNetwork(layers=[
        nnet.Linear(
            n_out=100,
            weight_scale=0.2,
            weight_decay=0.004,
        ),
        nnet.Activation('relu'),
        nnet.Linear(
            n_out=50,
            weight_scale=0.2,
            weight_decay=0.004,
        ),
        nnet.Activation('relu'),
        nnet.Linear(
            n_out=n_classes,
            weight_scale=0.2,
            weight_decay=0.004,
        ),
        nnet.LogRegression(),
    ])

    # Train neural network
    t0 = time.time()
    nn.fit(X_train, y_train, learning_rate=0.1, max_iter=5, batch_size=64)
    t1 = time.time()
    print('Duration: %.1fs' % (t1 - t0))

    # Evaluate on test data
    error = nn.error(X_test, y_test)
    print('Test error rate: %.4f' % error)
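
Unlike the convolutional example above, this MLP consumes the flat
784-dimensional pixel vectors directly, so mnist.data is used as-is rather
than being reshaped to (N, 1, 28, 28).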
Example #7
    @staticmethod
    def create_random_brain():
        """ Create a brain with randomized parameters """
        brain = nnet.NeuralNetwork()

        # Logic neurons
        logic_0 = nnet.Neuron("lgc_0")
        logic_1 = nnet.Neuron("lgc_1")
        logic_2 = nnet.Neuron("lgc_2")
        logic_3 = nnet.Neuron("lgc_3")
        # Input (hunger) neuron turns on as health goes down
        hunger = nnet.Neuron("hunger", is_input=True)
        # Input (smell) neurons for nearby Food
        food_left = nnet.Neuron("fd_lft", is_input=True)
        food_right = nnet.Neuron("fd_rght", is_input=True)
        # Input (sight) neurons for nearby Agents
        agent_left = nnet.Neuron("agnt_lft", is_input=True)
        agent_right = nnet.Neuron("agnt_rght", is_input=True)
        # Output (movement) neurons
        move_left = nnet.Neuron("mv_lft")
        move_right = nnet.Neuron("mv_rght")
        # Output (attack) neuron
        attack = nnet.Neuron("atk")

        # Input layer
        brain.add_neuron(agent_left)
        brain.add_neuron(food_left)
        brain.add_neuron(hunger)
        brain.add_neuron(food_right)
        brain.add_neuron(agent_right)
        # Hidden layer
        brain.add_neuron(logic_0)
        brain.add_neuron(logic_1)
        brain.add_neuron(logic_2)
        brain.add_neuron(logic_3)
        # Output layer
        brain.add_neuron(move_left)
        brain.add_neuron(attack)
        brain.add_neuron(move_right)

        # Input to hidden layer: left side
        brain.add_synapse(
            nnet.Synapse(agent_left, logic_0, util.rand(-0.25, 1)))
        brain.add_synapse(nnet.Synapse(agent_left, logic_1, util.rand(-1, 1)))
        brain.add_synapse(
            nnet.Synapse(food_left, logic_0, util.rand(-0.25, 0.25)))
        brain.add_synapse(
            nnet.Synapse(food_left, logic_1, util.rand(-0.25, 1)))
        # Input to hidden layer: center
        brain.add_synapse(
            nnet.Synapse(hunger, logic_1, util.rand(-0.25, 0.75)))
        brain.add_synapse(
            nnet.Synapse(hunger, logic_2, util.rand(-0.25, 0.75)))
        # Input to hidden layer: right side
        brain.add_synapse(
            nnet.Synapse(food_right, logic_2, util.rand(-0.25, 1)))
        brain.add_synapse(
            nnet.Synapse(food_right, logic_3, util.rand(-0.25, 0.25)))
        brain.add_synapse(
            nnet.Synapse(agent_right, logic_2, util.rand(-0.25, 0.25)))
        brain.add_synapse(
            nnet.Synapse(agent_right, logic_3, util.rand(-0.25, 1)))
        # Hidden to output layer: left side
        brain.add_synapse(
            nnet.Synapse(logic_0, move_left, util.rand(-0.25, 0.25)))
        brain.add_synapse(nnet.Synapse(logic_0, attack, util.rand(-0.5, 0.5)))
        brain.add_synapse(
            nnet.Synapse(logic_1, move_left, util.rand(-0.25, 1)))
        brain.add_synapse(
            nnet.Synapse(logic_1, attack, util.rand(-0.25, 0.25)))
        # Hidden to output layer: right side
        brain.add_synapse(
            nnet.Synapse(logic_2, attack, util.rand(-0.25, 0.25)))
        brain.add_synapse(
            nnet.Synapse(logic_2, move_right, util.rand(-0.25, 1)))
        brain.add_synapse(nnet.Synapse(logic_3, attack, util.rand(-0.5, 0.5)))
        brain.add_synapse(
            nnet.Synapse(logic_3, move_right, util.rand(-0.25, 0.25)))

        return brain
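
The resulting topology is small and hand-wired: five input neurons, four
hidden logic neurons, and three outputs, with each synapse drawing its
initial weight from its own asymmetric range so that, for instance, food
sensed on one side tends to drive the matching movement output while nearby
agents feed the attack neuron.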
Example #8
import time

import numpy as np
from sklearn.model_selection import KFold

import nnet

k_fold = KFold(n_splits=10)

result = []
for nf in n_feats:
    fold_result = []
    print('*** Starting Test of feat [', nf, ']...')

    # SETUP one-layer CONVnet
    nn = nnet.NeuralNetwork(layers=[
        nnet.Conv(
            n_feats=nf,
            filter_shape=(5, 5),
            strides=(1, 1),
            weight_scale=0.1,
        ),
        nnet.Activation('relu'),
        nnet.Flatten(),
        nnet.Linear(
            n_out=n_classes,
            weight_scale=0.1,
        ),
        nnet.LogRegression(),
    ])

    # TRAINING
    for train_indices, valid_indices in k_fold.split(np.array(X_train)):
        np.random.shuffle(train_indices)
        #print(train_indices, valid_indices)
        X_tr = X_train[train_indices, ...]
        Y_tr = Y_train[train_indices, ...]
        X_val = X_train[valid_indices, ...]
        Y_val = Y_train[valid_indices, ...]

        # Train neural network (remainder of the fold loop as in example #10)
        t0 = time.time()
        nn.fit(X_tr, Y_tr, learning_rate=0.1, max_iter=50, batch_size=30)
        t1 = time.time()

        # Evaluate on validation data
        error = nn.error(X_val, Y_val)
        fold_result.append(error)

        print('Duration: %.1fs' % (t1 - t0))
        print('Valid error rate: %.4f' % error)

    # save the mean validation error for this n_feats value
    result.append(np.mean(np.array(fold_result)))
Example #9
    def __init__(self, model_config):
        self.model_config = model_config
        self.nnet = nnet.NeuralNetwork(model_config)
Example #10
import time

import numpy as np

import nnet


def optimize_filter(n_train_samples,
                    n_classes,
                    X_train,
                    Y_train,
                    split,
                    weight_decay=0.0):
    # randint's upper bound is exclusive, so use split (not split - 1) to
    # keep index split - 1 reachable
    train_idxs = np.random.randint(0, split, n_train_samples)
    n_feats = [2, 4, 6, 8, 12, 16]  # candidate filter counts for both sweeps

    X_train = X_train[train_idxs, ...]
    Y_train = Y_train[train_idxs, ...]

    one_layer_result = []
    for nf in n_feats:
        fold_result = []
        print('*** Starting 1-layer test of feat [', nf, ']...')

        # SETUP one-layer CONVnet
        nn = nnet.NeuralNetwork(layers=[
            nnet.Conv(
                n_feats=nf,
                filter_shape=(5, 5),
                strides=(1, 1),
                weight_scale=0.1,
                weight_decay=weight_decay,
            ),
            nnet.Activation('relu'),
            nnet.Flatten(),
            nnet.Linear(
                n_out=n_classes,
                weight_scale=0.1,
            ),
            nnet.LogRegression(),
        ])

        # TRAINING (relies on a module-level k_fold = KFold(n_splits=10), as
        # set up in example #8)
        for train_indices, valid_indices in k_fold.split(np.array(X_train)):
            np.random.shuffle(train_indices)
            #print(train_indices, valid_indices)
            X_tr = X_train[train_indices, ...]
            Y_tr = Y_train[train_indices, ...]
            X_val = X_train[valid_indices, ...]
            Y_val = Y_train[valid_indices, ...]

            # Train neural network
            t0 = time.time()
            nn.fit(X_tr, Y_tr, learning_rate=0.1, max_iter=50, batch_size=30)
            t1 = time.time()

            # Evaluate on validation data
            error = nn.error(X_val, Y_val)
            fold_result.append(error)

            print('Duration: %.1fs' % (t1 - t0))
            print('Valid error rate: %.4f' % error)

        # save the result for each n_feat
        one_layer_result.append(np.mean(np.array(fold_result)))

    best_one_layer = n_feats[np.argmin(one_layer_result)]

    # Try two-layer CONVnet
    two_layer_result = []
    for nf in n_feats:
        fold_result = []
        print('*** Starting 2-layer test of feat [', nf, ']...')

        # SETUP two-layer CONVnet
        nn = nnet.NeuralNetwork(layers=[
            nnet.Conv(
                n_feats=best_one_layer,
                filter_shape=(5, 5),
                strides=(1, 1),
                weight_scale=0.1,
                weight_decay=weight_decay,
            ),
            nnet.Activation('relu'),
            nnet.Pool(
                pool_shape=(2, 2),
                strides=(2, 2),
                mode='max',
            ),
            nnet.Conv(
                n_feats=nf,
                filter_shape=(5, 5),
                strides=(1, 1),
                weight_scale=0.1,
                weight_decay=weight_decay,
            ),
            nnet.Activation('relu'),
            nnet.Flatten(),
            nnet.Linear(
                n_out=n_classes,
                weight_scale=0.1,
            ),
            nnet.LogRegression(),
        ])

        # TRAINING
        for train_indices, valid_indices in k_fold.split(np.array(X_train)):
            np.random.shuffle(train_indices)
            #print(train_indices, valid_indices)
            X_tr = X_train[train_indices, ...]
            Y_tr = Y_train[train_indices, ...]
            X_val = X_train[valid_indices, ...]
            Y_val = Y_train[valid_indices, ...]

            # Train neural network
            t0 = time.time()
            nn.fit(X_tr, Y_tr, learning_rate=0.1, max_iter=50, batch_size=30)
            t1 = time.time()

            # Evaluate on validation data
            error = nn.error(X_val, Y_val)
            fold_result.append(error)

            print('Duration: %.1fs' % (t1 - t0))
            print('Valid error rate: %.4f' % error)

        # save the result for each n_feat
        two_layer_result.append(np.mean(np.array(fold_result)))

    best_two_layer = n_feats[np.argmin(two_layer_result)]

    print('One-layer result :', one_layer_result)
    print('Two-layer result :', two_layer_result)
    print('Best n_feats (one-layer, two-layer):', best_one_layer,
          best_two_layer)

    return best_one_layer, best_two_layer
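
A hypothetical call, assuming MNIST-style data has been prepared as in
examples #4 and #6 (none of the argument values below are confirmed by the
snippet):

best_1, best_2 = optimize_filter(n_train_samples=3000,
                                 n_classes=n_classes,
                                 X_train=X_train,
                                 Y_train=y_train,
                                 split=60000,
                                 weight_decay=0.001)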
Example #11
            X.append(x / 1000)
            Y.append(y / 1000)
            L.append(label)

    return ax.scatter(X, Y, c=L, cmap=ListedColormap(colors))


if __name__ == '__main__':

    font = {'family': 'sans', 'size': 16}

    plt.rc('font', **font)

    filename = '%s/nnet/ACASXU_run2a_1_1_batch_2000.nnet' % (
        os.path.dirname(__file__) or '.')
    nn = nnet.NeuralNetwork(filename, True, True)

    filename = '%s/models/ACASXU_1_1.json' % (os.path.dirname(__file__) or '.')
    gbm = vote.Ensemble.from_file(filename)

    fig, (ax1, ax2) = plt.subplots(2, sharex=True, sharey=True)

    render_prediction(ax1, nn.evaluate)
    render_prediction(ax2, lambda xvec: gbm.eval(*xvec))

    legend_elements = [
        Patch(color=colors[0], label='COC'),
        Patch(color=colors[1], label='WL'),
        Patch(color=colors[2], label='WR'),
        Patch(color=colors[3], label='SL'),
        Patch(color=colors[4], label='SR'),
    ]