Example #1
    def test_feed_forward(self):
        input_layer = nw.InputLayer(4)
        small_layer = nw.Layer(4)
        output_layer = nw.Layer(1)

        input_layer.set_next_layer(small_layer)
        small_layer.set_next_layer(output_layer)

        small_weights = np.array([[0.5, -0.5, 0.5, 0.5], [0, 0, 0, 0],
                                  [4, -0.5, 0, 0], [0, 0, 0, 0]])
        small_bias = np.array([[1], [2], [-3], [4]])

        small_layer.set_weights(small_weights)
        small_layer.set_biases(small_bias)

        output_weights = np.array([[0, 0, 2, 0]])
        output_bias = np.array([[-1]])

        output_layer.set_weights(output_weights)
        output_layer.set_biases(output_bias)

        small_input = np.array([[1], [2], [3], [4]])
        small_output = np.array([[0.5]])

        np.testing.assert_almost_equal(input_layer.feedforward(small_input),
                                       small_output)
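Sanity check for the expected value: the hidden layer's weighted input is z = W·x + b = [4, 2, 0, 4]^T; a sigmoid maps this to roughly [0.982, 0.881, 0.5, 0.982]^T; the output layer then computes 2 · 0.5 - 1 = 0, and sigmoid(0) = 0.5, which is exactly small_output. (The sigmoid activation is inferred from these numbers; the tests never name it.)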
Example #2
    def test_z(self):
        unit_layer = nw.Layer(1)
        small_layer = nw.Layer(4)
        big_layer = nw.Layer(1000)

        unit_weights = np.array([[0.5]])
        small_weights = np.array([[0.5, -0.5, 0.5, 0.5], [0, 0, 0, 0],
                                  [4, -0.5, 0, 0], [0, 0, 0, 0]])
        big_weights = np.ones([1000, 500])

        unit_bias = np.array([[-1]])
        small_bias = np.array([[1], [2], [-3], [4]])
        big_bias = np.ones([1000, 1])

        unit_layer.set_weights(unit_weights)
        small_layer.set_weights(small_weights)
        big_layer.set_weights(big_weights)

        unit_layer.set_biases(unit_bias)
        small_layer.set_biases(small_bias)
        big_layer.set_biases(big_bias)

        unit_input = np.array([[1.5]])
        small_input = np.array([[1], [2], [3], [4]])
        big_input = np.zeros([500, 1])

        unit_output = np.array([[-0.25]])
        small_output = np.array([[4], [2], [0], [4]])
        big_output = np.ones([1000, 1])

        np.testing.assert_almost_equal(unit_layer._get_z(unit_input),
                                       unit_output)
        np.testing.assert_almost_equal(small_layer._get_z(small_input),
                                       small_output)
        np.testing.assert_almost_equal(big_layer._get_z(big_input), big_output)
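For reference, here is a minimal sketch of the Layer interface these two tests exercise. It is reconstructed from the expected values above, not the project's actual implementation: the sigmoid activation, the recursive hand-off to next_layer, and the pass-through InputLayer are all assumptions.

import numpy as np

class Layer:
    def __init__(self, size):
        self.size = size
        self.next_layer = None
        self.weights = None
        self.biases = None

    def set_next_layer(self, layer):
        self.next_layer = layer

    def set_weights(self, weights):
        self.weights = weights

    def set_biases(self, biases):
        self.biases = biases

    def _get_z(self, inputs):
        # weighted input: z = W a + b
        return np.dot(self.weights, inputs) + self.biases

    def feedforward(self, inputs):
        # sigmoid activation (inferred from the expected test values)
        activation = 1.0 / (1.0 + np.exp(-self._get_z(inputs)))
        if self.next_layer is None:
            return activation
        return self.next_layer.feedforward(activation)

class InputLayer(Layer):
    def feedforward(self, inputs):
        # assumed: the input layer passes its input through unchanged
        return self.next_layer.feedforward(inputs)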
Example #3
def main():
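    # load MNIST via the python-mnist package (reads the raw data files from MNIST_data/)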
    mndata = MNIST('MNIST_data')

    mnist_images_train, mnist_labels_train = mndata.load_training()
    mnist_images_test, mnist_labels_test = mndata.load_testing()

    train_size = 60000
    mnist_images_train = mnist_images_train[:train_size]
    mnist_labels_train = mnist_labels_train[:train_size]

    batch_size = 1000
    hm_epochs = 1

    input_layer_size = 784
    hidden_layer_size = 500
    output_layer_size = 10

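    # two tanh hidden layers feeding a 10-unit output layer (default activation)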
    nn = network.Network(input_layer_size, [
        network.Layer(hidden_layer_size, input_layer_size, 'tanh'),
        network.Layer(hidden_layer_size, hidden_layer_size, 'tanh'),
        network.Layer(output_layer_size, hidden_layer_size)
    ])

    train(nn, mnist_images_train, mnist_labels_train, batch_size, hm_epochs)
dataset_label = '{}_fs{}_{}prots'.format(c1_dumpfile_name, args.feature_size,
                                         args.s2_prototype_cells)

print('Creating C1 layers')
t1 = time.perf_counter()
with open(args.c1_dumpfile, 'rb') as dumpfile:
    ddict = pickle.load(dumpfile)
layer_collection['C1'] = {}
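# the dump maps each size to a list of serialized C1 layers (shape, label, spike trains)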
for size, layers_as_dicts in ddict.items():
    layer_list = []
    for layer_as_dict in layers_as_dicts:
        n, m = layer_as_dict['shape']
        spiketrains = layer_as_dict['segment'].spiketrains
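        # strip the units from the recorded spike trains so SpikeSourceArray
        # receives plain lists of spike times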
        dimensionless_sts = [[s for s in st] for st in spiketrains]
        new_layer = nw.Layer(
            sim.Population(n * m,
                           sim.SpikeSourceArray(spike_times=dimensionless_sts),
                           label=layer_as_dict['label']), (n, m))
        layer_list.append(new_layer)
    layer_collection['C1'][size] = layer_list
print('C1 creation took {} s'.format(time.perf_counter() - t1))

print('Creating S2 layers')
t1 = time.perf_counter()
layer_collection['S2'] = nw.create_S2_layers(layer_collection['C1'],
                                             args.feature_size,
                                             args.s2_prototype_cells,
                                             refrac_s2=6)
print('S2 creation took {} s'.format(time.perf_counter() - t1))

occurrence = None
#occurrence = re.search('\d+\.\d+blank', validation_dumpfile_name)
if occurrence is not None:
    blanktime = float(occurrence.group()[:-5])

print('Creating C1 layers')
t1 = time.perf_counter()
with open(args.training_c1_dumpfile, 'rb') as f:
    training_ddict = pickle.load(f)
with open(args.validation_c1_dumpfile, 'rb') as f:
    validation_ddict = pickle.load(f)
layer_collection['C1'] = {}
for size, layers_as_dicts in training_ddict.items():
    layer_list = []
    for layer_as_dict in layers_as_dicts:
        n, m = layer_as_dict['shape']
        new_layer = nw.Layer(
            sim.Population(n * m,
                           sim.SpikeSourceArray(),
                           label=layer_as_dict['label']), (n, m))
        layer_list.append(new_layer)
    layer_collection['C1'][size] = layer_list
print('C1 creation took {} s'.format(time.perf_counter() - t1))
t1 = time.perf_counter()
print('Creating S2 layers and reading the epoch weights')
with open(args.weights_from, 'rb') as f:
    epoch_weights_list = pickle.load(f)
epoch = epoch_weights_list[-1][0]
weights_dict_list = epoch_weights_list[-1][1]
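# the stored weight vectors are flattened f_s x f_s patches, so the feature
# size is recovered as the square root of their length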
f_s = int(np.sqrt(list(weights_dict_list[0].values())[0].shape[0]))
s2_prototype_cells = len(weights_dict_list)
layer_collection['S2'] = nw.create_S2_layers(layer_collection['C1'],
                                             f_s,
                                             s2_prototype_cells,
                                             refrac_s2=.1,