Example #1
import numpy as np
import theano
from collections import OrderedDict

# add_datasets_to_graph, conv2d_layer and pool2d_layer are assumed to be
# imported from the host library's graph utilities and node definitions

def test_pool2d_layer():
    random_state = np.random.RandomState(42)
    graph = OrderedDict()
    # fake 3-channel, MNIST-sized input batch, drawn from the seeded RNG
    # so the test is deterministic
    X_r = random_state.randn(10, 3, 28, 28).astype(theano.config.floatX)
    X_sym = add_datasets_to_graph([X_r], ["X"], graph)
    l1 = conv2d_layer([X_sym], graph, 'l1', 5, random_state=random_state)
    l2 = pool2d_layer([l1], graph, 'l2')
    # check that pooling layers stack as well
    l3 = pool2d_layer([l2], graph, 'l3')
    f = theano.function([X_sym], [l1, l2, l3], mode="FAST_COMPILE")
    l1, l2, l3 = f(X_r)
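
With the compiled function in hand, the returned activations can be inspected directly; the exact shapes depend on conv2d_layer's border mode and pool2d_layer's pooling size, so the check below is only illustrative:

# illustrative only: output shapes depend on the library's convolution
# border mode and pooling defaults
for name, act in zip(["l1", "l2", "l3"], f(X_r)):
    print(name, act.shape)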
Example #2
import numpy as np
from collections import OrderedDict

# the mnist dict (images, target, valid_indices) is assumed to come from
# the host library's MNIST fetcher; the layer and utility functions below
# likewise come from its graph helpers
valid_indices = mnist["valid_indices"]
X = mnist["images"]
y = mnist["target"]
n_targets = 10
# convert integer class labels to one-hot rows for the softmax output
y = convert_to_one_hot(y, n_targets)
minibatch_size = 128

# graph holds the information necessary to build layers from their parents
graph = OrderedDict()
X_sym, y_sym = add_datasets_to_graph([X[:minibatch_size], y[:minibatch_size]],
                                     ["X", "y"], graph)
# fixed random state so the script is deterministic
random_state = np.random.RandomState(1999)

# two conv/pool stages, then flatten and classify with a softmax layer
l1 = conv2d_layer([X_sym], graph, 'conv1', 8, random_state=random_state)
l2 = pool2d_layer([l1], graph, 'pool1')
l3 = conv2d_layer([l2], graph, 'conv2', 16, random_state=random_state)
l4 = pool2d_layer([l3], graph, 'pool2')
# flatten the pooled feature maps to (batch_size, n_features)
l5 = l4.reshape((l4.shape[0], -1))
y_pred = softmax_layer([l5],
                       graph,
                       'y_pred',
                       n_targets,
                       random_state=random_state)
# negative log-likelihood of the true targets under the predicted softmax
nll = categorical_crossentropy(y_pred, y_sym).mean()
cost = nll

params, grads = get_params_and_grads(graph, cost)

learning_rate = 0.001
momentum = 0.9
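
The snippet stops right after defining the optimizer hyperparameters. As a minimal sketch (not necessarily the optimizer the original script used), params, grads, learning_rate and momentum can drive a classical momentum SGD update in plain Theano like this:

import theano

# hypothetical training step using classical momentum SGD; the original
# example presumably continued with the library's own optimizer
updates = OrderedDict()
for param, grad in zip(params, grads):
    # one velocity buffer per parameter, initialized to zeros
    velocity = theano.shared(np.zeros_like(param.get_value()))
    new_velocity = momentum * velocity - learning_rate * grad
    updates[velocity] = new_velocity
    updates[param] = param + new_velocity

fit_function = theano.function([X_sym, y_sym], [cost], updates=updates)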