Example #1
import numpy as np
# Network, Layer and TANH_ACTIVATION come from the surrounding project;
# their import is not shown in this snippet.

# The first three layer-size constants are cut off in the snippet; the values
# below are assumptions (the input data below has 2 features).
FIRST_LAYER_INPUT = 2
SECOND_LAYER_INPUT = 10
THIRD_LAYER_INPUT = 15
FOURTH_LAYER_INPUT = 15
LABELS_NUM = 2
NUM_OF_SAMPLES = 100
x = np.random.random((NUM_OF_SAMPLES, FIRST_LAYER_INPUT)).T  # shape (features, samples)
#x = np.array([[1, 0], [2, 1], [4, 1], [5, 2], [1, 4], [2, 3], [0.9, 1],
#              [2, 5], [1, 5], [6, 3]]).T
# Alternating one-hot labels: even-indexed samples get class 0, odd get class 1.
y_arr = []
for i in range(NUM_OF_SAMPLES):
    if i % 2 == 0:
        y_arr.append(np.array([1, 0]))
    else:
        y_arr.append(np.array([0, 1]))
y = np.array(y_arr).T  # shape (LABELS_NUM, NUM_OF_SAMPLES)
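
# Not in the original snippet: the same labels can be built without a Python
# loop; this is an equivalent vectorized sketch using np.eye.
y_alt = np.eye(LABELS_NUM)[np.arange(NUM_OF_SAMPLES) % 2].T
assert np.array_equal(y, y_alt)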
n = Network()
n.add_layer(Layer(FIRST_LAYER_INPUT, SECOND_LAYER_INPUT, TANH_ACTIVATION))
n.add_layer(Layer(SECOND_LAYER_INPUT, THIRD_LAYER_INPUT, TANH_ACTIVATION))
n.add_layer(Layer(THIRD_LAYER_INPUT, FOURTH_LAYER_INPUT, TANH_ACTIVATION))
n.add_layer(Layer(FOURTH_LAYER_INPUT, LABELS_NUM, None, softmax_layer=True))
# Shuffle the sample columns; x and y are (features, samples), so permute
# along axis 1 with the same index order for inputs and labels.
curr_batch = np.random.permutation(NUM_OF_SAMPLES)
batch_x = x[:, curr_batch]
batch_y = y[:, curr_batch]
l = n.get_layer(0)  # reference to the first layer

# Softmax test functions

l_sm = Layer(FIRST_LAYER_INPUT, LABELS_NUM, None, softmax_layer=True)

# Small fixed fixtures used by the softmax tests.
W_EXAMPLE = np.array([[2, 0], [0, 1]]).T     # weights, (inputs, classes)
B_EXAMPLE = np.array([1, 1])                 # one bias per class
C1_EXAMPLE = np.atleast_2d(np.array([0])).T  # single label as a (1, 1) column
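
# Not in the original snippet: a minimal sketch of the forward computation a
# softmax layer performs on these fixtures, in plain numpy. The W^T x + b
# convention is an assumption about the Layer internals.
x_probe = np.array([[1.0], [2.0]])  # one sample with FIRST_LAYER_INPUT features
logits = W_EXAMPLE.T @ x_probe + B_EXAMPLE.reshape(-1, 1)
probs = np.exp(logits - logits.max(axis=0, keepdims=True))  # stable softmax
probs /= probs.sum(axis=0, keepdims=True)
print(probs)  # each column sums to 1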
Example #2
import scipy.io
import matplotlib.pyplot as plt
from itertools import product
# Network, Layer, TANH_ACTIVATION and stochastic_gradient_descent come from
# the surrounding project; their import is not shown in this snippet.

# NOTE: the snippet is truncated here; the enclosing function's name and
# signature are assumed from the variables its body uses.
def plot_spiral(coord_x_pos, coord_y_pos, coord_x_neg, coord_y_neg, ind):
    plt.scatter(coord_y_pos, coord_x_pos, alpha=0.2)
    plt.scatter(coord_y_neg, coord_x_neg, alpha=0.2)
    plt.savefig('spirals/plot_{}.png'.format(ind))


# SwissRollData.mat stores samples under Y* and one-hot labels under C*
# (t = training, v = validation).
mat = scipy.io.loadmat('SwissRollData.mat')
labels = mat['Ct']
training = mat['Yt']
labels_validation = mat['Cv']
samples_validation = mat['Yv']
x = training
y = labels
x_validation = samples_validation
y_validation = labels_validation
n = Network()
n.add_layer(Layer(2, 10, TANH_ACTIVATION))
#n.add_layer(Layer(10, 10, TANH_ACTIVATION))
#n.add_layer(Layer(10, 10, TANH_ACTIVATION))
n.add_layer(Layer(10, 10, TANH_ACTIVATION))
n.add_layer(Layer(10, 2, None, softmax_layer=True))
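
# Not from the original source: a quick parameter count for the architecture
# built above (2 -> 10 -> 10 -> 2), assuming Layer is a dense layer with
# weights plus biases.
layer_dims = [2, 10, 10, 2]
n_params = sum(d_in * d_out + d_out
               for d_in, d_out in zip(layer_dims, layer_dims[1:]))
print(n_params)  # 30 + 110 + 22 = 162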
epochs = [20]
batch_sizes = [100]
learning_rates = [0.5]
for epoch, batch_size, learning_rate in product(epochs, batch_sizes,
                                                learning_rates):
    print('epochs {} batch_size {} learning_rate {}'.format(
        epoch, batch_size, learning_rate))
    # The original call is truncated here; the remaining keyword arguments are
    # assumed from the hyperparameter grid above.
    n, obj = stochastic_gradient_descent(n,
                                         x,
                                         y,
                                         batch_size=batch_size,
                                         epochs=epoch,
                                         learning_rate=learning_rate)
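
# Not part of the original source: a minimal sketch of what a helper like
# stochastic_gradient_descent typically does, written for plain softmax
# regression in numpy. Names and the exact update rule are assumptions, not
# the project's actual implementation.
def sgd_sketch(x, y, batch_size, epochs, learning_rate):
    n_features, n_samples = x.shape
    n_classes = y.shape[0]
    W = np.zeros((n_classes, n_features))
    b = np.zeros((n_classes, 1))
    for _ in range(epochs):
        order = np.random.permutation(n_samples)
        for start in range(0, n_samples, batch_size):
            idx = order[start:start + batch_size]
            xb, yb = x[:, idx], y[:, idx]
            logits = W @ xb + b
            probs = np.exp(logits - logits.max(axis=0, keepdims=True))
            probs /= probs.sum(axis=0, keepdims=True)
            grad = probs - yb  # d(cross-entropy)/d(logits) for one-hot labels
            W -= learning_rate * (grad @ xb.T) / idx.size
            b -= learning_rate * grad.mean(axis=1, keepdims=True)
    return W, b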