Example #1
def gradient_check():
    '''
    Gradient check: numerically verify the gradients computed by
    backpropagation on a single training sample.
    '''
    labels, data_set = transpose(train_data_set())
    net = Network([8, 3, 8])
    net.gradient_check(data_set[0], labels[0])
    return net
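Network.gradient_check() itself is not shown in these snippets. Conceptually, it compares the analytic gradients produced by backpropagation against central finite differences of the loss. The following is a minimal, generic sketch of that comparison, using a hypothetical numeric_gradient helper and a toy quadratic loss; it illustrates the technique and is not the Network class's actual code.

import numpy as np

def numeric_gradient(loss_fn, w, epsilon=1e-4):
    # loss_fn takes the flat weight vector w and returns a scalar loss.
    grad = np.zeros_like(w)
    for i in range(w.size):
        old = w[i]
        w[i] = old + epsilon
        loss_plus = loss_fn(w)
        w[i] = old - epsilon
        loss_minus = loss_fn(w)
        w[i] = old  # restore the perturbed weight
        grad[i] = (loss_plus - loss_minus) / (2 * epsilon)
    return grad

# Toy check: for the loss 0.5 * ||w||^2 the analytic gradient is w itself.
w = np.array([0.5, -1.2, 2.0])
loss_fn = lambda v: 0.5 * np.sum(v ** 2)
diff = np.abs(numeric_gradient(loss_fn, w.copy()) - w).max()
print('max abs difference: %e' % diff)  # should be vanishingly small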
Example #2
def test():
    # Train an 8-8-8 network on the identity-mapping data set,
    # reporting the loss on the last sample after every epoch.
    labels, data_set = transpose(train_data_set())
    net = Network([8, 8, 8])
    rate = 0.1
    mini_batch = 100
    epoch = 10
    for i in range(epoch):
        net.train(labels, data_set, rate, mini_batch)
        print 'after epoch %d loss: %f' % (
            (i + 1), net.loss(labels[-1], net.predict(data_set[-1])))
        rate /= 2  # halve the learning rate after each epoch
    correct_ratio(net)
Example #3
import numpy as np

def test():
    # Like the previous example, but also prints the rounded prediction
    # for the last sample alongside the per-epoch loss.
    labels, data_set = transpose(train_data_set())
    net = Network([8, 8, 8])
    rate = 0.3
    epoch = 10
    for i in range(epoch):
        net.train(labels, data_set, rate, 10, 100)
        print np.around(net.predict([data_set[-1]]), decimals=3).flatten()
        print 'after epoch %d loss: %f' % (
            (i + 1),
            net.loss(labels[-1], net.predict([data_set[-1]]).reshape(8, 1))
        )
        rate /= 2
    correct_ratio(net)
Example #4
def test():
    labels, data_set = transpose(train_data_set())
    net = Network([8, 3, 8])
    rate = 0.5
    mini_batch = 20
    epoch = 10
    for i in range(epoch):
        net.train(labels, data_set, rate, mini_batch)
        print 'after epoch %d loss: %f' % (
            (i + 1),
            net.loss(labels[-1], net.predict(data_set[-1]))
        )
        rate /= 2
    correct_ratio(net)
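Each of the test() variants above halves the learning rate after every epoch (rate /= 2), a simple exponential decay schedule: with an initial rate of 0.5, epoch i runs at 0.5 / 2**(i - 1), so the tenth and final epoch trains at roughly 0.001. A tiny illustration of the schedule, independent of the Network class:

rate = 0.5
for i in range(10):
    print('epoch %d rate: %g' % (i + 1, rate))
    rate /= 2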
Example #5
def correct_ratio(network):
    # Fraction of the 256 possible inputs that the network reconstructs exactly.
    normalizer = Normalizer()
    correct = 0.0
    for i in range(256):
        if normalizer.denorm(network.predict(normalizer.norm(i))) == i:
            correct += 1.0
    print 'correct_ratio: %.2f%%' % (correct / 256 * 100)

def test():
    labels, data_set = transpose(train_data_set())
    net = Network([8, 3, 8])
    rate = 0.5
    mini_batch = 20
    epoch = 10
    for i in range(epoch):
        net.train(labels, data_set, rate, mini_batch)
        print 'after epoch %d loss: %f' % (
            (i + 1),
            net.loss(labels[-1], net.predict(data_set[-1]))
        )
        rate /= 2
    correct_ratio(net)

def gradient_check():
    labels, data_set = transpose(train_data_set())
    net = Network([8, 3, 8])
    net.gradient_check(data_set[0], labels[0])
    return net
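All of the examples above lean on helper utilities that are not part of these snippets: train_data_set() builds the training samples, transpose() reorders them into the shape the network expects, and Normalizer converts between an integer in [0, 255] and an 8-element vector. Below is a minimal sketch of what such helpers could look like, assuming the common 0.9/0.1 bit-encoding convention and one sample every 8 integers; the actual definitions live elsewhere in the source module and may differ in detail.

import numpy as np

class Normalizer(object):
    '''Encode an integer in [0, 255] as an 8-element vector and decode it back.'''
    def __init__(self):
        self.mask = [0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80]

    def norm(self, number):
        # Each bit of the integer becomes 0.9 (set) or 0.1 (clear).
        return [0.9 if number & m else 0.1 for m in self.mask]

    def denorm(self, vec):
        # Threshold each output at 0.5 and reassemble the integer from its bits.
        flat = np.asarray(vec).flatten()
        return sum(m for v, m in zip(flat, self.mask) if v > 0.5)

def train_data_set():
    # The task is an identity mapping, so every sample doubles as its own label.
    normalizer = Normalizer()
    data_set = [normalizer.norm(i) for i in range(0, 256, 8)]
    labels = list(data_set)
    return labels, data_set

def transpose(args):
    # Reshape each flat 8-element sample into an (8, 1) column vector so the
    # network's matrix operations line up; the (labels, data_set) order is kept.
    return tuple([np.array(s).reshape(len(s), 1) for s in group] for group in args)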