Code example #1
import numpy as np

import mnist  # project-local loader exposing load_data_wrapper()
# FeedForwardNetwork, sigmoid, d_sigmoid, squared_error and d_squared_error
# are assumed to come from this project's own network module.


def main():
    training_data, validation_data, test_data = mnist.load_data_wrapper()
    # load_data_wrapper() returns generators under Python 3, so materialize
    # them before iterating more than once.
    training_data = list(training_data)
    validation_data = list(validation_data)
    test_data = list(test_data)

    batch_size = 100
    training_rate = 0.1
    min_error = 0.001
    # A 784-50-50-10 network: sigmoid activations on all three weighted
    # layers, trained under the squared-error cost.
    net = FeedForwardNetwork([784, 50, 50, 10], [sigmoid] * 3, [d_sigmoid] * 3,
                             squared_error, d_squared_error, 10)
    net.train_to_accuracy(training_data, validation_data, training_rate,
                          min_error, batch_size, False)
    correct = 0
    total_error = 0
    print('Validating network')
    for data_point in validation_data:
        output, error = net.prop_to_and_fro(data_point[0], data_point[1], 0)
        total_error += error
        # The predicted class is the output unit with the largest activation,
        # so compare argmax, not argmin.
        if np.argmax(output) == np.argmax(data_point[1]):
            correct += 1

    print("Done! Final accuracy: " + str(correct / len(validation_data)) + '%')
Code example #2
# Alternative hidden-layer activations, kept for reference:
# activation_functions.PReLU(learning_methods.Momentum(.01, .7), 20),
# activation_functions.PReLU(learning_methods.Momentum(.01, .7), 30)
network_activations = [activation_functions.Tanh(),
                       activation_functions.Softmax()]


def reduceL(t):
    # Replace each one-hot label vector with its integer class index, in
    # place, so training pairs can be scored the same way as test data.
    for index, v in enumerate(t):
        x, y = v
        t[index] = x, np.argmax(y)
    return t


eta = 3
lmbda = 0
epochs = 64
mini_batch = 10
# network_topology is assumed to be defined earlier in the (omitted) script,
# e.g. a layer-size list such as [784, 30, 10].
net = fcnetwork.FCNetwork(network_topology,
                          network_activations,
                          cost_functions.CrossEntropy(),
                          None,
                          [bn.BNLayer(learning_methods.Momentum(.01, .7), .06,
                                      (30, mini_batch))])
train, valid, test = mnist_loader.load_data_wrapper()

train_list = list(train)
net.learn(train_list, epochs, mini_batch,
          learning_methods.Momentum(eta, .7, reg.L2Reg(lmbda, len(train_list))),
          test_data=[reduceL(train_list[:10000]), list(test), list(valid)])
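
learning_methods.Momentum(eta, .7, reg.L2Reg(lmbda, len(train_list))) is project-specific, but the update it names is the standard momentum step with L2 weight decay. A minimal sketch of that rule, with all names hypothetical:

import numpy as np

def momentum_step(w, grad, v, eta=3.0, mu=0.7, lmbda=0.0, n=50000):
    # One momentum update with L2 weight decay (all names hypothetical).
    # The (lmbda / n) * w term mirrors reg.L2Reg(lmbda, len(train_list));
    # v is a velocity buffer that accumulates past gradients scaled by mu.
    grad = grad + (lmbda / n) * w
    v = mu * v - eta * grad
    return w + v, v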
Code example #3
import numpy as np
import tensorflow as tf  # written against the TensorFlow 1.x API
from datetime import datetime

# Module name assumed: a project-local MNIST loader in the Nielsen style.
from mnist_loader import load_data_wrapper


def extract_first_n(tf_training_data, n):
    # Keep only the samples whose label falls in the first n classes.
    # Training labels are one-hot vectors; validation/test labels are
    # np.int64 scalars, so both cases are handled in the filters below.
    x, y = tf_training_data
    x = [
        t.ravel() for t, m in zip(x, y)
        if (sum(m[:n]) != 0 if not isinstance(m, np.int64) else (m < n))
    ]
    f = lambda t: np.argmax(t) if not isinstance(t, np.int64) else t
    y = [
        f(t) for t in y
        if (sum(t[:n]) != 0 if not isinstance(t, np.int64) else (t < n))
    ]
    return x, y


train, valid, test = load_data_wrapper()

init = tf.global_variables_initializer()
saver = tf.train.Saver()

test_data = True
valid_data = False
with tf.Session() as sess:
    init.run()

    now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
    root_logdir = "tf_logs"
    logdir = "{}/run-{}".format(root_logdir, now)

    file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())
    # `loss` is assumed to be defined in the omitted graph-construction code.
    cost_log = tf.summary.scalar("Cost", loss)
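
The snippet stops before the training loop, so the summary op is created but never written. Under the TF1 API used here, it would typically be evaluated and flushed per step; a continuation sketch in which X, y_, train_op, batch_x and batch_y are hypothetical graph nodes and batches:

    for step in range(1000):
        sess.run(train_op, feed_dict={X: batch_x, y_: batch_y})
        if step % 50 == 0:
            summary_str = sess.run(cost_log, feed_dict={X: batch_x, y_: batch_y})
            file_writer.add_summary(summary_str, step)
    file_writer.close()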
Code example #4
File: main.py  Project: tathagatoroy/ML-algorithms
import network
import mnist

(training_data, validation_data, test_data, training_size, validation_size,
 test_size) = mnist.load_data_wrapper()
roy_net = network.neuralnet([784, 50, 10])

# Positional arguments follow the usual SGD signature:
# 30 epochs, mini-batch size 15, learning rate 1.
roy_net.SGD(training_data, 30, 15, 1)

val = roy_net.evaluate(validation_data)
print("Result of validation : {0} pictures identified correctly out of {1} ".
      format(val, validation_size))
test = roy_net.evaluate(test_data)
print("Result of testing  : {0} pictures identified correctly out of {1} ".
      format(test, test_size))
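
network.neuralnet and its evaluate method are project-local, but evaluators in this style conventionally count argmax matches against integer test labels. A minimal sketch of that convention (feedforward is a hypothetical method name):

import numpy as np

def evaluate(net, data):
    # Count the (x, label) pairs the network classifies correctly; the
    # prediction is the index of the largest output activation.
    results = [(np.argmax(net.feedforward(x)), y) for (x, y) in data]
    return sum(int(pred == y) for pred, y in results)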
Code example #5
File: run.py  Project: mikeheddes/DC_NN
import mnist
import numpy as np
import neural_net as nn

training_data, validation_data, test_data = mnist.load_data_wrapper()

m = nn.Model()
W = nn.variance()  # variance-scaled weight initializer

m.Add(nn.Input(784))
m.Add(nn.Dense(200, W_init=W))
m.Add(nn.leaky_relu())
m.Add(nn.Dropout(pKeep=0.75))  # keep 75% of activations while training
m.Add(nn.Dense(80, W_init=W))
m.Add(nn.leaky_relu())
m.Add(nn.Dropout(pKeep=0.75))
m.Add(nn.Dense(10, W_init=W))
m.Add(nn.softmax())

m.Compile(optimizer=nn.L2())  # L2 weight regularization

# Step-decay schedule from 0.1 down to 0.001 over training.
LR = nn.learning_rate(0.1, to=0.001, func="STEP")
m.Train(list(training_data),
        loss=nn.cross_entropy,
        batch_size=10,
        epochs=50,
        learning_rate=LR,
        test_data=list(test_data))
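
nn.Dropout(pKeep=0.75) comes from the project's own module, but pKeep-style dropout is conventionally implemented as inverted dropout: surviving activations are rescaled at train time so inference needs no adjustment. A minimal sketch of that convention:

import numpy as np

def inverted_dropout(a, p_keep=0.75, training=True):
    # Zero each unit with probability 1 - p_keep and divide the survivors
    # by p_keep, keeping the expected activation unchanged.
    if not training:
        return a
    mask = (np.random.rand(*a.shape) < p_keep) / p_keep
    return a * mask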