Пример #1
0
def main(_):
    """Entry point: build the model from the flag-selected config and run it.

    Depending on ``config.train``, either trains the network or runs
    inference, inside a single TF1 session.
    """
    conf = nn_config.NNConfig(FLAGS.conf).get_config()
    net = nn_model.NNModel(conf)

    # Ensure the dump directory exists before anything tries to write to it.
    if not tf.gfile.Exists(conf.dump_path):
        tf.gfile.MakeDirs(conf.dump_path)

    with tf.Session() as sess:
        net.set_session(sess)
        # The config flag picks the mode: training vs. inference.
        if conf.train:
            net.train()
        else:
            net.inference()
Пример #2
0
def main():
    """Load and normalize the training data, hold out a dev split, train."""
    xs, ys = ut.read_data('train_x'), ut.read_data("train_y")
    xs, ys = ut.norm_data(xs), ut.norm_data(ys)

    # Hold out the final 20% of the samples as a validation (dev) set.
    n_dev = int(len(xs) * 0.2)
    dev_xs, dev_ys = xs[-n_dev:, :], ys[-n_dev:]
    xs, ys = xs[:-n_dev, :], ys[:-n_dev]

    # 784 inputs (28x28 images), one hidden layer of 50, 10 output classes;
    # 20 epochs with learning rate 0.1.
    net = nn.NNModel(784, 50, 10)
    net.train_nn(xs, ys, dev_xs, dev_ys, 20, 0.1)
    print("hey")
Пример #3
0
        # Checkpoint the whole population: one saved model file per member,
        # named ga200<index>.mdl. (Python 2 print statement; `population` and
        # `save_model` are defined outside this visible fragment.)
        print '****** saving population...'
        count = 0
        # NOTE(review): manual counter; enumerate(population) would be the
        # idiomatic form, kept as-is since this fragment's scope is not fully
        # visible here.
        for m in population:
            save_model(m, '../saved_models/ga200' + str(count) + '.mdl')
            count += 1


if __name__ == '__main__':
    # Script entry: either resume a saved population (any CLI argument
    # present) or create a fresh one of randomly initialized networks.
    models = []
    population_size = 50
    if len(sys.argv) > 1:
        # Resume: load each previously saved member, in index order.
        models = [load_model('../saved_models/ga200' + str(idx) + '.mdl')
                  for idx in range(population_size)]
    else:
        # Fresh start: 784-input networks with two hidden layers (256, 128)
        # and 10 output classes.
        for _ in range(population_size):
            net = nn_mdl.NNModel()
            net.initialize(28 * 28, 256, 128, 10)
            models.append(net)

    train_set, dev_set = load_mnist('../mnist_data')

    # Selection wheel: index i appears int(sqrt(population_size - i)) times,
    # so earlier indices get more slots — presumably the population is
    # fitness-ranked; verify against train_classifier's selection logic.
    roulette_wheel = []
    for rank in range(population_size):
        roulette_wheel.extend([rank] * int((population_size - rank) ** 0.5))
    # Shuffled several times (one shuffle would suffice; kept as written).
    for _ in range(7):
        rand.shuffle(roulette_wheel)

    train_classifier(train_set, dev_set, 30, models, roulette_wheel)
Пример #4
0
    # --- Dead code below: parameter-statistics dump and a side-by-side
    # --- input/reconstruction plot, both commented out (Python 2 prints).
    #         rows = param.shape[0]
    #     mean = np.sum(param) / (rows * cols)
    #     std = np.sqrt(np.sum(np.abs(param - mean)) / (rows * cols))
    #     print 'param mean:', mean
    #     print 'param std:', std
    #     print 'param max', np.max(param)
    #     print 'param min', np.min(param)

    # plt.imshow(np.concatenate((np.concatenate((train_set[0][0:-1].reshape((28,28)), model.feed_forward(train_set[0][0:-1]).reshape((28,28))), axis=1),
    #                            np.concatenate((train_set[1][0:-1].reshape((28, 28)), model.feed_forward(train_set[1][0:-1]).reshape((28, 28))), axis=1),
    #                            np.concatenate((train_set[2][0:-1].reshape((28, 28)), model.feed_forward(train_set[2][0:-1]).reshape((28, 28))), axis=1),
    #                            np.concatenate((train_set[3][0:-1].reshape((28, 28)), model.feed_forward(train_set[3][0:-1]).reshape((28, 28))), axis=1),
    #                            np.concatenate((train_set[4][0:-1].reshape((28, 28)), model.feed_forward(train_set[4][0:-1]).reshape((28, 28))), axis=1),
    #                            np.concatenate((train_set[5][0:-1].reshape((28, 28)), model.feed_forward(train_set[5][0:-1]).reshape((28, 28))), axis=1),),
    #                           axis=0))
    # plt.show()

    # model_file = '../saved_models/nn2classifier.mdl.pickle'
    # model = nn_mdl.NNModel([256, 256, 10])
    # Build the classifier: 784 inputs (28x28), hidden layers of 400 and 200,
    # 10 output classes.
    model = nn_mdl.NNModel([28 * 28, 400, 200, 10])
    # model = load_model(model_file)
    # NOTE(review): `model_file` is passed below but its only visible
    # assignment (two comments up) is commented out — this raises NameError
    # unless model_file is defined earlier in the file. Confirm, and either
    # restore the assignment or drop the keyword argument.
    train_classifier(train_set,
                     dev_set,
                     num_iterations=20,
                     learning_rate=0.01,
                     model=model,
                     regularization=1e-7,
                     model_file=model_file)

    # train_auto_encoder(train_set, dev_set, num_iterations=20, learning_rate=0.001, model=model,