def main(_):
    """Build the graph, open a session and start training the neural network.

    Runs ``FLAGS.num_epoch`` epochs. Each epoch trains over all complete
    mini-batches, then evaluates test loss and mean absolute error over
    every test sample. A checkpoint is written whenever the epoch's mean
    absolute error drops below 0.9.
    """
    # Floor division: any trailing partial batch is intentionally dropped.
    num_batches = FLAGS.num_samples // FLAGS.batch_size

    with tf.Graph().as_default():

        train_data, train_data_infer = _get_training_data(FLAGS)
        test_data = _get_test_data(FLAGS)

        iter_train = train_data.make_initializable_iterator()
        iter_train_infer = train_data_infer.make_initializable_iterator()
        iter_test = test_data.make_initializable_iterator()

        x_train = iter_train.get_next()
        x_train_infer = iter_train_infer.get_next()
        x_test = iter_test.get_next()

        model = TrainModel(FLAGS, 'training')

        train_op, train_loss_op = model.train(x_train)
        prediction, labels, test_loss_op, mae_ops = model._validation_loss(x_train_infer, x_test)

        saver = tf.train.Saver()

        with tf.Session() as sess:

            sess.run(tf.global_variables_initializer())

            for epoch in range(FLAGS.num_epoch):

                # Fresh accumulators each epoch (replaces the reset-at-the-end pattern).
                train_loss = 0.0
                test_loss = []
                mae = []

                # One-shot iterators must be re-initialized every epoch.
                sess.run(iter_train.initializer)
                sess.run(iter_train_infer.initializer)
                sess.run(iter_test.initializer)

                for _batch_nr in range(num_batches):
                    _, loss_ = sess.run((train_op, train_loss_op))
                    train_loss += loss_

                for _sample_nr in range(FLAGS.num_samples):
                    # prediction/labels are fetched so the ops run, but only the
                    # scalar metrics are consumed here.
                    _, _, loss_, mae_ = sess.run((prediction, labels, test_loss_op, mae_ops))
                    test_loss.append(loss_)
                    mae.append(mae_)

                print('epoch_nr: %i, train_loss: %.3f, test_loss: %.3f, mean_abs_error: %.3f'
                      %(epoch,(train_loss/num_batches),np.mean(test_loss), np.mean(mae)))

                # Persist the model once it reaches the target error threshold.
                if np.mean(mae) < 0.9:
                    saver.save(sess, FLAGS.checkpoints_path)
# Esempio n. 2 (Example no. 2 — scraped separator, commented out so the file parses)
# 0
def main(_):
    """Build the graph, open a session and start training the neural network.

    Trains the DAE model for ``FLAGS.num_epoch`` epochs and prints the mean
    training and test loss after each epoch. Per-epoch means are also
    appended to local summary lists.
    """
    # Floor division: any trailing partial batch is intentionally dropped.
    num_batches = FLAGS.num_samples // FLAGS.batch_size

    # NOTE(review): these summaries are filled but never read or returned in
    # this function — confirm whether they were meant to be plotted/saved.
    train_loss_summary = []
    test_loss_summary = []

    with tf.Graph().as_default():

        train_data, train_data_infer = _get_training_data(FLAGS)
        test_data = _get_test_data(FLAGS)

        iter_train = train_data.make_initializable_iterator()
        iter_train_infer = train_data_infer.make_initializable_iterator()
        iter_test = test_data.make_initializable_iterator()

        x_train = iter_train.get_next()
        x_train_infer = iter_train_infer.get_next()
        x_test = iter_test.get_next()

        model = DAE(FLAGS)

        train_op, train_loss_op = model._optimizer(x_train)
        pred_op, test_loss_op = model._validation_loss(x_train_infer, x_test)

        with tf.Session() as sess:

            sess.run(tf.global_variables_initializer())

            for epoch in range(FLAGS.num_epoch):

                # Fresh accumulators each epoch (replaces the reset-at-the-end pattern).
                train_loss = 0.0
                test_loss = 0.0

                sess.run(iter_train.initializer)

                for _batch_nr in range(num_batches):
                    _, loss_ = sess.run((train_op, train_loss_op))
                    train_loss += loss_

                sess.run(iter_train_infer.initializer)
                sess.run(iter_test.initializer)

                for _sample_nr in range(FLAGS.num_samples):
                    # The prediction op is fetched so it runs; only the loss is used.
                    _, loss_ = sess.run((pred_op, test_loss_op))
                    test_loss += loss_

                # Compute the epoch means once and reuse for print + summaries.
                mean_train = train_loss / num_batches
                mean_test = test_loss / FLAGS.num_samples
                print('epoch_nr: %i, train_loss: %.3f, test_loss: %.3f' %
                      (epoch, mean_train, mean_test))
                train_loss_summary.append(mean_train)
                test_loss_summary.append(mean_test)
def main(_):
    """Build the graph, open a session and start training the neural network.

    Trains the RBM for ``FLAGS.num_epoch`` epochs. Every ``FLAGS.eval_after``
    training batches the model is evaluated on the test set: for each test
    sample the reconstruction accuracy is computed over the rated entries
    only (entries with ``v_target >= 0``).
    """
    # Floor division: any trailing partial batch is intentionally dropped.
    num_batches = FLAGS.num_samples // FLAGS.batch_size

    with tf.Graph().as_default():

        train_data, train_data_infer = _get_training_data(FLAGS)
        test_data = _get_test_data(FLAGS)

        iter_train = train_data.make_initializable_iterator()
        iter_train_infer = train_data_infer.make_initializable_iterator()
        iter_test = test_data.make_initializable_iterator()

        x_train = iter_train.get_next()
        x_train_infer = iter_train_infer.get_next()
        x_test = iter_test.get_next()

        model = RBM(FLAGS)

        update_op, accuracy = model.optimize(x_train)
        v_infer = model.inference(x_train_infer)

        with tf.Session() as sess:

            sess.run(tf.global_variables_initializer())

            for epoch in range(FLAGS.num_epoch):

                acc_train = 0.0
                acc_infer = 0.0

                sess.run(iter_train.initializer)

                for batch_nr in range(num_batches):
                    _, acc = sess.run((update_op, accuracy))
                    acc_train += acc

                    # Periodic evaluation on the test set.
                    if batch_nr > 0 and batch_nr % FLAGS.eval_after == 0:

                        sess.run(iter_train_infer.initializer)
                        sess.run(iter_test.initializer)

                        num_valid_batches = 0

                        for _sample_nr in range(FLAGS.num_samples):

                            v_target = sess.run(x_test)[0]

                            # Only samples with at least one rated entry (>= 0)
                            # contribute to the test accuracy.
                            if len(v_target[v_target >= 0]) > 0:

                                v_ = sess.run(v_infer)[0]
                                acc = 1.0 - np.mean(
                                    np.abs(v_[v_target >= 0] -
                                           v_target[v_target >= 0]))
                                acc_infer += acc
                                num_valid_batches += 1

                        # BUGFIX: guard against ZeroDivisionError when no test
                        # sample had any rated entry.
                        mean_acc_infer = (acc_infer / num_valid_batches
                                          if num_valid_batches > 0 else 0.0)
                        print(
                            'epoch_nr: %i, batch: %i/%i, acc_train: %.3f, acc_test: %.3f'
                            % (epoch, batch_nr, num_batches,
                               (acc_train / FLAGS.eval_after),
                               mean_acc_infer))

                        acc_train = 0.0
                        acc_infer = 0.0
# Esempio n. 4 (Example no. 4 — scraped separator, commented out so the file parses)
# 0
    'Number of visible neurons (Number of movies the users rated.)')

tf.app.flags.DEFINE_integer('num_h', 128, 'Number of hidden neurons.)')

tf.app.flags.DEFINE_integer(
    'num_samples', 5953,
    'Number of training samples (Number of users, who gave a rating).')
FLAGS = tf.app.flags.FLAGS

num_batches = int(FLAGS.num_samples / FLAGS.batch_size)

with tf.Graph().as_default():
    train_loss_summary = []
    test_loss_summary = []
    train_data, train_data_infer = _get_training_data(FLAGS)
    test_data = _get_test_data(FLAGS)

    iter_train = train_data.make_initializable_iterator()
    iter_train_infer = train_data_infer.make_initializable_iterator()
    iter_test = test_data.make_initializable_iterator()

    x_train = iter_train.get_next()
    x_train_infer = iter_train_infer.get_next()
    x_test = iter_test.get_next()

    model = DAE(FLAGS)

    train_op, train_loss_op = model._optimizer(x_train)
    pred_op, test_loss_op = model._validation_loss(x_train_infer, x_test)

    with tf.Session() as sess: