Example #1
def eval(logdir='logdir/default/train1', queue=False):
    # Load graph
    model = Model(mode="test1", batch_size=hp.Test1.batch_size, queue=queue)

    # Accuracy
    acc_op = model.acc_net1()

    # Loss
    loss_op = model.loss_net1()

    # Summary
    summ_op = summaries(acc_op, loss_op)

    session_conf = tf.ConfigProto(
        allow_soft_placement=True,
        device_count={
            'CPU': 1,
            'GPU': 0
        },
    )
    with tf.Session(config=session_conf) as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        writer = tf.summary.FileWriter(logdir, sess.graph)

        # Load trained model
        sess.run(tf.global_variables_initializer())
        model.load(sess, 'train1', logdir=logdir)

        if queue:
            summ, acc, loss = sess.run([summ_op, acc_op, loss_op])
        else:
            mfcc, ppg = get_batch(model.mode, model.batch_size)
            summ, acc, loss = sess.run([summ_op, acc_op, loss_op],
                                       feed_dict={
                                           model.x_mfcc: mfcc,
                                           model.y_ppgs: ppg
                                       })

        writer.add_summary(summ)

        print("acc:", acc)
        print("loss:", loss)
        print('\n')

        writer.close()

        coord.request_stop()
        coord.join(threads)
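
The summaries() helper used above is not shown in any of these examples. A minimal sketch of what it plausibly does, assuming it simply tags the two ops as scalars and merges everything collected so far (the tag names are guesses, not the project's actual ones):

import tensorflow as tf  # TensorFlow 1.x API, as in the examples

def summaries(acc, loss):
    # Hypothetical implementation: register the eval ops as scalar
    # summaries and return a single merged op to run in the session.
    tf.summary.scalar('net1/eval/acc', acc)
    tf.summary.scalar('net1/eval/loss', loss)
    return tf.summary.merge_all()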
Example #2
def eval(logdir, hparams):
    # Load graph
    model = Model(mode="test1", hparams=hparams)

    # Accuracy
    acc_op = model.acc_net1()

    # Loss
    loss_op = model.loss_net1()

    # Summary
    summ_op = summaries(acc_op, loss_op)

    #session_conf = tf.ConfigProto(
    #    allow_soft_placement=True,
    #    device_count={'CPU': 1, 'GPU': 0},
    #)

    session_conf = tf.ConfigProto()
    session_conf.gpu_options.per_process_gpu_memory_fraction = 0.9

    with tf.Session(config=session_conf) as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        writer = tf.summary.FileWriter(logdir, sess.graph)

        # Load trained model
        sess.run(tf.global_variables_initializer())
        model.load(sess, 'train1', logdir=logdir)

        mfcc, ppg = get_batch(model.mode, model.batch_size)
        summ, acc, loss = sess.run([summ_op, acc_op, loss_op],
                                   feed_dict={
                                       model.x_mfcc: mfcc,
                                       model.y_ppgs: ppg
                                   })

        writer.add_summary(summ)

        print("acc:", acc)
        print("loss:", loss)
        print('\n')

        writer.close()

        coord.request_stop()
        coord.join(threads)
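
Instead of pinning a fixed fraction of GPU memory as this example does, the session can be told to grow its allocation on demand, which is what Examples #5 and #6 use. The equivalent configuration in the same style:

import tensorflow as tf  # TensorFlow 1.x

session_conf = tf.ConfigProto()
session_conf.gpu_options.allow_growth = True  # allocate GPU memory as needed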
Example #3
def eval(logdir='logdir/default/train1', queue=False):
    # Load graph
    model = Model(mode="test1", batch_size=hp.Test1.batch_size, queue=queue)

    # Accuracy
    acc_op = model.acc_net1()

    # Loss
    loss_op = model.loss_net1()

    # Summary
    summ_op = summaries(acc_op, loss_op)

    session_conf = tf.ConfigProto(
        allow_soft_placement=True,
        device_count={'CPU': 1, 'GPU': 0},
    )
    with tf.Session(config=session_conf) as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        writer = tf.summary.FileWriter(logdir, sess.graph)

        # Load trained model
        sess.run(tf.global_variables_initializer())
        model.load(sess, 'train1', logdir=logdir)

        if queue:
            summ, acc, loss = sess.run([summ_op, acc_op, loss_op])
        else:
            mfcc, ppg = get_batch(model.mode, model.batch_size)
            summ, acc, loss = sess.run([summ_op, acc_op, loss_op], feed_dict={model.x_mfcc: mfcc, model.y_ppgs: ppg})

        writer.add_summary(summ)

        print("acc:", acc)
        print("loss:", loss)
        print('\n')

        writer.close()

        coord.request_stop()
        coord.join(threads)
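
model.load(sess, 'train1', logdir=logdir) is defined elsewhere in the project. A common TF1 restore pattern it likely wraps (an assumption, not the project's verified code):

import tensorflow as tf  # TensorFlow 1.x

def load_latest(sess, logdir):
    # Hypothetical helper: restore the newest checkpoint under logdir, if any.
    ckpt = tf.train.latest_checkpoint(logdir)
    if ckpt is not None:
        tf.train.Saver().restore(sess, ckpt)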
Example #4
def train(logdir, hparams):

    model = Model(mode="train1", hparams=hparams)

    # Loss
    loss_op = model.loss_net1()

    # Accuracy
    acc_op = model.acc_net1()

    # Training Scheme
    global_step = tf.Variable(0, name='global_step', trainable=False)

    optimizer = tf.train.AdamOptimizer(learning_rate=hparams.Train1.lr)
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'net/net1')
        train_op = optimizer.minimize(loss_op, global_step=global_step, var_list=var_list)

    # Summary
    # for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'net/net1'):
    #     tf.summary.histogram(v.name, v)
    tf.summary.scalar('net1/train/loss', loss_op)
    tf.summary.scalar('net1/train/acc', acc_op)
    summ_op = tf.summary.merge_all()

    #session_conf = tf.ConfigProto(
    #    gpu_options=tf.GPUOptions(
    #        allow_growth=True,
    #    ),
    #)

    session_conf = tf.ConfigProto()
    session_conf.gpu_options.per_process_gpu_memory_fraction = 0.9

    # Training
    with tf.Session(config=session_conf) as sess:
        # Load trained model
        sess.run(tf.global_variables_initializer())
        model.load(sess, 'train1', logdir=logdir)

        writer = tf.summary.FileWriter(logdir, sess.graph)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        for epoch in range(1, hparams.Train1.num_epochs + 1):
            for step in range(model.num_batch):
                mfcc, ppg = get_batch(model.mode, model.batch_size)
                sess.run(train_op, feed_dict={model.x_mfcc: mfcc, model.y_ppgs: ppg})

            # Write checkpoint files at every epoch
            summ, gs = sess.run([summ_op, global_step], feed_dict={model.x_mfcc: mfcc, model.y_ppgs: ppg})

            if epoch % hparams.Train1.save_per_epoch == 0:
                tf.train.Saver().save(sess, '{}/epoch_{}_step_{}'.format(logdir, epoch, gs))

            # Write eval accuracy at every epoch
            with tf.Graph().as_default():
                eval1.eval(logdir=logdir, hparams=hparams)

            writer.add_summary(summ, global_step=gs)

        writer.close()
        coord.request_stop()
        coord.join(threads)
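
The hparams argument is an attribute-style config object; only the fields referenced above (Train1.lr, Train1.num_epochs, Train1.save_per_epoch) are known. A hypothetical stand-in for dry-running train(), with placeholder values (the real project presumably loads these from a config file):

class Section(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

class HParams(object):
    # Field names come from the example above; the values are made up.
    Train1 = Section(lr=0.0003, num_epochs=100, save_per_epoch=10)

train(logdir='logdir/default/train1', hparams=HParams())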
Example #5
def train(logdir='logdir/default/train1', queue=True):
    model = Model(mode="train1", batch_size=hp.Train1.batch_size, queue=queue)

    # Loss
    loss_op = model.loss_net1()

    # Accuracy
    acc_op = model.acc_net1()

    # Training Scheme
    global_step = tf.Variable(0, name='global_step', trainable=False)

    optimizer = tf.train.AdamOptimizer(learning_rate=hp.Train1.lr)
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                     'net/net1')
        train_op = optimizer.minimize(loss_op,
                                      global_step=global_step,
                                      var_list=var_list)

    # Summary
    for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'net/net1'):
        tf.summary.histogram(v.name, v)
    tf.summary.scalar('net1/train/loss', loss_op)
    tf.summary.scalar('net1/train/acc', acc_op)
    summ_op = tf.summary.merge_all()

    session_conf = tf.ConfigProto(
        gpu_options=tf.GPUOptions(allow_growth=True),
    )
    # Training
    with tf.Session(config=session_conf) as sess:
        # Load trained model
        sess.run(tf.global_variables_initializer())
        model.load(sess, 'train1', logdir=logdir)

        writer = tf.summary.FileWriter(logdir, sess.graph)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        for epoch in range(1, hp.Train1.num_epochs + 1):
            for step in tqdm(range(model.num_batch),
                             total=model.num_batch,
                             ncols=70,
                             leave=False,
                             unit='b'):
                if queue:
                    sess.run(train_op)
                else:
                    mfcc, ppg = get_batch(model.mode, model.batch_size)
                    #print("MFCC shape: {}".format(mfcc.shape))
                    #print("types: {} and {}".format(mfcc.dtype, ppg.dtype))
                    #print("PPG shape: {}".format(ppg.shape))
                    sess.run(train_op,
                             feed_dict={
                                 model.x_mfcc: mfcc,
                                 model.y_ppgs: ppg
                             })

            # Write checkpoint files at every epoch
            summ, gs = sess.run([summ_op, global_step],
                                feed_dict={
                                    model.x_mfcc: mfcc,
                                    model.y_ppgs: ppg
                                })
            # In some environments the placeholders must be fed even for these summary ops
            if epoch % hp.Train1.save_per_epoch == 0:
                tf.train.Saver().save(
                    sess, '{}/epoch_{}_step_{}'.format(logdir, epoch, gs))

            # Write eval accuracy at every epoch
            with tf.Graph().as_default():
                eval1.eval(logdir=logdir, queue=False)

            writer.add_summary(summ, global_step=gs)

        writer.close()
        coord.request_stop()
        coord.join(threads)
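
get_batch() comes from the project's data-loading module and is not shown. For exercising the non-queue path, a hypothetical stub with shapes guessed from the placeholder names x_mfcc and y_ppgs (every dimension below is made up):

import numpy as np

def get_batch(mode, batch_size, timesteps=100, n_mfcc=40, n_phns=61):
    # Hypothetical stub: random MFCC frames and integer phoneme targets.
    mfcc = np.random.randn(batch_size, timesteps, n_mfcc).astype(np.float32)
    ppg = np.random.randint(0, n_phns, size=(batch_size, timesteps)).astype(np.int32)
    return mfcc, ppg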
Example #6
def train(logdir='logdir/default/train1', queue=True):
    model = Model(mode="train1", batch_size=hp.Train1.batch_size, queue=queue)

    # Loss
    loss_op = model.loss_net1()

    # Accuracy
    acc_op = model.acc_net1()

    # Training Scheme
    global_step = tf.Variable(0, name='global_step', trainable=False)

    optimizer = tf.train.AdamOptimizer(learning_rate=hp.Train1.lr)
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'net/net1')
        train_op = optimizer.minimize(loss_op, global_step=global_step, var_list=var_list)

    # Summary
    for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'net/net1'):
        tf.summary.histogram(v.name, v)
    tf.summary.scalar('net1/train/loss', loss_op)
    tf.summary.scalar('net1/train/acc', acc_op)
    summ_op = tf.summary.merge_all()

    session_conf = tf.ConfigProto(
        gpu_options=tf.GPUOptions(
            allow_growth=True,
        ),
    )
    # Training
    with tf.Session(config=session_conf) as sess:
        # Load trained model
        sess.run(tf.global_variables_initializer())
        model.load(sess, 'train1', logdir=logdir)

        writer = tf.summary.FileWriter(logdir, sess.graph)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        for epoch in range(1, hp.Train1.num_epochs + 1):
            for step in tqdm(range(model.num_batch), total=model.num_batch, ncols=70, leave=False, unit='b'):
                if queue:
                    sess.run(train_op)
                else:
                    mfcc, ppg = get_batch(model.mode, model.batch_size)
		    #print("MFCC shape: {}".format(mfcc.shape))
		    #print("types: {} and {}".format(mfcc.dtype, ppg.dtype))
		    #print("PPG shape: {}".format(ppg.shape))
                    sess.run(train_op, feed_dict={model.x_mfcc: mfcc, model.y_ppgs: ppg})

            # Write checkpoint files at every epoch
            summ, gs = sess.run([summ_op, global_step], feed_dict={model.x_mfcc: mfcc, model.y_ppgs: ppg})
            # In some environments the placeholders must be fed even for these summary ops
            if epoch % hp.Train1.save_per_epoch == 0:
                tf.train.Saver().save(sess, '{}/epoch_{}_step_{}'.format(logdir, epoch, gs))

            # Write eval accuracy at every epoch
            with tf.Graph().as_default():
                eval1.eval(logdir=logdir, queue=False)

            writer.add_summary(summ, global_step=gs)

        writer.close()
        coord.request_stop()
        coord.join(threads)
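
One note on checkpointing in Examples #4 through #6: a fresh tf.train.Saver() is constructed on every save, which adds new save ops to the graph each time. The usual TF1 pattern builds a single Saver once the graph is final and reuses it, e.g.:

# Build once, after the graph is constructed:
saver = tf.train.Saver(max_to_keep=5)
# Then, inside the epoch loop, in place of tf.train.Saver().save(...):
saver.save(sess, '{}/epoch_{}_step_{}'.format(logdir, epoch, gs))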