Example #1
def train():
    c = constants()
    p = params()

    x, y = f.inputs([f.TRAIN_TF_RECORDS_FILE], shuffle=True, name='inputs')
    u.shape_log(x)
    u.shape_log(y)

    logits = model(x, c)

    logits = tf.reshape(logits, [-1, c.MAX_TIME, f.OUTPUT_DIM])
    labels = tf.reshape(y, [-1, c.MAX_TIME, f.OUTPUT_DIM])

    loss = tf.reduce_mean(tf.square(logits - labels))

    train_op = tf.train.AdamOptimizer(learning_rate=0.05).minimize(
        loss, global_step=p.global_step)
    init_op = tf.global_variables_initializer()

    with tf.Session() as sess:

        # initialize global variables
        sess.run(init_op)

        # enable batch fetchers
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        # set up the saver
        saver = u.saver_ops(sess=sess,
                            max_to_keep=3,
                            global_step=p.global_step)

        # connect to matlab up front so the emergency-stop flag can be polled
        eng = mo.matlab_connection(c.MATLAB_SESSION)

        # run the training operation
        for indx in range(20000):
            _, log = sess.run([train_op, logits])

            u.display_update(indx, 25)

            # checkpoint every 2000 steps
            if (indx + 1) % 2000 == 0:
                saver.save()

            # poll the matlab-side emergency stop every 5 steps
            if indx % 5 == 0:
                if eng.should_stop():
                    print('stopping')
                    break

        saver.save()
        coord.request_stop()
        coord.join(threads)
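
The Coordinator/queue-runner calls above imply the classic TF1 queue-based input pipeline. A minimal sketch of what f.inputs might look like under that assumption follows; the feature keys and shapes are guesses, not the project's actual schema.

import tensorflow as tf

def inputs(filenames, shuffle=False, name='inputs'):
    with tf.name_scope(name):
        # queue of input files, drained by the queue runners started in train()
        filename_queue = tf.train.string_input_producer(filenames, shuffle=shuffle)
        reader = tf.TFRecordReader()
        _, serialized = reader.read(filename_queue)
        # feature keys and lengths are assumptions
        features = tf.parse_single_example(serialized, features={
            'x': tf.FixedLenFeature([100], tf.float32),
            'y': tf.FixedLenFeature([100], tf.float32),
        })
        return features['x'], features['y']
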
Example #2
def model(X, c):
    cells = [rnn.LSTMCell(num_units=c.NUM_UNITS) for _ in range(c.NUM_LAYERS)]
    multicell = rnn.MultiRNNCell(cells, state_is_tuple=True)

    # reshape the flat input into [batch_size, max_time, input_dim]
    reshape_op = tf.reshape(X, [-1, c.MAX_TIME, 1], name='reshape_op')

    outputs, H = tf.nn.dynamic_rnn(multicell, reshape_op, dtype=tf.float32)

    # flatten time into the batch dimension, then apply the shared dense layer;
    # max_time is hard-coded to 100 here, presumably equal to c.MAX_TIME
    reshape_op = tf.reshape(outputs, [-1, c.NUM_UNITS])
    dense_op = u.distributed_dense(reshape_op, units=1, max_time=100)

    # collapse to a flat [max_time] vector; assumes batch_size == 1
    logits = tf.reshape(dense_op, [c.MAX_TIME])
    u.shape_log(logits)

    return logits
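
u.distributed_dense is not shown on this page, but Example #5 below inlines what appears to be the same computation. Here is a sketch consistent with that code, with the signature taken from the call above; the body is an inference, not the project's source.

import tensorflow as tf

def distributed_dense(inputs, units, max_time):
    # inputs: [batch_size * max_time, num_units]; a single scalar weight and
    # bias are shared across every unit and time step (cf. Example #5)
    num_units = inputs.get_shape().as_list()[-1]
    unit_kern = tf.Variable(tf.random_normal([1, units]))
    unit_bias = tf.Variable(tf.random_normal([1, units]))
    kern = tf.matmul(tf.ones([num_units, 1]), unit_kern)  # [num_units, units]
    bias = tf.matmul(tf.ones([max_time, 1]), unit_bias)   # [max_time, units]
    # the add broadcasts cleanly only when batch_size == 1, as in these examples
    return tf.add(tf.matmul(inputs, kern), bias)
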
Example #3
def train():
    c = constants()
    p = params()

    x, y = f.inputs([f.TRAIN_TF_RECORDS_FILE], name='inputs')
    u.shape_log(x)
    u.shape_log(y)

    logits = model(x, c)

    labels = tf.reshape(y, [c.MAX_TIME])

    loss = tf.reduce_mean(tf.square(logits - labels))

    train_op = tf.train.AdamOptimizer(learning_rate=0.05).minimize(
        loss, global_step=p.global_step)
    init_op = tf.global_variables_initializer()

    with tf.Session() as sess:

        # initialize global variables
        sess.run(init_op)

        # enable batch fetchers
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        # set up the saver
        saver = u.saver_ops(sess=sess,
                            max_to_keep=3,
                            global_step=p.global_step)

        # collect the network output at every step for inspection in matlab
        logi = []

        # run the training operation
        for indx in range(10000):
            _, log = sess.run([train_op, logits])
            logi.append(log)

            u.display_update(indx, 50)

        # connect to matlab
        eng = mo.matlab_connection(c.MATLAB_SESSION)
        eng.put_var('logits', logi)

        saver.save()
        coord.request_stop()
        coord.join(threads)
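
u.saver_ops is used the same way in every example. A sketch of such a wrapper around tf.train.Saver follows; the checkpoint path and the class layout are assumptions.

import tensorflow as tf

class saver_ops(object):
    def __init__(self, sess, max_to_keep, global_step, path='./checkpoints/model'):
        # bind the session and step so call sites can just say saver.save()
        self.sess = sess
        self.global_step = global_step
        self.path = path
        self.saver = tf.train.Saver(max_to_keep=max_to_keep)

    def save(self):
        # write a checkpoint tagged with the current global step
        self.saver.save(self.sess, self.path, global_step=self.global_step)

    def restore(self, ckpt_dir='./checkpoints'):
        # reload the most recent checkpoint, if one exists
        latest = tf.train.latest_checkpoint(ckpt_dir)
        if latest is not None:
            self.saver.restore(self.sess, latest)
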
Example #4
def test():
    c = constants()
    p = params()

    # connect to matlab
    eng = mo.matlab_connection(c.MATLAB_SESSION)

    x, y = f.inputs([f.TEST_TF_RECORDS_FILE], shuffle=True, name='inputs')
    u.shape_log(x)
    u.shape_log(y)

    logits = model(x, c)
    logits = tf.reshape(logits, [-1, c.MAX_TIME, f.OUTPUT_DIM])
    labels = tf.reshape(y, [-1, c.MAX_TIME, f.OUTPUT_DIM])

    init_op = tf.global_variables_initializer()

    with tf.Session() as sess:

        # initialize global variables
        sess.run(init_op)

        # enable batch fetchers
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        # set up the saver (presumably u.saver_ops also restores the latest
        # checkpoint here; otherwise the model would be evaluated with freshly
        # initialized weights)
        saver = u.saver_ops(sess=sess,
                            max_to_keep=3,
                            global_step=p.global_step)

        logi = []
        labs = []

        # run the evaluation loop
        for indx in range(6):
            log, lab = sess.run([logits, labels])
            logi.append(log)
            labs.append(lab)

            u.display_update(indx, 250)

        eng.put_var('logits', logi)
        eng.put_var('labels', labs)

        coord.request_stop()
        coord.join(threads)
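
mo.matlab_connection bridges to a running MATLAB session, both for shipping results out (put_var) and for the emergency stop in Example #1 (should_stop). A sketch assuming it wraps the official MATLAB Engine API for Python; the stop_flag workspace variable is a hypothetical name.

import numpy as np
import matlab.engine

class matlab_connection(object):
    def __init__(self, session_name):
        # attach to an already-running shared MATLAB session
        self.eng = matlab.engine.connect_matlab(session_name)

    def put_var(self, name, value):
        # copy a value into the MATLAB base workspace; numeric arrays have
        # to be converted to matlab.double first
        self.eng.workspace[name] = matlab.double(np.asarray(value).tolist())

    def should_stop(self):
        # poll a MATLAB-side flag so the user can abort training from MATLAB
        exists = self.eng.eval("exist('stop_flag', 'var')", nargout=1)
        return bool(exists) and bool(self.eng.workspace['stop_flag'])
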
Example #5
def model(X, c):
    cells = [rnn.LSTMCell(num_units=c.NUM_UNITS) for _ in range(c.NUM_LAYERS)]
    multicell = rnn.MultiRNNCell(cells, state_is_tuple=True)

    # hard-coded shape: batch_size 1, max_time 100, input_dim 1
    reshape_op = tf.reshape(X, [1, 100, 1], name='reshape_op')

    outputs, H = tf.nn.dynamic_rnn(multicell, reshape_op, dtype=tf.float32)

    u.shape_log(outputs)

    # outputs has shape => [batch_size, max_time, num_units]
    # reshape it to     => [batch_size*max_time, num_units]
    # apply a fully connected layer with a shared weight and bias,
    # mapping to        => [batch_size*max_time, n_outputs]

    reshape_op = tf.reshape(outputs, [-1, c.NUM_UNITS])
    # a single scalar weight and bias, replicated across all units / rows
    unit_kern = tf.Variable(tf.random_normal([1, 1]), dtype=tf.float32)
    unit_bias = tf.Variable(tf.random_normal([1, 1]), dtype=tf.float32)
    kern = tf.matmul(tf.ones([c.NUM_UNITS, 1], dtype=tf.float32), unit_kern)
    bias = tf.matmul(tf.ones([100, 1], dtype=tf.float32), unit_bias)  # 100 == batch_size*max_time

    mult_op = tf.matmul(reshape_op, kern, name='multop')
    dense_op = tf.add(mult_op, bias, name='addop')

    u.shape_log(dense_op)

    # flat [max_time] vector; assumes batch_size == 1
    logits = tf.reshape(dense_op, [100])
    u.shape_log(logits)

    return logits
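
The reshape trick above is the general recipe for a time-distributed dense layer. An ordinary dense layer applied to the flattened tensor already shares its kernel across all rows, which is exactly what Example #6 below does with tf.layers.dense; the difference is that tf.layers.dense learns a separate weight per input unit rather than one shared scalar. A minimal standalone sketch with illustrative sizes:

import tensorflow as tf

NUM_UNITS, MAX_TIME = 32, 100                 # illustrative sizes
outputs = tf.zeros([1, MAX_TIME, NUM_UNITS])  # stand-in for the RNN output
flat = tf.reshape(outputs, [-1, NUM_UNITS])   # [batch*time, num_units]
dense = tf.layers.dense(flat, 1)              # kernel [num_units, 1], shared across rows
logits = tf.reshape(dense, [-1, MAX_TIME])    # [batch, max_time]
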
Example #6
def model(X, c):
    cells = [rnn.LSTMCell(num_units=c.NUM_UNITS) for _ in range(c.NUM_LAYERS)]
    multicell = rnn.MultiRNNCell(cells, state_is_tuple=True)

    # reshape the flat input into [batch_size, max_time, input_dim=2]
    reshape_op = tf.reshape(X, [-1, c.MAX_TIME, 2], name='reshape_op')

    outputs, H = tf.nn.dynamic_rnn(multicell, reshape_op, dtype=tf.float32)

    u.shape_log(outputs)

    reshape_op = tf.reshape(outputs, [-1, c.NUM_UNITS])
    u.shape_log(reshape_op)
    # dense_op   = u.distributed_dense(reshape_op, units=2, max_time=100)
    dense_op = tf.layers.dense(reshape_op, 2, name='distributed__dense')
    u.shape_log(dense_op)

    # fetch the dense layer's kernel and bias by name (unused below)
    w, b = u.get_layer_vars('distributed__dense')

    # flatten to [max_time * output_dim]; assumes batch_size == 1
    logits = tf.reshape(dense_op, [c.MAX_TIME * f.OUTPUT_DIM])
    u.shape_log(logits)

    return logits
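
u.get_layer_vars presumably looks the kernel and bias up by the layer name passed to tf.layers.dense. A sketch of one standard way to do that in TF1; the helper's real body may differ.

import tensorflow as tf

def get_layer_vars(layer_name):
    # tf.layers.dense(name=layer_name) creates variables named
    # '<layer_name>/kernel' and '<layer_name>/bias'
    graph = tf.get_default_graph()
    w = graph.get_tensor_by_name(layer_name + '/kernel:0')
    b = graph.get_tensor_by_name(layer_name + '/bias:0')
    return w, b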