Example #1
def backwardpro():
    with tf.Graph().as_default():
        x = tf.placeholder(tf.float32, [None, forward.INPUT_NODE])
        y_ = tf.placeholder(tf.float32, [None, forward.OUTPUT_NODE])
        y = forward.forwardpro(x, REGULARIZER)
        global_step = tf.Variable(0, trainable=False)
        '''
        ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
        cem = tf.reduce_mean(ce)
        loss = cem + tf.add_n(tf.get_collection('losses'))
        '''
        loss = tf.reduce_mean(tf.square(y_ - y)) + tf.add_n(
            tf.get_collection('losses'))

        learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE,
                                                   global_step,
                                                   train_num_examples /
                                                   BATCH_SIZE,
                                                   LEARNING_RATE_DECAY,
                                                   staircase=True)
        train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
            loss, global_step=global_step)

        ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY,
                                                global_step)
        ema_op = ema.apply(tf.trainable_variables())
        with tf.control_dependencies([train_step, ema_op]):
            train_op = tf.no_op(name='train')

        saver = tf.train.Saver()
        wf_batch, pet_batch = generate.get_tfrecord(BATCH_SIZE, isTrain=True)

        with tf.Session() as sess:
            init_op = tf.global_variables_initializer()
            sess.run(init_op)

            ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            for i in range(STEPS):
                xs, ys = sess.run([wf_batch, pet_batch])
                _, loss_value, step = sess.run([train_op, loss, global_step],
                                               feed_dict={
                                                   x: xs,
                                                   y_: ys
                                               })
                if i % 1 == 0:  # logs and checkpoints every step; raise the modulus to do so less often
                    print(
                        "After %d training step(s), loss on training batch is %g."
                        % (step, loss_value))
                    saver.save(sess,
                               os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                               global_step=global_step)

            coord.request_stop()
            coord.join(threads)
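The tf.add_n(tf.get_collection('losses')) term above assumes that forward.forwardpro registers each layer's regularization penalty in the 'losses' collection as it builds the graph. A minimal sketch of such a (hypothetical) weight helper, using the standard TF1 pattern:

import tensorflow as tf

def get_weight(shape, regularizer):
    # Create a weight variable; when a regularization coefficient is given,
    # record its L2 penalty so backwardpro can sum it into the loss.
    w = tf.Variable(tf.truncated_normal(shape, stddev=0.1))
    if regularizer is not None:
        tf.add_to_collection('losses',
                             tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w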
Example #2
def test():
    with tf.Graph().as_default():
        x = tf.placeholder(tf.float32, [None, forward.INPUT_NODE])
        y_ = tf.placeholder(tf.float32, [None, forward.OUTPUT_NODE])
        y = forward.forwardpro(x, None)
        '''
        ema = tf.train.ExponentialMovingAverage(backward.MOVING_AVERAGE_DECAY)
        ema_restore = ema.variables_to_restore()
        saver = tf.train.Saver(ema_restore)
        '''
        saver = tf.train.Saver()

        y_predict = tf.add(tf.div(tf.sign(tf.subtract(y, 0.5)), 2), 0.5)
        correct_prediction = tf.equal(y_, y_predict)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        wf_batch, pet_batch = generate.get_tfrecord(TEST_NUM, isTrain=False)

        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(backward.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    global_step = ckpt.model_checkpoint_path.split(
                        '/')[-1].split('-')[-1]

                    coord = tf.train.Coordinator()
                    threads = tf.train.start_queue_runners(sess=sess,
                                                           coord=coord)

                    xs, ys = sess.run([wf_batch, pet_batch])

                    # Evaluate the thresholded prediction tensor so that
                    # precision/recall are computed on 0/1 values rather than
                    # raw network outputs.
                    y_predict_value = sess.run(y_predict, feed_dict={x: xs})
                    accuracy_score = sess.run(accuracy,
                                              feed_dict={
                                                  x: xs,
                                                  y_: ys
                                              })

                    precision = np.divide(
                        np.sum(np.multiply(ys, y_predict_value)),
                        np.sum(y_predict_value))
                    recall = np.divide(
                        np.sum(np.multiply(ys, y_predict_value)),
                        np.sum(ys))
                    print("After %s training step(s), test accuracy = %g" %
                          (global_step, accuracy_score))
                    print("After %s training step(s), test precision = %g" %
                          (global_step, precision))
                    print("After %s training step(s), test recall = %g" %
                          (global_step, recall))

                    coord.request_stop()
                    coord.join(threads)
                else:
                    print("No checkpoint found")
                    return
            # Sleep before polling for a newer checkpoint.
            time.sleep(TEST_INTERVAL_SECS)
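For 0/1 labels and predictions, the elementwise product counts true positives, which is all the precision/recall arithmetic in this example relies on. A self-contained toy check with made-up numbers:

import numpy as np

ys = np.array([1, 0, 1, 1, 0], dtype=np.float32)       # ground truth
y_pred = np.array([1, 0, 0, 1, 1], dtype=np.float32)   # predictions
tp = np.sum(ys * y_pred)            # 2 true positives
precision = tp / np.sum(y_pred)     # 2/3: predicted 1s that are correct
recall = tp / np.sum(ys)            # 2/3: true 1s that were found
print(precision, recall)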
Example #3
def test():
    with tf.Graph().as_default():
        x = tf.placeholder(tf.float32, [None, forward.INPUT_NODE])
        #y_ = tf.placeholder(tf.float32, [None, forward.OUTPUT_NODE])
        y = forward.forwardpro(x, None)
        '''
        ema = tf.train.ExponentialMovingAverage(backward.MOVING_AVERAGE_DECAY)
        ema_restore = ema.variables_to_restore()
        saver = tf.train.Saver(ema_restore)
        '''
        saver = tf.train.Saver()

        wf_batch, pet_batch = generate.get_tfrecord(TEST_NUM, isTrain=False)

        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(backward.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    global_step = ckpt.model_checkpoint_path.split(
                        '/')[-1].split('-')[-1]

                    coord = tf.train.Coordinator()
                    threads = tf.train.start_queue_runners(sess=sess,
                                                           coord=coord)

                    xs, ys = sess.run([wf_batch, pet_batch])

                    y_value = sess.run(y, feed_dict={x: xs})
                    y_c = np.concatenate([[y_value[:, 1]],
                                          [y_value[:, 0]]]).transpose()
                    y_predict = np.array(y_value > y_c, dtype=np.uint8)
                    #y_predict = ys  # sanity check: metrics with perfect predictions
                    accuracy_score = np.divide(
                        np.sum(np.multiply(ys, y_predict)),
                        np.array(ys[:, 0]).size)
                    precision = np.divide(
                        np.sum(np.multiply(ys[:, 0], y_predict[:, 0])),
                        np.sum(y_predict[:, 0]))
                    recall = np.divide(
                        np.sum(np.multiply(ys[:, 0], y_predict[:, 0])),
                        np.sum(ys[:, 0]))
                    print("After %s training step(s), test accuracy = %g" %
                          (global_step, accuracy_score))
                    print("After %s training step(s), test precision = %g" %
                          (global_step, precision))
                    print("After %s training step(s), test recall = %g" %
                          (global_step, recall))

                    coord.request_stop()
                    coord.join(threads)
                else:
                    print("No checkpoint found")
                    return
            # Sleep before polling for a newer checkpoint.
            time.sleep(TEST_INTERVAL_SECS)
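The column swap that builds y_c is a two-class argmax in disguise: each row of y_value is compared against itself with the columns reversed, which one-hot-encodes the larger logit (an exact tie yields all zeros here, unlike np.argmax). A standalone check with made-up logits:

import numpy as np

y_value = np.array([[0.2, 0.8],
                    [0.9, 0.1]])
y_c = np.concatenate([[y_value[:, 1]], [y_value[:, 0]]]).transpose()
swap_onehot = np.array(y_value > y_c, dtype=np.uint8)
argmax_onehot = np.eye(2, dtype=np.uint8)[np.argmax(y_value, axis=1)]
assert (swap_onehot == argmax_onehot).all()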
Example #4
def backward():

    # Placeholders for a batch of inputs and elementwise labels.
    x = tf.placeholder(
        tf.float32,
        [BATCH_SIZE, forward.IMAGE_SIZE, forward.IMAGE_SIZE, forward.NUM_CHANNELS])
    y_ = tf.placeholder(
        tf.float32,
        [BATCH_SIZE, forward.IMAGE_SIZE, forward.IMAGE_SIZE, forward.NUM_CHANNELS])
    xs, ys = generate.get_tfrecord(BATCH_SIZE, isTrain=True)
    y = forward.forward(x, True, REGULARIZER)
    global_step = tf.Variable(0, trainable=False)
    print('x: ', x)
    print('y_:', y_)
    print('y: ', y)

    # Loss: elementwise sigmoid cross-entropy plus the regularization terms
    # collected in 'losses'.
    #loss = tf.reduce_mean(tf.square(y-y_))
    ce = tf.nn.sigmoid_cross_entropy_with_logits(logits=y, labels=y_)
    cem = tf.reduce_mean(ce)
    loss = cem + tf.add_n(tf.get_collection('losses'))
    print('loss:', loss)
    # Exponentially decayed learning rate.
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE,
                                               global_step,
                                               num_examples / BATCH_SIZE,
                                               LEARNING_RATE_DECAY,
                                               staircase=True)

    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)
    #train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)
    #train_step = tf.train.MomentumOptimizer(learning_rate,[0.1]).minimize(loss)
    # Exponential moving average of the trainable variables.
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    ema_op = ema.apply(tf.trainable_variables())
    with tf.control_dependencies([train_step, ema_op]):
        train_op = tf.no_op(name='train')
    # Saver for writing checkpoints.
    saver = tf.train.Saver()
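Example #4 stops after constructing the Saver. A minimal sketch of how the function presumably continues, modeled on the session loop of Example #1 (STEPS, MODEL_SAVE_PATH and MODEL_NAME are assumed module-level constants, as in the other examples):

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        for i in range(STEPS):
            # Pull a batch from the input queue, then feed it to the graph.
            xs_value, ys_value = sess.run([xs, ys])
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={x: xs_value, y_: ys_value})
            if step % 100 == 0:
                print("After %d step(s), loss is %g." % (step, loss_value))
                saver.save(sess,
                           os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)
        coord.request_stop()
        coord.join(threads)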
Example #5
def test():
    with tf.Graph().as_default():
        x = tf.placeholder(
            tf.float32,
            [TEST_NUM, 1, generate.Length_waveform, forward.NUM_CHANNELS])
        #y_ = tf.placeholder(tf.float32, [None, forward.OUTPUT_NODE])
        y = forward.forwardpro(x, False, None)

        ema = tf.train.ExponentialMovingAverage(backward.MOVING_AVERAGE_DECAY)
        ema_restore = ema.variables_to_restore()
        saver = tf.train.Saver(ema_restore)

        #saver = tf.train.Saver()

        wf_batch, pet_batch, aver_batch = generate.get_tfrecord(TEST_NUM,
                                                                isTrain=False)
        '''
        y_predict = tf.add(tf.div(tf.sign(tf.subtract(y,0.5)),2),0.5)
        correct_prediction = tf.equal(y_, y_predict)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        '''
        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(backward.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    global_step = ckpt.model_checkpoint_path.split(
                        '/')[-1].split('-')[-1]

                    coord = tf.train.Coordinator()
                    threads = tf.train.start_queue_runners(sess=sess,
                                                           coord=coord)

                    xs, ys, vs = sess.run([wf_batch, pet_batch, aver_batch])
                    reshaped_xs = np.reshape(
                        xs, (TEST_NUM, 1, generate.Length_waveform,
                             forward.NUM_CHANNELS))

                    y_value = sess.run(y, feed_dict={x: reshaped_xs})

                    pe_num = np.around(np.polyval(REG, vs))
                    y_predict = np.zeros_like(y_value)
                    for i in range(TEST_NUM):
                        order_y = np.argsort(y_value[i, :])[::-1]
                        th_v = y_value[i, order_y[int(np.round(pe_num[i]))]]
                        y_predict[i, :] = np.where(y_value[i, :] > th_v, 1, 0)

                        # Correction of bias: given the first/last predicted
                        # indices a and b, zero everything from p onward.
                        if np.size(np.where(y_predict[i, :])) != 0:
                            a = np.where(y_predict[i, :] == 1)[0][0]
                            b = np.where(y_predict[i, :] == 1)[0][-1]
                            p = int(np.around((2. * b - 3. * a) / 5))
                            y_predict[i, p::] = 0

                    # Note: defined this way, "accuracy" coincides with recall
                    # (true positives over positive labels).
                    accuracy_score = np.divide(
                        np.sum(np.multiply(ys, y_predict)), np.sum(ys))
                    precision = np.divide(np.sum(np.multiply(ys, y_predict)),
                                          np.sum(y_predict))
                    recall = np.divide(np.sum(np.multiply(ys, y_predict)),
                                       np.sum(ys))
                    '''
                    y_predict_value = sess.run(y_predict, feed_dict={x: reshaped_xs, y_: ys})
                    accuracy_score = sess.run(accuracy, feed_dict={y_: ys, y_predict: y_predict_value})
                    precision = np.divide(np.sum(np.multiply(ys, y_predict_value)), np.sum(y_predict_value))
                    recall = np.divide(np.sum(np.multiply(ys, y_predict_value)), np.sum(ys))
                    '''
                    print("After %s training step(s), test accuracy = %g" %
                          (global_step, accuracy_score))
                    print("After %s training step(s), test precision = %g" %
                          (global_step, precision))
                    print("After %s training step(s), test recall = %g" %
                          (global_step, recall))

                    coord.request_stop()
                    coord.join(threads)
                else:
                    print("No checkpoint found")
                    return
            # Sleep before polling for a newer checkpoint.
            time.sleep(TEST_INTERVAL_SECS)
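The per-row loop above keeps roughly the pe_num[i] largest activations by thresholding each row at the value ranked that many places down in descending order. A toy illustration with made-up numbers:

import numpy as np

row = np.array([0.1, 0.9, 0.4, 0.7, 0.2])
k = 2                                 # keep the k largest activations
order = np.argsort(row)[::-1]         # indices sorted by value, descending
th = row[order[k]]                    # value just below the top-k band
keep = np.where(row > th, 1, 0)
print(keep)                           # -> [0 1 0 1 0]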