Example #1
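# This fragment restores a trained model and runs it over the test set.
# It assumes the graph tensors (x, y_, keep_prob, loss, y_conv) and the
# names dir_load, test_file and test_batchSize are defined elsewhere,
# e.g. as in Example #2.
import os
import time

import numpy as np
import tensorflow as tf

import data_process  # project-local batching helpers

# GPU configuration (same settings as Example #2)
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
tf_config.gpu_options.visible_device_list = '0'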
# gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)

with tf.Session(config=tf_config) as sess:

    saver = tf.train.Saver(max_to_keep=None)
    # Restore previously trained parameters instead of re-initializing them
    # sess.run(tf.global_variables_initializer())
    saver.restore(sess, dir_load)

    # Output directory for models and summaries
    timestamp = str(int(time.time()))
    out_dir = os.path.abspath(os.path.join(os.path.curdir, "run", timestamp))
    print("Writing to {}\n".format(out_dir))

    # testing: run the restored model over the test set, batch by batch
    test_batches = data_process.batch_iter1(test_file, test_batchSize)
    testOutAll = []
    count = 0
    for batch in test_batches:
        count += 1
        testLoss_out, test_out = sess.run([loss, y_conv],
                                          feed_dict={
                                              x: batch[0],
                                              y_: batch[1],
                                              keep_prob: 1.0
                                          })
        print("step %d, test loss %g" % (count * test_batchSize, testLoss_out))
        testOut_flat = test_out.reshape(-1)
        testOutAll = np.concatenate([testOutAll, testOut_flat])

        # print("the predict:", test_out)
Example #2
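# Imports assumed by this example; data_process (batching helpers) and the
# net() sub-network builder are project-local and defined elsewhere.
import math
import os
import time

import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim

import data_process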
def main(_):
    # Graph
    with tf.name_scope('input'):
        x = tf.placeholder(tf.float32,
                           [None, 15 * 4 * 101 * 101])  # input data
        y_ = tf.placeholder(tf.float32, [None])  # label
        keep_prob = tf.placeholder(tf.float32)

    with tf.name_scope('input_reshape'):
        x_image = tf.reshape(x, [-1, 4, 101, 101, 1])
        # x_image = tf.transpose(x_image, [0,3,4,1,2])
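        # Split the four slices along axis 1 into separate
        # [batch, 101, 101, 1] images; each is fed through its own
        # sub-network below.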
        x_image_h0 = x_image[:, 0, :, :, :]
        x_image_h1 = x_image[:, 1, :, :, :]
        x_image_h2 = x_image[:, 2, :, :, :]
        x_image_h3 = x_image[:, 3, :, :, :]

    print(x_image_h1.shape)
    fc1_h0 = net(x_image_h0, 0, keep_prob)
    fc1_h1 = net(x_image_h1, 1, keep_prob)
    fc1_h2 = net(x_image_h2, 2, keep_prob)
    fc1_h3 = net(x_image_h3, 3, keep_prob)

    fc1_all = tf.concat([fc1_h0, fc1_h1, fc1_h2, fc1_h3], axis=1)
    print(fc1_all.shape)
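    # Two fully connected layers map the concatenated per-slice features
    # to 20 hidden units and then to a single regression output.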
    fc2 = slim.fully_connected(inputs=fc1_all,
                               num_outputs=20,
                               activation_fn=None,
                               scope='fc2')
    y_conv = slim.fully_connected(inputs=fc2,
                                  num_outputs=1,
                                  activation_fn=None,
                                  scope='fc3')
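    # Flatten the [batch, 1] output to shape [batch] so it matches y_.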
    y_conv = tf.reshape(y_conv, [-1])

    # loss: root-mean-square error (RMSE) between predictions and labels
    loss = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(y_conv, y_))))

    train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)

    # gpu configuration
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    tf_config.gpu_options.visible_device_list = '0'
    # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)

    with tf.Session(config=tf_config) as sess:

        saver = tf.train.Saver(max_to_keep=None)
        # Initialize parameters
        sess.run(tf.global_variables_initializer())
        # saver.restore(sess, dir_load)

        # Output directory for models and summaries
        timestamp = str(int(time.time()))
        out_dir = os.path.abspath(
            os.path.join(os.path.curdir, "run", timestamp))
        print("Writing to {}\n".format(out_dir))

        # summaries: a scalar for the loss, a histogram for fc1_all
        tf.summary.scalar("loss", loss)
        tf.summary.histogram("fc1_all", fc1_all)
        # tf.summary.scalar("learning rate", learning_rate)
        # loss_summary = tf.summary.scalar("learning rate", learning_rate)

        # tf.summary.image("input", x_image_h1, max_outputs = 15)
        merged = tf.summary.merge_all()  # merge all summaries into a single op
        # grad_summaries = []
        # grad_summaries.append(loss_summary)
        # merged = tf.summary.merge(grad_summaries)

        # train summary
        train_summary_dir = os.path.join(out_dir, "summaries", "train")
        train_summary_writer = tf.summary.FileWriter(train_summary_dir,
                                                     sess.graph)
        # dev summary
        dev_summary_dir = os.path.join(out_dir, "summaries", "dev")
        dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)

        # Checkpoint directory. TensorFlow assumes this directory already exists, so we need to create it
        checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
        checkpoint_prefix = os.path.join(checkpoint_dir, "model")
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)

        # training: 4 passes (epochs) over the training file
        count = 0
        for i in range(4):
            train_batches = data_process.batch_iter1(train_file,
                                                     batch_size,
                                                     shuffle=True)
            for batch in train_batches:
                count += 1
                # vari_x.extend(batch[2])
                # vari_y.extend(batch[3])
                # one optimization step on the batch (dropout keep_prob = 0.6)
                sess.run(train_step,
                         feed_dict={
                             x: batch[0],
                             y_: batch[1],
                             keep_prob: 0.6
                         })
                # training loss and summaries (dropout disabled: keep_prob = 1)
                train_summary, train_loss_out = sess.run([merged, loss],
                                                         feed_dict={
                                                             x: batch[0],
                                                             y_: batch[1],
                                                             keep_prob: 1
                                                         })
                print("epochs %d, step %d, training loss %g" %
                      (i, count * batch_size, train_loss_out))
                train_summary_writer.add_summary(train_summary, count)

                # verification: every 30 steps, evaluate on the verification set
                # if (count * batch_size) % 3000 == 0:
                if count % 30 == 0:
                    veri_batches = data_process.batch_iter1(
                        veri_file, veri_size)
                    testOutAll = []
                    y_all = []
                    for veri_batch in veri_batches:
                        vari_summary, veri_loss_out, y_veri = sess.run(
                            [merged, loss, y_conv],
                            feed_dict={
                                x: veri_batch[0],
                                y_: veri_batch[1],
                                keep_prob: 1
                            })
                        y_all = np.concatenate([y_all, veri_batch[1]])
                        testOutAll = np.concatenate([testOutAll, y_veri])
                    # print(y_all.shape, testOutAll.shape)
                    err_2 = (testOutAll - y_all)**2
                    rmse = math.sqrt(np.mean(err_2))
                    print("*******epochs %d, step %d, varification loss %g" %
                          (i, count * batch_size, rmse))
                    dev_summary_writer.add_summary(vari_summary, count)

                # save a model checkpoint every 30 steps
                # if (count * batch_size) % 3000 == 0:
                if count % 30 == 0:
                    # if not os.path.exists(dir_model):
                    path = saver.save(sess,
                                      checkpoint_prefix,
                                      global_step=count * batch_size)
                    print("Saved model checkpoint to {}\n".format(path))
Example #3
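    # This fragment builds summary writers, checkpointing and the training
    # loop; it assumes sess, out_dir, merged, loss, train_step, x, y_,
    # keep_prob, data_process, train_file and batch_size are defined as in
    # Example #2.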
    train_summary_dir = os.path.join(out_dir, "summaries", "train")
    train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
    # dev summary
    dev_summary_dir = os.path.join(out_dir, "summaries", "dev")
    dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)

    # Checkpoint directory. TensorFlow assumes this directory already exists, so we need to create it
    checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
    checkpoint_prefix = os.path.join(checkpoint_dir, "model")
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)

    # training: 4 passes (epochs) over the training file
    count = 0
    for i in range(4):
        train_batches = data_process.batch_iter1(train_file, batch_size)
        for batch in train_batches:
            count += 1
            # vari_x.extend(batch[2])
            # vari_y.extend(batch[3])
            # one optimization step on the batch (dropout keep_prob = 0.6)
            sess.run(train_step,
                     feed_dict={
                         x: batch[0],
                         y_: batch[1],
                         keep_prob: 0.6
                     })
            # training loss and summaries (dropout disabled: keep_prob = 1)
            train_summary, train_loss_out = sess.run([merged, loss],
                                                     feed_dict={
                                                         x: batch[0],