Example #1
import numpy as np
import tensorflow as tf
import keras
import dataset_parser
from keras.applications.vgg19 import VGG19
from keras.models import Model

# FLAGS is assumed to be defined elsewhere in this module
# (e.g. via tf.app.flags).


def main(args=None):
    print(args)
    tf.reset_default_graph()
    """
    Dataset Parser
    """
    # Parse Dataset
    aaai_parser = dataset_parser.AAAIParser('./dataset/AAAI',
                                            target_height=FLAGS.image_height,
                                            target_width=FLAGS.image_width)
    aaai_parser.load_mat_train_paths()
    """
    Pre-trained Model
    """
    model_vgg19 = VGG19(weights='imagenet', include_top=False)
    model_vgg19_pool5 = model_vgg19.get_layer('block5_pool').output
    model_vgg19_pool4 = model_vgg19.get_layer('block4_pool').output
    inputs = model_vgg19.input
    model_vgg19_pool4 = Model(inputs=inputs, outputs=model_vgg19_pool4)

    batch, batch_size = 0, 1
    x_batch, y_batch = aaai_parser.load_mat_train_datum_batch(
        batch * batch_size, (batch + 1) * batch_size)
    x_batch = np.array(x_batch, dtype=np.float32)
    # y_batch = np.array(y_batch, dtype=np.int32)

    # VGG-style preprocessing on the RGB channels only.
    x = keras.applications.vgg19.preprocess_input(x_batch[:, :, :, :3])
    block4_pool_features = model_vgg19_pool4.predict(x)

    print(block4_pool_features.shape)
Example #2
import numpy as np
import scipy.io as sio
import scipy.misc
import tensorflow as tf
import dataset_parser

# FLAGS and the simple_ae network builder are assumed to be defined
# elsewhere in this module.


def main(args=None):
    print(args)
    tf.reset_default_graph()
    """
    Dataset Parser
    """
    # Parse Dataset
    aaai_parser = dataset_parser.AAAIParser('./dataset/AAAI',
                                            target_height=FLAGS.image_height, target_width=FLAGS.image_width)
    aaai_parser.load_mat_train_dd_paths()
    # Hyper-parameters
    epochs, batch_size = FLAGS.epochs, FLAGS.batch_size
    data_len = len(aaai_parser.mat_train_paths)
    print(data_len)
    batches = data_len // batch_size
    """
    Build Graph
    """
    global_step = tf.Variable(0, trainable=False)
    # Placeholder
    learning_rate = tf.placeholder(tf.float32)
    is_training = tf.placeholder(tf.bool)
    drop_probability = tf.placeholder(tf.float32, name="drop_probability")
    data_x = tf.placeholder(tf.float32, shape=[None, None, None, FLAGS.num_of_feature],
                            name="data_x")
    data_y = tf.placeholder(tf.int32, shape=[None, None, None],
                            name="data_y")
    """
    Network
    """
    logits = simple_ae(x=data_x, drop_probability=drop_probability, is_training=is_training)
    # Loss
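    # Per-pixel cross-entropy: labels are integer class maps (not one-hot),
    # hence the sparse variant, averaged over all pixels.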
    loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=data_y, name="entropy")))
    """
    Optimizer
    """
    trainable_var = tf.get_collection(key=tf.GraphKeys.TRAINABLE_VARIABLES, scope='simple_ae')
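    # Run pending update ops (e.g. batch-norm moving averages) before each
    # training step.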
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = tf.train.AdamOptimizer(learning_rate).minimize(
            loss=loss, global_step=global_step, var_list=trainable_var)
    """
    Graph Logs
    """
    tf.summary.scalar("entropy", loss)
    summary_op = tf.summary.merge_all()
    saver = tf.train.Saver(max_to_keep=2)
    """
    Launch Session
    """
    with tf.Session() as sess:
        summary_writer = tf.summary.FileWriter(FLAGS.logs_dir + '/events', sess.graph)
        sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir + '/model')
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            print("Model restored: {}".format(ckpt.model_checkpoint_path))
        else:
            print("No Model found.")

        if FLAGS.mode == 'train':
            cur_learning_rate = FLAGS.learning_rate
            for epoch in range(0, epochs):
                np.random.shuffle(aaai_parser.mat_train_paths)
                for batch in range(0, batches):
                    x_batch, y_batch = aaai_parser.load_mat_train_datum_batch(batch*batch_size, (batch+1)*batch_size)
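                    # Keep only the first four input channels (RGB + S);
                    # the depth channel is dropped in this variant.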
                    x_batch = np.array(x_batch, dtype=np.float32)[:, :, :, :4]
                    y_batch = np.array(y_batch, dtype=np.int32)
                    feed_dict = {data_x: x_batch, data_y: y_batch,
                                 drop_probability: 0.2, is_training: True, learning_rate: cur_learning_rate}
                    _, loss_sess, global_step_sess = sess.run([train_op, loss, global_step], feed_dict=feed_dict)

                    print('global_step: {:d}, epoch: [{:d}/{:d}], batch: [{:d}/{:d}], data: {:d}-{:d}, loss: {:f}'
                          .format(global_step_sess, epoch, epochs, batch, batches,
                                  batch*batch_size, (batch+1)*batch_size, loss_sess))

                    if global_step_sess % 10 == 1:
                        summary_str = sess.run(summary_op, feed_dict={
                            data_x: x_batch, data_y: y_batch, drop_probability: 0.0, is_training: False})
                        summary_writer.add_summary(summary_str, global_step_sess)

                    if global_step_sess % 5000 == 1:
                        logits_sess = sess.run(logits, feed_dict={
                            data_x: x_batch, drop_probability: 0.0, is_training: False})
                        print('Logging images..')
                        for batch_idx, mat_train_path in \
                                enumerate(aaai_parser.mat_train_paths[batch*batch_size:(batch+1)*batch_size]):
                            name = mat_train_path.split('/')[-1].split('.')[0]
                            scipy.misc.imsave('{}/images/{:d}_{}_0_rgb.png'.format(
                                FLAGS.logs_dir, global_step_sess, name), x_batch[batch_idx, :, :, :3])
                            scipy.misc.imsave('{}/images/{:d}_{}_1_s.png'.format(
                                FLAGS.logs_dir, global_step_sess, name), x_batch[batch_idx, :, :, 3])
                            # scipy.misc.imsave('{}/images/{:d}_{}_2_d.png'.format(
                            #     FLAGS.logs_dir, global_step_sess, name), x_batch[batch_idx, :, :, 4])
                            scipy.misc.imsave('{}/images/{:d}_{}_3_gt.png'.format(
                                FLAGS.logs_dir, global_step_sess, name), y_batch[batch_idx])
                            scipy.misc.imsave('{}/images/{:d}_{}_4_pred.png'.format(
                                FLAGS.logs_dir, global_step_sess, name), np.argmax(logits_sess[batch_idx], axis=2))

                    if global_step_sess % 1500 == 0:
                        print('Saving model...')
                        saver.save(sess, FLAGS.logs_dir + "/model/model.ckpt", global_step=global_step_sess)

        elif FLAGS.mode == 'test':
            print(aaai_parser.test_name)
            aaai_parser.load_mat_test_paths()
            for idx, mat_valid_path in enumerate(aaai_parser.mat_test_paths):
                mat_contents = sio.loadmat(mat_valid_path)
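                # 'RGBSD' stacks the RGB image with the S and D channels;
                # only the first four (RGB + S) are fed to the network.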
                x = mat_contents['sample'][0][0]['RGBSD']
                x_batch = np.array([x], dtype=np.float32)[:, :, :, :4]
                feed_dict = {data_x: x_batch, drop_probability: 0.0, is_training: False}
                logits_sess = sess.run(logits, feed_dict=feed_dict)
                print('[{:d}/{:d}]'.format(idx, len(aaai_parser.mat_test_paths)))

                name = mat_valid_path.split('/')[-1].split('.')[0]
                scipy.misc.imsave('{}/test/{:d}_{}_0_rgb.png'.format(
                    FLAGS.logs_dir, idx, name), x_batch[0, :, :, :3])
                scipy.misc.imsave('{}/test/{:d}_{}_1_s.png'.format(
                    FLAGS.logs_dir, idx, name), x_batch[0, :, :, 3])
                # scipy.misc.imsave('{}/test/{:d}_{}_2_d.png'.format(
                #     FLAGS.logs_dir, idx, name), x_batch[0, :, :, 4])
                scipy.misc.imsave('{}/test/{:d}_{}_4_pred.png'.format(FLAGS.logs_dir, idx, name),
                                  np.argmax(logits_sess[0], axis=2))
                mat_contents['pred'] = logits_sess
                sio.savemat('./dataset/AAAI/{}_wo/{}'.format(
                    aaai_parser.test_name, name), {'a_dict': mat_contents})
Example #3
import dataset_parser
import numpy as np
import scipy.io as sio

aaai_parser = dataset_parser.AAAIParser('./dataset/AAAI',
                                        target_height=256,
                                        target_width=256)
aaai_parser.load_mat_valid_paths()

mat_contents = sio.loadmat(aaai_parser.mat_valid_paths[0])

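# Attach a placeholder prediction and write the record back out as a
# .mat file (here under a single 'a_dict' key).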
mat_contents['pred'] = np.zeros((256, 256), dtype=np.uint8)
name = aaai_parser.mat_valid_paths[0].split('/')[-1]
sio.savemat(name, {'a_dict': mat_contents})
Example #4
import numpy as np
import scipy.misc
import tensorflow as tf
import dataset_parser
from keras.applications.vgg19 import VGG19

# FLAGS and the deconv_with_vgg19 network builder are assumed to be
# defined elsewhere in this module.


def main(args=None):
    print(args)
    tf.reset_default_graph()
    """
    Dataset Parser
    """
    # Parse Dataset
    aaai_parser = dataset_parser.AAAIParser('./dataset/AAAI',
                                            target_height=FLAGS.image_height,
                                            target_width=FLAGS.image_width)
    aaai_parser.load_mat_train_paths()
    # Hyper-parameters
    epochs, batch_size = FLAGS.epochs, FLAGS.batch_size
    data_len = len(aaai_parser.mat_train_paths)
    print(data_len)
    batches = data_len // batch_size
    """
    Build Graph
    """
    # TODO: learning rate decay
    global_step = tf.Variable(0, trainable=False)
    # Placeholder
    with tf.variable_scope("placeholder"):
        learning_rate = tf.placeholder(tf.float32)
        is_training = tf.placeholder(tf.bool)
        drop_probability = tf.placeholder(tf.float32, name="drop_probability")
        data_x = tf.placeholder(tf.float32,
                                shape=[
                                    None, FLAGS.image_height,
                                    FLAGS.image_width, FLAGS.num_of_feature
                                ],
                                name="data_x")
        data_y = tf.placeholder(
            tf.int32,
            shape=[None, FLAGS.image_height, FLAGS.image_width],
            name="data_y")
    """
    Pre-trained Model
    """
    with tf.variable_scope("VGG19"):
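        # Subtract the ImageNet channel means expected by the pre-trained
        # VGG weights ('caffe'-style preprocessing).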
        data_preprocess_x = data_x[:, :, :, :3] - np.array(
            [103.939, 116.779, 123.68])
        model_vgg19 = VGG19(weights='imagenet',
                            include_top=False,
                            input_tensor=data_preprocess_x)
        model_vgg19_pool5 = model_vgg19.get_layer('block5_pool').output
        model_vgg19_pool4 = model_vgg19.get_layer('block4_pool').output
        model_vgg19_pool3 = model_vgg19.get_layer('block3_pool').output
        model_vgg19_pool2 = model_vgg19.get_layer('block2_pool').output
        model_vgg19_pool1 = model_vgg19.get_layer('block1_pool').output
    with tf.variable_scope("network"):
        logits = deconv_with_vgg19(x=data_x,
                                   drop_probability=drop_probability,
                                   is_training=is_training,
                                   vgg_pool5=model_vgg19_pool5,
                                   vgg_pool4=model_vgg19_pool4,
                                   vgg_pool3=model_vgg19_pool3,
                                   vgg_pool2=model_vgg19_pool2,
                                   vgg_pool1=model_vgg19_pool1)
    # Loss
    with tf.variable_scope("loss"):
        loss = tf.reduce_mean(
            (tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
                                                            labels=data_y,
                                                            name="entropy")))
    """
    Optimizer
    """
    trainable_var = tf.get_collection(key=tf.GraphKeys.TRAINABLE_VARIABLES,
                                      scope='network/deconv_with_vgg19')
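    # Apply pending update ops (e.g. batch-norm moving statistics) before
    # each optimizer step.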
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = tf.train.AdamOptimizer(learning_rate).minimize(
            loss=loss, global_step=global_step, var_list=trainable_var)
    """
    Graph Logs
    """
    tf.summary.scalar("entropy", loss)
    summary_op = tf.summary.merge_all()
    saver = tf.train.Saver(max_to_keep=2)
    """
    Launch Session
    """
    with tf.Session() as sess:
        summary_writer = tf.summary.FileWriter(FLAGS.logs_dir, sess.graph)
        sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            print("Model restored!")
        else:
            print("No Model found.")

        cur_learning_rate = FLAGS.learning_rate
        for epoch in range(0, epochs):
            np.random.shuffle(aaai_parser.mat_train_paths)
            for batch in range(0, batches):
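                # Unlike the four-channel variant above, all
                # FLAGS.num_of_feature input channels (RGB + S + D) are fed.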
                x_batch, y_batch = aaai_parser.load_mat_train_datum_batch(
                    batch * batch_size, (batch + 1) * batch_size)
                x_batch = np.array(x_batch, dtype=np.float32)
                y_batch = np.array(y_batch, dtype=np.int32)
                feed_dict = {
                    data_x: x_batch,
                    data_y: y_batch,
                    drop_probability: 0.2,
                    is_training: True,
                    learning_rate: cur_learning_rate
                }
                _, loss_sess, global_step_sess = sess.run(
                    [train_op, loss, global_step], feed_dict=feed_dict)

                print(
                    'global_step: {:d}, epoch: [{:d}/{:d}], batch: [{:d}/{:d}], data: {:d}-{:d}, loss: {:f}'
                    .format(global_step_sess, epoch, epochs, batch, batches,
                            batch * batch_size, (batch + 1) * batch_size,
                            loss_sess))

                if global_step_sess % 10 == 1:
                    summary_str = sess.run(summary_op,
                                           feed_dict={
                                               data_x: x_batch,
                                               data_y: y_batch,
                                               drop_probability: 0.0,
                                               is_training: False
                                           })
                    summary_writer.add_summary(summary_str, global_step_sess)

                if global_step_sess % 150 == 1:
                    logits_sess = sess.run(logits,
                                           feed_dict={
                                               data_x: x_batch,
                                               drop_probability: 0.0,
                                               is_training: False
                                           })
                    print('Logging images..')
                    for batch_idx, mat_train_path in \
                            enumerate(aaai_parser.mat_train_paths[batch*batch_size:(batch+1)*batch_size]):
                        name = mat_train_path.split('/')[-1].split('.')[0]
                        scipy.misc.imsave(
                            '{}/images/{:d}_{}_0_rgb.png'.format(
                                FLAGS.logs_dir, global_step_sess, name),
                            x_batch[batch_idx, :, :, :3])
                        scipy.misc.imsave(
                            '{}/images/{:d}_{}_1_s.png'.format(
                                FLAGS.logs_dir, global_step_sess, name),
                            x_batch[batch_idx, :, :, 3])
                        scipy.misc.imsave(
                            '{}/images/{:d}_{}_2_d.png'.format(
                                FLAGS.logs_dir, global_step_sess, name),
                            x_batch[batch_idx, :, :, 4])
                        scipy.misc.imsave(
                            '{}/images/{:d}_{}_3_gt.png'.format(
                                FLAGS.logs_dir, global_step_sess, name),
                            y_batch[batch_idx])
                        scipy.misc.imsave(
                            '{}/images/{:d}_{}_4_pred.png'.format(
                                FLAGS.logs_dir, global_step_sess, name),
                            np.argmax(logits_sess[batch_idx], axis=2))

                if global_step_sess % 500 == 0:
                    print('Saving model...')
                    saver.save(sess,
                               FLAGS.logs_dir + "/model.ckpt",
                               global_step=global_step_sess)