Example #1
 def __init__(self, sess, weights=None):
     """Build an FCN-style segmentation graph on top of a VGG16 backbone.

     Args:
         sess: active tf.Session, used when loading pretrained weights.
         weights: optional path to a pretrained weight file. When given,
             the default './vgg16.npy' is NOT passed to vgg16; the file
             is loaded afterwards via load_with_skip instead.
     """
     self.sess = sess
     # Presumably 150 ADE20K object classes plus one background/ignore
     # label — TODO confirm against the dataset definition.
     self.CATEGORY_NUM = 151
     self.IMAGE_SIZE = [224, 224]
     self.IMAGE_CHANNEL = 3
     data_dir = '/home/give/Documents/dataset/ADEChallengeData2016'
     self.model_save_path = '/home/give/PycharmProjects/MyFCN/pretrain_vgg16/model/'
     self.dataset = DataSet(data_dir)
     self.learning_rate = 1e-3
     # (sic) "itertator" — name kept as-is; other code may reference it.
     self.itertator_number = int(1e+5)
     self.learning_rate_decay = 0.9
     self.BATCH_SIZE = 20
     # Input images, NHWC. NOTE(review): both spatial dims use
     # IMAGE_SIZE[0] here while y_ below uses [0] and [1]; harmless while
     # the input is square (224x224) — confirm intent.
     self.imgs = tf.placeholder(tf.float32,
                                shape=[
                                    self.BATCH_SIZE, self.IMAGE_SIZE[0],
                                    self.IMAGE_SIZE[0], self.IMAGE_CHANNEL
                                ])
     # One-hot ground-truth masks: one channel per category.
     self.y_ = tf.placeholder(tf.float32,
                              shape=[
                                  self.BATCH_SIZE, self.IMAGE_SIZE[0],
                                  self.IMAGE_SIZE[1], self.CATEGORY_NUM
                              ])
     tf.summary.image("input_image", self.imgs, max_outputs=2)
     # Names of the extra (non-VGG) layers built later by inference().
     self.layers_name = [
         'conv6_1',
         'conv6_2',
         'conv6_3',
         'upconv1_1',
         'pooling4_score',
         'upconv2_1',
         'pooling3_score',
         'upconv3_1',
     ]
     # Log the argmax-decoded label map as an image summary.
     # NOTE(review): `dimension=` and `dim=` are deprecated TF1 aliases
     # for `axis=` — left unchanged here.
     tf.summary.image("ground_truth",
                      tf.cast(
                          tf.expand_dims(tf.argmax(self.y_, dimension=3),
                                         dim=3), tf.uint8),
                      max_outputs=2)
     # With explicit weights, skip the bundled npy so vgg16 builds without
     # loading anything; the caller-supplied file is loaded below.
     if weights is None:
         vgg_weights_path = './vgg16.npy'
     else:
         vgg_weights_path = None
     self.vgg = vgg16(self.imgs,
                      weights=vgg_weights_path,
                      sess=self.sess,
                      skip_layers=['fc6', 'fc7', 'fc8'])
     self.inference()
     if weights is not None and sess is not None:
         load_with_skip(weights, sess, [])
Example #2
 def __init__(self, imgs, weights=None, sess=None, skip_layers=None):
     """Build the VGG16 conv stack and optionally load pretrained weights.

     Args:
         imgs: input image tensor fed to the first convolution.
         weights: optional path to a pretrained .npy weight file.
         sess: tf.Session used to assign the weights; loading only
             happens when both `weights` and `sess` are provided.
         skip_layers: iterable of layer names whose pretrained weights
             should NOT be loaded. Defaults to loading every layer —
             same behavior as the old `skip_layers=[]` default, but
             without the shared-mutable-default-argument pitfall.
     """
     if skip_layers is None:
         skip_layers = []
     self.imgs = imgs
     # Per-layer trainability flags; flip entries to False to freeze.
     self.trainable = {
         'conv1_1': True,
         'conv1_2': True,
         'conv2_1': True,
         'conv2_2': True,
         'conv3_1': True,
         'conv3_2': True,
         'conv3_3': True,
         'conv4_1': True,
         'conv4_2': True,
         'conv4_3': True,
         'conv5_1': True,
         'conv5_2': True,
         'conv5_3': True,
         'fc6': True,
         'fc7': True,
         'fc8': True
     }
     self.layers_name = [
         'conv1_1',
         'conv1_2',
         'conv2_1',
         'conv2_2',
         'conv3_1',
         'conv3_2',
         'conv3_3',
         'conv4_1',
         'conv4_2',
         'conv4_3',
         'conv5_1',
         'conv5_2',
         'conv5_3',
     ]
     # Binary classification head by default.
     self.classesnumber = 2
     self.regularizer = None
     self.convlayers()
     if weights is not None and sess is not None:
         load_with_skip(weights, sess, skip_layers)
def ext_dataset_fea_use_vgg16(dataset_root_path, dataset_feature_root_path):
    """Extract a VGG16 feature vector for every image in a dataset and write
    each vector as comma-separated text.

    Expects `dataset_root_path` to contain one sub-folder per class; the
    same class layout is mirrored under `dataset_feature_root_path`, with
    one `<image-stem>.txt` per image.

    Args:
        dataset_root_path: folder of class sub-folders holding the images.
        dataset_feature_root_path: output root for the per-image .txt files.
    """
    pre_trained_weights = r'/home/vincent/Desktop/jsl thesis/GradTest_vinny/UCM/dataset_rotated/logs/train/vgg16.npy'
    # Small preprocessing graph: path -> decoded, cropped, standardized image.
    img_path = tf.placeholder(tf.string)
    img_content = tf.read_file(img_path)
    img = tf.image.decode_image(img_content, channels=3)

    img = tf.image.resize_image_with_crop_or_pad(img, config.IMG_W, config.IMG_H)
    img = tf.image.per_image_standardization(img)
    # Feature extractor: batch of one image -> feature vector (fc layers skipped).
    x = tf.placeholder(tf.float32, shape=[1, config.IMG_W, config.IMG_H, 3])
    img_fea = VGG.VGG16N_CNN(x, config.N_CLASSES, False)
    with tf.Session() as sess:
        tools.load_with_skip(pre_trained_weights, sess, ['fc6', 'fc7', 'fc8'])
        for class_name in os.listdir(dataset_root_path):
            class_path = os.path.join(dataset_root_path, class_name)
            print('--**extracting %s**--' % class_path)
            fea_class_path = os.path.join(dataset_feature_root_path, class_name)
            if not os.path.exists(fea_class_path):
                # makedirs (vs the original mkdir) also creates the feature
                # root itself when it does not exist yet.
                os.makedirs(fea_class_path)
            for img_name in os.listdir(class_path):

                jpg_img_path = os.path.join(class_path, img_name)
                # splitext handles any extension length ('.jpg', '.jpeg',
                # ...), unlike the fragile img_name[:-4] slice it replaces.
                fea_txt_path = os.path.join(
                    fea_class_path, os.path.splitext(img_name)[0] + '.txt')
                print('extracting %s' % jpg_img_path)

                image = sess.run(img, feed_dict={img_path: jpg_img_path})
                img_fea_out = sess.run(img_fea, feed_dict={x: [image]})

                str_img_fea = ','.join(map(str, img_fea_out[0].tolist()))
                with open(fea_txt_path, 'w') as f:
                    f.write(str_img_fea)
Example #4
def train():
    """Fine-tune VGG16 on a train/validation split with queue-based input.

    Builds batch queues, restores pretrained VGG weights (skipping 'fc8'),
    then alternates training steps with periodic summary logging,
    validation, and checkpointing until MAX_STEP or queue exhaustion.
    Relies on module-level config (train_dir, RATIO, IMG_W, IMG_H,
    BATCH_SIZE, CAPACITY, N_CLASSES, IS_PRETRAIN, learning_rate,
    pre_trained_weights, train_log_dir, val_log_dir, MAX_STEP).
    """
    with tf.name_scope('input'):
        train, train_label, val, val_label = input_train_val_split.get_files(
            train_dir, RATIO)
        tra_image_batch, tra_label_batch = input_train_val_split.get_batch(
            train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
        val_image_batch, val_label_batch = input_train_val_split.get_batch(
            val, val_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

    x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, IMG_W, IMG_H, 3])
    y_ = tf.placeholder(tf.int16, shape=[BATCH_SIZE, N_CLASSES])

    logits = VGG.VGG16N(x, N_CLASSES, IS_PRETRAIN)
    loss = tools.loss(logits, y_)
    accuracy = tools.accuracy(logits, y_)

    my_global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = tools.optimize(loss, learning_rate, my_global_step)

    saver = tf.train.Saver(tf.global_variables())
    summary_op = tf.summary.merge_all()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Restore pretrained weights for everything except the classifier.
        tools.load_with_skip(pre_trained_weights, sess, ['fc8'])
        print("load weights done")

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        tra_summary_writer = tf.summary.FileWriter(train_log_dir, sess.graph)
        val_summary_writer = tf.summary.FileWriter(val_log_dir, sess.graph)

        try:
            for step in np.arange(MAX_STEP):
                if coord.should_stop():
                    break
                tra_images, tra_labels = sess.run(
                    [tra_image_batch, tra_label_batch])
                _, tra_loss, tra_acc = sess.run([train_op, loss, accuracy],
                                                feed_dict={
                                                    x: tra_images,
                                                    y_: tra_labels
                                                })
                if step % 2 == 0 or (step + 1) == MAX_STEP:

                    print('Step: %d, loss: %.4f, accuracy: %.4f%%' %
                          (step, tra_loss, tra_acc))
                    # BUG FIX: only evaluate the summaries here. The
                    # original also fetched train_op, silently applying a
                    # second, unlogged gradient update on the same batch.
                    summary_str = sess.run(summary_op,
                                           feed_dict={
                                               x: tra_images,
                                               y_: tra_labels
                                           })
                    tra_summary_writer.add_summary(summary_str, step)

                if step % 4 == 0 or (step + 1) == MAX_STEP:
                    val_images, val_labels = sess.run(
                        [val_image_batch, val_label_batch])
                    val_loss, val_acc = sess.run([loss, accuracy],
                                                 feed_dict={
                                                     x: val_images,
                                                     y_: val_labels
                                                 })

                    print(
                        '**  Step %d, val loss = %.2f, val accuracy = %.2f%%  **'
                        % (step, val_loss, val_acc))
                    # BUG FIX: the original fetched train_op with this feed,
                    # i.e. it trained on the validation batch. Only the
                    # summaries are evaluated now.
                    summary_str = sess.run(summary_op,
                                           feed_dict={
                                               x: val_images,
                                               y_: val_labels
                                           })
                    val_summary_writer.add_summary(summary_str, step)

                if step % 8 == 0 or (step + 1) == MAX_STEP:
                    checkpoint_path = os.path.join(train_log_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)

        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')

        finally:
            coord.request_stop()

        coord.join(threads)
Example #5
# Smoke-test script: build VGG16 on img_tf2, load pretrained weights, and
# print the logits.
#
# BUG FIX vs the original: the graph must be built BEFORE running the
# variable initializer and load_with_skip — both can only act on variables
# that already exist. The original initialized first, then built VGG.VGG16,
# leaving its variables uninitialized. The redundant, deprecated
# sess.run(tf.initialize_all_variables()) duplicate was also removed.
sess = tf.Session()

print('Testing Network!\n')
logits = VGG.VGG16(img_tf2, 1000, True)

sess.run(tf.global_variables_initializer())
# Empty skip list: load every pretrained VGG16 layer.
tools.load_with_skip('./VGG16_pretrain/vgg16.npy', sess, [])

print(logits.eval(session=sess), '\n')
Example #6
def train():
    """Train the two-stream VGG16N model (image + 280-dim auxiliary vector)
    using tf.contrib.data pipelines.

    The training .npy arrays are re-shuffled (shuu.shu()) and reloaded at
    the start of every epoch; train/val summaries are written every 20
    steps and a checkpoint every 10000 steps.

    NOTE(review): relies on module-level names (main_dir, num_epoch,
    MAX_STEP, N_CLASSES, pre_function, shuu, VGG16N, tools) defined
    elsewhere in the file.
    """

    step = 0  #step
    bs = 128  #batch size
    pre_trained_weights = main_dir + 'vgg16.npy'  #vgg16 weight
    train_log_dir = main_dir + 'trainloggm1rss/tlog'  #train log path
    val_log_dir = main_dir + 'trainloggm1rss/vlog'  # val log path
    train_data_dir = main_dir + 'ymodellog'  # save model path
    #    rd=main_dir+'modellog'
    #train data
    tra_filename = np.load(main_dir + "sf_filename.npy")
    tra_label = np.load(main_dir + "sf_label.npy")
    tra_vector = np.load(main_dir + "sf_vector.npy")
    tra_4 = np.load(main_dir + "sf_4.npy")
    #val data
    val_filename = np.load(main_dir + "sf_gm1vfilename.npy")
    val_label = np.load(main_dir + "sf_gm1vlabel.npy")
    val_vector = np.load(main_dir + "sf_gm1vvector.npy")
    val_4 = np.load(main_dir + "sf_gm1v4.npy")
    with tf.Graph().as_default() as g:
        # Feed the numpy arrays through placeholders so they are not baked
        # into the graph as constants.
        tra_image_p = tf.placeholder(tra_filename.dtype, tra_filename.shape)
        tra_label_p = tf.placeholder(tra_label.dtype, tra_label.shape)
        tra_vector_p = tf.placeholder(tra_vector.dtype, tra_vector.shape)
        tra_4_p = tf.placeholder(tra_4.dtype, tra_4.shape)
        tdataset = tf.contrib.data.Dataset.from_tensor_slices(
            (tra_image_p, tra_label_p, tra_vector_p, tra_4_p))
        tdataset = tdataset.map(pre_function, num_threads=64)
        tdataset = tdataset.shuffle(1024 * 16)
        tdataset = tdataset.repeat()  # repeat indefinitely
        tdataset = tdataset.batch(bs)
        tra_iterator = tdataset.make_initializable_iterator()

        val_image_p = tf.placeholder(val_filename.dtype, val_filename.shape)
        val_label_p = tf.placeholder(val_label.dtype, val_label.shape)
        val_vector_p = tf.placeholder(val_vector.dtype, val_vector.shape)
        val_4_p = tf.placeholder(val_4.dtype, val_4.shape)
        vdataset = tf.contrib.data.Dataset.from_tensor_slices(
            (val_image_p, val_label_p, val_vector_p, val_4_p))
        vdataset = vdataset.map(pre_function)
        vdataset = vdataset.repeat()  # repeat indefinitely
        vdataset = vdataset.batch(bs)
        val_iterator = vdataset.make_initializable_iterator()
        # Generate placeholders for the images and labels.
        x = tf.placeholder(tf.float32, shape=[bs, 224, 224, 3])
        v = tf.placeholder(tf.float32, shape=[bs, 280])
        y_ = tf.placeholder(tf.int32, shape=[bs, 2])  #??
        s_ = tf.placeholder(tf.float32, shape=[bs, 4])  #??
        BN_istrain = tf.placeholder(tf.bool)
        # Build a Graph that computes predictions from the inference model.
        logits = VGG16N.VGG16N(x, N_CLASSES, v, BN_istrain)
        # Add to the Graph the Ops for loss calculation.
        loss, mean_summary, total_loss_summary, loss_averages_op = tools.loss(
            logits, y_, s_)
        # Add to the Graph the Ops that calculate and apply gradients.
        my_global_step = tf.Variable(0, name='global_step', trainable=False)
        train_op = tools.optimize(loss, my_global_step, loss_averages_op)
        # Add the Op to compare the logits to the labels during evaluation.
        accuracy, accuracy_summary = tools.accuracy(logits, y_)
        # Build the summary Tensor based on the TF collection of Summaries.
        summary = tf.summary.merge(
            [mean_summary, accuracy_summary, total_loss_summary])
        # Add the variable initializer Op.
        saver = tf.train.Saver(max_to_keep=100)
        init = tf.global_variables_initializer()
        # Create a saver for writing training checkpoints.
        # Create a session for running Ops on the Graph.
        sess = tf.Session()

        # Instantiate a SummaryWriter to output summaries and the Graph.
        tra_summary_writer = tf.summary.FileWriter(train_log_dir, sess.graph)
        val_summary_writer = tf.summary.FileWriter(val_log_dir, sess.graph)

        # And then after everything is built:
        # Run the Op to initialize the variables.
        sess.run(init)
        tools.load_with_skip(pre_trained_weights, sess, ['fc6', 'fc7', 'fc8'])
        #        sess.run(tra_iterator.initializer, feed_dict={tra_image_p: tra_filename,tra_label_p: tra_label,tra_vector_p: tra_vector})
        sess.run(val_iterator.initializer,
                 feed_dict={
                     val_image_p: val_filename,
                     val_label_p: val_label,
                     val_vector_p: val_vector,
                     val_4_p: val_4
                 })
        tra_next = tra_iterator.get_next()
        val_next = val_iterator.get_next()
        print("Reading checkpoints...")

        for epoch in range(num_epoch):
            # Re-shuffle the training arrays on disk, then reload them.
            # NOTE(review): file prefix changes from "sf_" to "gm1sf_" here
            # — presumably shuu.shu() writes the shuffled copies; confirm.
            shuu.shu()
            tra_filename = np.load(main_dir + "gm1sf_filename.npy")
            tra_label = np.load(main_dir + "gm1sf_label.npy")
            tra_vector = np.load(main_dir + "gm1sf_vector.npy")
            tra_4 = np.load(main_dir + "gm1sf_4.npy")
            sess.run(tra_iterator.initializer,
                     feed_dict={
                         tra_image_p: tra_filename,
                         tra_label_p: tra_label,
                         tra_vector_p: tra_vector,
                         tra_4_p: tra_4
                     })
            # NOTE(review): the inner `for step` shadows the outer `step`;
            # with `repeat()` on both datasets OutOfRangeError should not
            # fire, so the `while True` normally runs the for-loop once.
            while True:
                try:
                    for step in range(MAX_STEP):
                        tra_all = sess.run(tra_next)
                        tra_i = tra_all[0]
                        tra_l = tra_all[1]
                        tra_v = tra_all[2]
                        tra_f = tra_all[3]
                        summary_str, _, tra_loss, tra_acc = sess.run(
                            [summary, train_op, loss, accuracy],
                            feed_dict={
                                x: tra_i,
                                y_: tra_l,
                                v: tra_v,
                                s_: tra_f,
                                BN_istrain: True
                            })

                        if step % 20 == 0 or (step + 1) == MAX_STEP:
                            tra_summary_writer.add_summary(summary_str, step)
#                        print ('Step: %d, loss: %.4f' % (step, tra_loss))

                        # Same cadence as the training summaries above.
                        if step % 20 == 0 or (step + 1) == MAX_STEP:
                            val_all = sess.run(val_next)
                            val_i = val_all[0]
                            val_l = val_all[1]
                            val_v = val_all[2]
                            val_f = val_all[3]
                            val_loss, val_acc = sess.run(
                                [loss, accuracy],
                                feed_dict={
                                    x: val_i,
                                    y_: val_l,
                                    v: val_v,
                                    s_: val_f,
                                    BN_istrain: False
                                })
                            print(
                                '**  Step %d, val loss = %.2f, val accuracy = %.2f%%  **'
                                % (step, val_loss, val_acc))

                            summary_str = sess.run(summary,
                                                   feed_dict={
                                                       x: val_i,
                                                       y_: val_l,
                                                       v: val_v,
                                                       s_: val_f,
                                                       BN_istrain: False
                                                   })
                            val_summary_writer.add_summary(summary_str, step)


#                    if step == 99:  # Record execution stats
#                        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
#                        run_metadata = tf.RunMetadata()
#                        summary_str, _= sess.run([summary,train_op],
#                                                    feed_dict={x:tra_i, y_:tra_l, v:tra_v, BN_istrain:True},options=run_options,run_metadata=run_metadata)
#                        tra_summary_writer.add_run_metadata(run_metadata, 'step%d' % step)
#                        tra_summary_writer.add_summary(summary_str, step)
#                        print('Adding run metadata for', step)
                        # NOTE(review): also checkpoints at step 0 of every
                        # epoch, overwriting by global_step=step — confirm.
                        if step % 10000 == 0:
                            checkpoint_path = os.path.join(
                                train_data_dir, 'model.ckpt')
                            saver.save(sess, checkpoint_path, global_step=step)

                except tf.errors.OutOfRangeError:
                    break
        sess.close()
Example #7
def train():
    """Fine-tune VGG16N on the LED large/small-crop dataset.

    Builds queue-based input batches, restores pretrained weights
    (skipping fc6-fc8), and runs the training loop with periodic summary
    logging, validation, and checkpointing. Relies on module-level config
    (IMG_W, IMG_H, BATCH_SIZE, CAPACITY, N_CLASSES, IS_PRETRAIN,
    learning_rate, MAX_STEP).
    """
    pre_trained_weights = '/home/xiaoyi/data/LED/VGG16_pretrained/vgg16.npy'
    large_dir = '/home/xiaoyi/data/LED/data/train/train_large_crop/'
    small_dir = '/home/xiaoyi/data/LED/data/train/train_small_crop/'
    val_large_dir = '/home/xiaoyi/data/LED/test/test_large/'
    val_small_dir = '/home/xiaoyi/data/LED/test/test_small/'
    train_log_dir = '/home/xiaoyi/data/LED/logs1/train/'
    val_log_dir = '/home/xiaoyi/data/LED/logs1/val/'

    with tf.name_scope('input'):
        train, train_label = input_data.get_files(large_dir, small_dir)
        train_batch, train_label_batch = input_data.get_batch(
            train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
        val, val_label = input_data.get_files(val_large_dir, val_small_dir)
        val_batch, val_label_batch = input_data.get_batch(
            val, val_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

    x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, IMG_W, IMG_H, 3])
    y_ = tf.placeholder(tf.int16, shape=[BATCH_SIZE, N_CLASSES])

    # BUG FIX: build the model on the placeholders. The original built it
    # directly on train_batch, so every feed_dict below had no effect and
    # "validation" actually re-evaluated fresh training batches.
    logits = VGG.VGG16N(x, N_CLASSES, IS_PRETRAIN)
    loss = tools.loss(logits, y_)
    accuracy = tools.accuracy(logits, y_)
    my_global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = tools.optimize(loss, learning_rate, my_global_step)

    saver = tf.train.Saver(tf.global_variables())
    summary_op = tf.summary.merge_all()

    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    tools.load_with_skip(pre_trained_weights, sess, ['fc6', 'fc7', 'fc8'])

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    tra_summary_writer = tf.summary.FileWriter(train_log_dir, sess.graph)
    val_summary_writer = tf.summary.FileWriter(val_log_dir, sess.graph)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break

            tra_images, tra_labels = sess.run([train_batch, train_label_batch])
            _, tra_loss, tra_acc = sess.run([train_op, loss, accuracy],
                                            feed_dict={
                                                x: tra_images,
                                                y_: tra_labels
                                            })

            if step % 50 == 0 or (step + 1) == MAX_STEP:
                print('Step: %d,loss:%.4f,accuracy:%.4f%%' %
                      (step, tra_loss, tra_acc))
                # Summaries now depend on the placeholders, so feed them.
                summary_str = sess.run(summary_op,
                                       feed_dict={
                                           x: tra_images,
                                           y_: tra_labels
                                       })
                tra_summary_writer.add_summary(summary_str, step)

            if step % 200 == 0 or (step + 1) == MAX_STEP:
                val_images, val_labels = sess.run([val_batch, val_label_batch])
                val_loss, val_acc = sess.run([loss, accuracy],
                                             feed_dict={
                                                 x: val_images,
                                                 y_: val_labels
                                             })
                print('** Step %d,val loss = %.2f,val accuracy = %.2f%% **' %
                      (step, val_loss, val_acc))

                summary_str = sess.run(summary_op,
                                       feed_dict={
                                           x: val_images,
                                           y_: val_labels
                                       })
                val_summary_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(train_log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')

    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
Example #8
## Content loss
content_loss = tools.content_loss(generated_feature_maps[CONTENT_LAYER],
                                  content_features[CONTENT_LAYER],
                                  content_weight)

## Total loss (style_loss is built earlier, outside this chunk)
loss = content_loss + style_loss

train_step = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(loss)

init = tf.global_variables_initializer()
with tf.Session() as sess:
    print('in')
    sess.run(init)
    # Load VGG weights; parameters named in the skip list are not loaded.
    tools.load_with_skip(pre_trained_weights, sess,
                         ['conv5_2', 'conv5_3', 'fc6', 'fc7', 'fc8'])

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    ## Compute the Gram matrices of the style image's per-layer feature maps.
    # NOTE(review): the [style_gram] fetch list makes this a 1-element list.
    style_gram_data = sess.run([style_gram],
                               feed_dict={style_image: styImgReaded})

    print('over')

    coord.join(threads)
    sess.close()
Example #9
def train():
    """10-fold cross-validated fine-tuning of VGG16N on face data.

    For each KFold split the default graph is rebuilt from scratch
    (tf.reset_default_graph), pretrained weights are restored (skipping
    fc6-fc8), and checkpoints are saved for the best validation accuracy
    within the fold and for the best accuracy across all folds.

    NOTE(review): depends on module-level names (notMNIST_input, KFold,
    IMG_W, IMG_H, BATCH_SIZE, capacity, N_CLASSES, IS_PRETRAIN,
    starter_learning_rate, MAX_STEP, VGG, tools) defined elsewhere.
    """

    #    pre_trained_weights1 = './/vgg16.npy'
    pre_trained_weights = './/vgg-face.mat'
    data_dir = '/home/hadoop/Desktop/My-TensorFlow-tutorials-master/VGG face segmentation  recognition/data/segmentation/training/'
    train_log_dir = './/logss/train_shuffle/'
    val_log_dir = './/logss/va_shuffle/'

    #    image_batch, label_batch = notMNIST_input.read_and_decode(tfrecords_file,BATCH_SIZE)
    image, label = notMNIST_input.get_file(data_dir)
    #        image_batch,label_batch=notMNIST_input.get_batch(image, label, IMG_W, IMG_H, BATCH_SIZE, capacity)
    X = np.array(image)
    Y = np.array(label)
    kf = KFold(n_splits=10, shuffle=False)
    total_acc = 0
    for train, test in kf.split(X, Y):
        # Fresh graph per fold so variables/queues do not accumulate.
        tf.reset_default_graph()
        image_batch, label_batch = notMNIST_input.get_batch(X[train],
                                                            Y[train],
                                                            IMG_W,
                                                            IMG_H,
                                                            BATCH_SIZE,
                                                            capacity,
                                                            shuffle=True)
        image_batch_validate, label_batch_validate = notMNIST_input.get_batch(
            X[test],
            Y[test],
            IMG_W,
            IMG_H,
            BATCH_SIZE,
            capacity,
            shuffle=False)
        #        print("dddd")
        ##        print("train_index: , test_index:", (X[train],Y[train],X[test],Y[test]))
        # NOTE(review): "/n" in these prints is likely a typo for "\n";
        # left as-is because it is runtime output text.
        print("X[train]/n", len(X[train]))
        print("Y[train]/n", len(Y[train]))
        print("X[test]", len(X[test]))
        print("Y[test]", len(Y[test]))

        #cast (1.8,3.4)float32 to (1,3)int64

        x = tf.placeholder(tf.float32,
                           shape=[BATCH_SIZE, IMG_W, IMG_H, 3],
                           name='place_x')
        # Sparse integer labels (rank-1), unlike the one-hot variants above.
        y_ = tf.placeholder(tf.int64, shape=[
            BATCH_SIZE,
        ], name='place_y')
        logits = VGG.VGG16N(x, N_CLASSES, IS_PRETRAIN)
        print("****logits shape is ", logits.shape)

        loss = tools.loss(logits, y_)

        print("label_batch is ", y_.shape)
        accuracy = tools.accuracy(logits, y_)

        my_global_step = tf.Variable(0, name='global_step', trainable=False)
        #learning_rate = tf.train.exponential_decay(starter_learning_rate, my_global_step,
        #  2200, 0.96, staircase=True)
        train_op = tools.optimize(loss, starter_learning_rate, my_global_step)
        #    train_op_vali = tools.optimize(loss_vali, learning_rate, my_global_step)

        saver = tf.train.Saver(tf.global_variables())
        summary_op = tf.summary.merge_all()

        init = tf.global_variables_initializer()

        sess = tf.Session()

        sess.run(init)

        # load the parameter file, assign the parameters, skip the specific layers
        tools.load_with_skip(pre_trained_weights, sess, ['fc6', 'fc7', 'fc8'])

        # NOTE(review): merged_summaries duplicates summary_op above and is
        # never used afterwards.
        merged_summaries = tf.summary.merge_all()
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        tra_summary_writer = tf.summary.FileWriter(train_log_dir, sess.graph)
        val_summary_writer = tf.summary.FileWriter(val_log_dir)
        max_acc = 0
        total_time = 0

        try:
            for step in np.arange(MAX_STEP):
                if coord.should_stop():
                    break
                start_time = time.time()
                #        with tf.Session() as sess:

                #                 for train, test in kf.split(X,Y):
                #                     image_batch,label_batch=notMNIST_input.get_batch(X[train], Y[train], IMG_W, IMG_H, BATCH_SIZE, capacity)
                #                     image_batch_validate, label_batch_validate=notMNIST_input.get_batch(X[test], Y[test], IMG_W, IMG_H, BATCH_SIZE, capacity)
                #                     label_batch = tf.cast(label_batch,dtype=tf.int64)
                x_train_a, y_train_a = sess.run([image_batch, label_batch])
                x_test_a, y_test_a = sess.run(
                    [image_batch_validate, label_batch_validate])
                #            _, tra_loss, tra_acc = sess.run([train_op, loss, accuracy])
                #            tra_images,tra_labels = sess.run([image_batch, label_batch])
                _, tra_loss, tra_acc = sess.run([train_op, loss, accuracy],
                                                feed_dict={
                                                    x: x_train_a,
                                                    y_: y_train_a
                                                })

                if step % 10 == 0 or (step + 1) == MAX_STEP:
                    feed_dict = {x: x_train_a, y_: y_train_a}
                    summary_str = sess.run(summary_op, feed_dict=feed_dict)
                    tra_summary_writer.add_summary(summary_str, step)
                    time_elapsed = time.time() - start_time
                    print(
                        'Step:%d , loss: %.2f, accuracy: %.2f%%(%.2f sec/step)'
                        % (step, tra_loss, tra_acc * 100, time_elapsed))

                    total_time = total_time + time_elapsed
                    if step % 50 == 0:
                        print('total time is :%.2f' % (total_time))

                if step % 200 == 0 or (step + 1) == MAX_STEP:

                    val_loss, val_acc = sess.run([loss, accuracy],
                                                 feed_dict={
                                                     x: x_test_a,
                                                     y_: y_test_a
                                                 })
                    feed_dict = {x: x_test_a, y_: y_test_a}
                    summary_str = sess.run(summary_op, feed_dict=feed_dict)
                    val_summary_writer.add_summary(summary_str, step)

                    #                if cur_val_loss > max_acc:
                    #                         max_acc = cur_val_loss
                    #                         best_step = step
                    #                         checkpoint_path = os.path.join(train_log_dir, 'model.ckpt')
                    #                         saver.save(sess, checkpoint_path, global_step=step)
                    #                val_summary_writer.add_summary(summary, step)
                    #                print("Model updated and saved in file: %s" % checkpoint_path)
                    #                print ('*************step %5d: loss %.5f, acc %.5f --- loss val %0.5f, acc val %.5f************'%(best_step,tra_loss, tra_acc, cur_val_loss, cur_val_eval))

                    #

                    print(
                        '************validate result:Step:%d , loss: %.2f, accuracy: %.2f%%(%.2f sec/step)'
                        % (step, val_loss, val_acc * 100, time_elapsed))
                    # Keep the best checkpoint within this fold.
                    if val_acc > max_acc:
                        max_acc = val_acc
                        checkpoint_path = os.path.join(train_log_dir,
                                                       'model.ckpt')
                        saver.save(sess, checkpoint_path, global_step=step)
            # After the fold: keep the best checkpoint across all folds.
            if max_acc > total_acc:
                total_acc = max_acc
                checkpoint_path = os.path.join(val_log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        finally:
            coord.request_stop()

        coord.join(threads)
        sess.close()
Example #10
def train():
    """Fine-tune VGG16 on the AI-Challenger scene dataset.

    Builds two queue-based input pipelines (train/val), wires the network to
    feed placeholders, restores pretrained conv weights (fc6/fc7/fc8 are
    skipped), then runs the training loop with periodic console logging,
    validation and checkpointing.

    Relies on module-level globals: IMG_W, IMG_H, BATCH_SIZE, CAPACITY,
    N_CLASSES, IS_PRETRAIN, learning_rate, MAX_STEP, input_data, VGG, tools.
    """
    pre_trained_weights = './vgg16_pretrain/vgg16.npy'
    train_data_dir = './data/train/scene_train_images_20170904/'
    train_label_json = './data/train/scene_train_annotations_20170904.json'
    val_data_dir = './data/val/scene_validation_images_20170908/'
    val_label_json = './data/val/scene_validation_annotations_20170908.json'
    train_log_dir = './logs/train/'

    with tf.name_scope('input'):
        # Independent pipelines for training and validation batches.
        file_list, label_list = input_data.get_files(train_label_json,
                                                     train_data_dir)
        tra_image_batch, tra_label_batch = input_data.get_batch(
            file_list, label_list, IMG_W, IMG_H, BATCH_SIZE, CAPACITY,
            N_CLASSES)

        val_file_list, val_label_list = input_data.get_files(val_label_json,
                                                             val_data_dir)
        val_image_batch, val_label_batch = input_data.get_batch(
            val_file_list, val_label_list, IMG_W, IMG_H, BATCH_SIZE, CAPACITY,
            N_CLASSES)

    x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, IMG_W, IMG_H, 3])
    y_ = tf.placeholder(tf.int16, shape=[BATCH_SIZE, N_CLASSES])
    keep_prob = tf.placeholder(tf.float32)

    logits = VGG.VGG16N(x, N_CLASSES, keep_prob, IS_PRETRAIN)
    loss = tools.loss(logits, y_)
    accuracy = tools.accuracy(logits, y_)
    my_global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = tools.optimize(loss, learning_rate, my_global_step)

    saver = tf.train.Saver(tf.global_variables())

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    # Load the pretrained parameter file, skipping the fully connected layers.
    tools.load_with_skip(pre_trained_weights, sess, ['fc6', 'fc7', 'fc8'])

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break

            batch_images, batch_labels = sess.run(
                [tra_image_batch, tra_label_batch])
            _, tra_loss, tra_acc = sess.run(
                [train_op, loss, accuracy],
                feed_dict={x: batch_images, y_: batch_labels, keep_prob: 0.2})

            is_last = (step + 1) == MAX_STEP
            if step % 50 == 0 or is_last:
                print ('Step: %d, loss: %.3f, accuracy: %.3f%%' % (step, tra_loss, tra_acc))

            if step % 200 == 0 or is_last:
                # Evaluate one validation batch with dropout disabled.
                v_images, v_labels = sess.run(
                    [val_image_batch, val_label_batch])
                val_loss, val_acc = sess.run(
                    [loss, accuracy],
                    feed_dict={x: v_images, y_: v_labels, keep_prob: 1})
                print('**  Step %d, val loss = %.2f, val accuracy = %.2f%%  **' %(step, val_loss, val_acc))

            if step % 2000 == 0 or is_last:
                saver.save(sess, os.path.join(train_log_dir, 'model.ckpt'),
                           global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
def patch_train(cluster, folder_clustermaps):
    '''
    Train the patch-wise VGG16 (no batch norm) classifier for one cluster.

    :param cluster: the cluster to perform patch-wise classify
    :param folder_clustermaps: folder of cluster maps

    Relies on module-level globals defined elsewhere in this file:
    BATCH_SIZE, IMG_W, IMG_H, IMG_D, N_CLASSES, TRAINABLE, learning_rate,
    MAX_EPOCH, data_dir, pre_trained_weights, feature_to_classify,
    IS_FINETUNING, input_data, VGG, tools.
    '''
    train_log_dir = './/logs//train//'
    val_log_dir = './/logs//val//'
    feature_dict = input_data.get_feature_dict('D://data//1-10//data.csv',
                                               feature_to_classify)

    # Build the VGG16-like CNN on feed placeholders.
    x = tf.placeholder(tf.float32, shape=(BATCH_SIZE, IMG_W, IMG_H, IMG_D))
    y_ = tf.placeholder(tf.int16, shape=(BATCH_SIZE, N_CLASSES))
    logits = VGG.VGG16_nobn(x, N_CLASSES, TRAINABLE)
    loss = tools.loss(logits, y_)
    accuracy = tools.accuracy(logits, y_)
    my_global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = tools.optimize(loss, learning_rate, my_global_step)
    saver = tf.train.Saver(tf.global_variables())
    summary_op = tf.summary.merge_all()
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        if IS_FINETUNING:
            # Load the pretrained weights; the fully connected layers are
            # skipped so they train from scratch for the new task.
            print('**  Loading pre-trained weights  **')
            tools.load_with_skip(pre_trained_weights, sess,
                                 ['fc6', 'fc7', 'fc8'])

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        tra_summary_writer = tf.summary.FileWriter(train_log_dir, sess.graph)
        val_summary_writer = tf.summary.FileWriter(val_log_dir, sess.graph)
        shuffled_list_train = input_data.get_full_list(
            data_type='train',
            cluster=cluster,
            folder=data_dir,
            folder_clustermaps=folder_clustermaps)
        shuffled_list_val = input_data.get_full_list(
            data_type='val',
            cluster=cluster,
            folder=data_dir,
            folder_clustermaps=folder_clustermaps)
        # Tile the validation list 8x (three doublings) -- presumably so the
        # step // 50 indexing below stays in range over a long epoch; TODO
        # confirm against read_local_data.
        shuffled_list_val = np.hstack((shuffled_list_val, shuffled_list_val))
        shuffled_list_val = np.hstack((shuffled_list_val, shuffled_list_val))
        shuffled_list_val = np.hstack((shuffled_list_val, shuffled_list_val))

        try:
            for epoch in np.arange(MAX_EPOCH):
                np.random.shuffle(shuffled_list_train)
                np.random.shuffle(shuffled_list_val)
                max_step = int(len(shuffled_list_train) / BATCH_SIZE)
                for step in np.arange(max_step):
                    tra_image_batch, tra_label_batch = input_data.read_local_data(
                        data_dir=data_dir,
                        batch_size=BATCH_SIZE,
                        step=step,
                        feature_dict=feature_dict,
                        n_classes=N_CLASSES,
                        name_list=shuffled_list_train)
                    if coord.should_stop():
                        break

                    tra_labels = sess.run(tra_label_batch)
                    tra_images = tra_image_batch
                    _, tra_loss, tra_acc = sess.run([train_op, loss, accuracy],
                                                    feed_dict={
                                                        x: tra_images,
                                                        y_: tra_labels
                                                    })
                    if step % 10 == 0:
                        print(
                            'Epoch: %d (MAX_EPOCH = %d), Step: %d (MAX_Step = %d), loss: %.4f, accuracy: %.4f%%'
                            % (epoch, MAX_EPOCH, step, max_step, tra_loss,
                               tra_acc))

                        summary_str = sess.run(summary_op,
                                               feed_dict={
                                                   x: tra_images,
                                                   y_: tra_labels
                                               })
                        tra_summary_writer.add_summary(summary_str, step)

                    if step % 50 == 0:
                        # BUG FIX: floor division.  Under Python 3,
                        # step / 50 is a float and breaks integer batch
                        # indexing downstream; values are unchanged on the
                        # steps where this branch fires.
                        val_image_batch, val_label_batch = input_data.read_local_data(
                            data_dir=data_dir,
                            batch_size=BATCH_SIZE,
                            step=step // 50,
                            feature_dict=feature_dict,
                            n_classes=N_CLASSES,
                            name_list=shuffled_list_val)
                        val_labels = sess.run(val_label_batch)
                        val_images = val_image_batch
                        val_loss, val_acc = sess.run([loss, accuracy],
                                                     feed_dict={
                                                         x: val_images,
                                                         y_: val_labels
                                                     })
                        print(
                            '**  Epoch: %d, Step %d, val loss = %.2f, val accuracy = %.2f%%  **'
                            % (epoch, step, val_loss, val_acc))

                        summary_str = sess.run(summary_op,
                                               feed_dict={
                                                   x: val_images,
                                                   y_: val_labels
                                               })
                        val_summary_writer.add_summary(summary_str, step)

                        # Nested inside the %50 branch; equivalent to a
                        # top-level check since 2000 is a multiple of 50.
                        if step % 2000 == 0:
                            checkpoint_path = os.path.join(
                                train_log_dir, 'model_' + str(epoch) + '_' +
                                str(step) + '.ckpt')
                            saver.save(sess, checkpoint_path, global_step=step)
        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        finally:
            coord.request_stop()
        coord.join(threads)
def train():
    """Fine-tune VGG16N on CIFAR-10 with queue-runner input pipelines.

    Relies on module-level globals: BATCH_SIZE, IMG_W, IMG_H, N_CLASSES,
    IS_PRETRAIN, learning_rate, MAX_STEP, input_data, VGG, tools.
    """
    pre_trained_weights = './/vgg16_pretrain//vgg16.npy'
    data_dir = './/data//cifar-10-batches-bin//'
    train_log_dir = './/logs//train//'
    val_log_dir = './/logs//val//'

    with tf.name_scope('input'):
        tra_image_batch, tra_label_batch = input_data.read_cifar10(data_dir=data_dir,
                                                 is_train=True,
                                                 batch_size= BATCH_SIZE,
                                                 shuffle=True)
        val_image_batch, val_label_batch = input_data.read_cifar10(data_dir=data_dir,
                                                 is_train=False,
                                                 batch_size= BATCH_SIZE,
                                                 shuffle=False)

    # BUG FIX: declare the placeholders first and build the network on them.
    # Previously logits/loss/accuracy were wired directly to
    # tra_image_batch/tra_label_batch, so the feed_dict below was silently
    # ignored and the "validation" numbers were computed on training batches.
    x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, IMG_W, IMG_H, 3])
    y_ = tf.placeholder(tf.int16, shape=[BATCH_SIZE, N_CLASSES])

    logits = VGG.VGG16N(x, N_CLASSES, IS_PRETRAIN)
    loss = tools.loss(logits, y_)
    accuracy = tools.accuracy(logits, y_)
    my_global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = tools.optimize(loss, learning_rate, my_global_step)

    saver = tf.train.Saver(tf.global_variables())
    summary_op = tf.summary.merge_all()

    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    # Load the pretrained parameter file, skipping the fully connected layers.
    tools.load_with_skip(pre_trained_weights, sess, ['fc6','fc7','fc8'])

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    tra_summary_writer = tf.summary.FileWriter(train_log_dir, sess.graph)
    val_summary_writer = tf.summary.FileWriter(val_log_dir, sess.graph)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break

            tra_images, tra_labels = sess.run([tra_image_batch, tra_label_batch])
            _, tra_loss, tra_acc = sess.run([train_op, loss, accuracy],
                                            feed_dict={x:tra_images, y_:tra_labels})
            if step % 50 == 0 or (step + 1) == MAX_STEP:
                print ('Step: %d, loss: %.4f, accuracy: %.4f%%' % (step, tra_loss, tra_acc))
                # Summaries depend on the placeholders now, so they need a feed.
                summary_str = sess.run(summary_op,
                                       feed_dict={x: tra_images, y_: tra_labels})
                tra_summary_writer.add_summary(summary_str, step)

            if step % 200 == 0 or (step + 1) == MAX_STEP:
                val_images, val_labels = sess.run([val_image_batch, val_label_batch])
                val_loss, val_acc = sess.run([loss, accuracy],
                                             feed_dict={x:val_images,y_:val_labels})
                print('**  Step %d, val loss = %.2f, val accuracy = %.2f%%  **' %(step, val_loss, val_acc))

                summary_str = sess.run(summary_op,
                                       feed_dict={x: val_images, y_: val_labels})
                val_summary_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(train_log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
Пример #13
0
def train():
    """Fine-tune VGG16N on the rotated UCM dataset.

    Builds Dataset-API train/val batches, trains with feed placeholders,
    optionally resumes from an earlier checkpoint (RESTORE_MODEL), and
    writes summaries and checkpoints.

    Relies on module-level globals: IMG_W, IMG_H, BATCH_SIZE, CAPACITY,
    N_CLASSES, IS_PRETRAIN, learning_rate, MAX_STEP, RESTORE_MODEL,
    get_files, get_batch_datasetVersion, VGG, tools.
    """
    pre_trained_weights = r'/home/vincent/Desktop/jsl thesis/grad thesis/data/vgg16_pretrained/vgg16.npy'
    data_train_dir = r'/home/vincent/Desktop/jsl thesis/GradTest_vinny/UCM/dataset_rotated/train'
    data_test_dir = r'/home/vincent/Desktop/jsl thesis/GradTest_vinny/UCM/dataset_rotated/validation/'
    train_log_dir = r'/home/vincent/Desktop/jsl thesis/GradTest_vinny/UCM/dataset_rotated/logs/train'
    val_log_dir = r'/home/vincent/Desktop/jsl thesis/GradTest_vinny/UCM/dataset_rotated/logs/val'

    with tf.name_scope('input'):
        image_train_list, label_train_list = get_files(data_train_dir)
        image_val_list, label_val_list = get_files(data_test_dir)
        # Dataset-API batching; each sess.run(image_batch) yields one
        # (images, labels) pair.
        image_batch = get_batch_datasetVersion(image_train_list,
                                               label_train_list, IMG_W, IMG_H,
                                               BATCH_SIZE, CAPACITY)
        val_batch = get_batch_datasetVersion(image_val_list, label_val_list,
                                             IMG_W, IMG_H, BATCH_SIZE,
                                             CAPACITY)

    # Batch dimension is None so a final, possibly smaller batch still fits.
    x = tf.placeholder(tf.float32, shape=[None, IMG_W, IMG_H, 3])
    y_ = tf.placeholder(tf.int16, shape=[None, N_CLASSES])

    logits = VGG.VGG16N(x, N_CLASSES, IS_PRETRAIN)
    loss = tools.loss(logits, y_)
    accuracy = tools.accuracy(logits, y_)
    my_global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = tools.optimize(loss, learning_rate, my_global_step)

    saver = tf.train.Saver(tf.global_variables())
    summary_op = tf.summary.merge_all()

    init = tf.global_variables_initializer()
    sess = tf.Session()

    sess.run(init)

    # Load the pretrained parameter file, skipping the fully connected layers.
    tools.load_with_skip(pre_trained_weights, sess, ['fc6', 'fc7', 'fc8'])
    coord = tf.train.Coordinator()
    tra_summary_writer = tf.summary.FileWriter(train_log_dir, sess.graph)
    val_summary_writer = tf.summary.FileWriter(val_log_dir, sess.graph)

    # Optionally resume from an earlier checkpoint.
    if RESTORE_MODEL:
        # BUG FIX: the original printed a literal "/n"; "\n" was intended.
        print("Reading checkpoints...\n")

        log_dir = r'/home/vincent/Desktop/jsl thesis/GradTest_vinny/UCM/dataset_rotated/logs/train'
        model_name = r'model.ckpt-2000.meta'
        data_name = r'model.ckpt-2000'
        # Restore the graph structure ...
        saver = tf.train.import_meta_graph(log_dir + os.sep + model_name)
        # ... then the parameter values.
        saver.restore(sess, log_dir + os.sep + data_name)
        print("Loading checkpoints successfully!!\n")

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break

            tra_images, tra_labels = sess.run(image_batch)
            _, tra_loss, tra_acc = sess.run([train_op, loss, accuracy],
                                            feed_dict={
                                                x: tra_images,
                                                y_: tra_labels
                                            })
            if step % 50 == 0 or (step + 1) == MAX_STEP:
                print('Step: %d, loss: %.4f, accuracy: %.4f%%' %
                      (step, tra_loss, tra_acc))
                summary_str = sess.run(summary_op,
                                       feed_dict={
                                           x: tra_images,
                                           y_: tra_labels
                                       })
                tra_summary_writer.add_summary(summary_str, step)

            if step % 200 == 0 or (step + 1) == MAX_STEP:
                val_images, val_labels = sess.run(val_batch)
                val_loss, val_acc = sess.run([loss, accuracy],
                                             feed_dict={
                                                 x: val_images,
                                                 y_: val_labels
                                             })
                print(
                    '**  Step %d, val loss = %.2f, val accuracy = %.2f%%  **' %
                    (step, val_loss, val_acc))

                summary_str = sess.run(summary_op,
                                       feed_dict={
                                           x: val_images,
                                           y_: val_labels
                                       })
                val_summary_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(train_log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    sess.close()
def train(retrain=False):
    """Export a frozen inference graph from checkpoint ``model.ckpt-6000``.

    Despite the name, the active code path does no training: it builds the
    VGG16 graph, initializes variables, loads npy weights, restores the
    ``model.ckpt-6000`` checkpoint, freezes the graph at node ``fc8/relu``
    and writes it to ``vgg_6000.pb``.  The training/evaluation loops are
    kept below only as commented-out (triple-quoted) reference code;
    ``retrain`` is unused on this path.
    """
    data_dir = '/home/rong/something_for_deep/cifar-10-batches-bin'
    npy_dir = '/home/rong/something_for_deep/vgg16.npy'
    train_log_dir = './logs/train'
    val_log_dir = './logs/val'

    train_image_batch, train_label_batch = input_data.read_cifar10(
        data_dir=data_dir, is_train=True, batch_size=BATCH_SIZE, shuffle=True)
    val_image_batch, val_label_batch = input_data.read_cifar10(
        data_dir=data_dir,
        is_train=False,
        batch_size=BATCH_SIZE,
        shuffle=False)

    # Placeholders for the image batch and the label batch.
    x = tf.placeholder(tf.float32,
                       shape=[BATCH_SIZE, IMG_W, IMG_H, IMG_CHANNELS])
    y_ = tf.placeholder(tf.int16, shape=[BATCH_SIZE, NUM_CLASSES])

    # Instantiate the VGG16 model object.
    vgg = model.VGG16()

    # Key graph nodes: logits, loss and accuracy.
    logits = vgg.build(x, NUM_CLASSES, IS_PRETRAIN)
    loss = tools.loss(logits, y_)
    accuracy = tools.accuracy(logits, y_)

    my_global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = tools.optimize(loss, learning_rate, my_global_step)
    train_op2 = tools.optimize2(loss, learning_rate)

    saver = tf.train.Saver()  # original author was unsure what the ctor argument is for

    summary_op = tf.summary.merge_all()

    # Initialize all variables (an older initializer API exists but seems deprecated).
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    # Load every layer's weights from the npy file except the fully connected ones.
    tools.load_with_skip(npy_dir, sess, ['fc6', 'fc7', 'fc8'])

    # NOTE(review): this restore reassigns all checkpointed variables,
    # overwriting the npy weights loaded just above.
    saver.restore(sess, './logs/train/model.ckpt-6000')
    output_graph_def = convert_variables_to_constants(
        sess, sess.graph_def, output_node_names=['fc8/relu'])

    # Serialize the frozen graph to a .pb file.
    with tf.gfile.FastGFile('vgg_6000.pb', mode='wb') as f:
        f.write(output_graph_def.SerializeToString())
    '''
    #下面的和多线程有关,暂时不懂
    coord = tf.train.Coordinator() #宣布线程管理器
    threads = tf.train.start_queue_runners(sess=sess, coord=coord) #线程负责把文件加入队列(input_data那个file队列)

    tra_summary_writer = tf.summary.FileWriter(train_log_dir, sess.graph)
    val_summary_writer = tf.summary.FileWriter(val_log_dir, sess.graph)
    '''
    '''
    if retrain == False:
        print('Reading checkpoints')
        ckpt = tf.train.get_checkpoint_state(train_log_dir)
        if ckpt and ckpt.model_checkpoint_path:
            global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
            saver.restore(sess, './logs/train/model.ckpt-10000')
            print('Loading success, global_step is %s' % global_step)
        else:
            print('No checkpoint file found')
            return
    
    saver.restore(sess, './logs/train/model.ckpt-10000')


    for step in range(50):
        train_images, train_labels = sess.run([train_image_batch, train_label_batch])
        _, train_loss, train_acc = sess.run([train_op2, loss, accuracy],
                                            feed_dict={x: train_images, y_: train_labels})
        print('Step: %d, loss: %.4f, accuracy: %.4f%%' % (step, train_loss, train_acc))
   
    saver.restore(sess, './logs/train/model.ckpt-14999')
    '''
    '''
    #下面的try语句可以当做模板使用
    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break

            #运行计算节点,从计算节点中得到真实的image,label
            train_images, train_labels = sess.run([train_image_batch, train_label_batch])

            #运行损失, 精确度计算节点, 得到具体数值
            _, train_loss, train_acc = sess.run([train_op, loss, accuracy],
                                            feed_dict={x: train_images, y_: train_labels})

            #每到50步或者最后一步就当前batch的损失值大小和准确度大小
            if step % 50 == 0 or (step + 1) == MAX_STEP:
                print('Step: %d, loss: %.4f, accuracy: %.4f%%' % (step, train_loss, train_acc))
                #summary_str = sess.run(summary_op)
                #tra_summary_writer.add_summary(summary_str, step)

            #每到200步或者最后一步就从测试集取一个batch, 计算损失值大小和准确度
            if step % 200 == 0 or (step + 1) == MAX_STEP:

                val_images, val_labels = sess.run([val_image_batch, val_label_batch])
                val_loss, val_acc = sess.run([loss, accuracy],
                                             feed_dict={x: val_images, y_: val_labels})
                print('**  Step %d, val loss = %.2f, val accuracy = %.2f%%  **' % (step, val_loss, val_acc))

                #summary_str = sess.run(summary_op)
                #val_summary_writer.add_summary(summary_str, step)

            #每到2000步就保存一次
            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                if step == 0:
                    continue
                checkpoint_path = os.path.join(train_log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    '''
    sess.close()
Пример #15
0
def train():
    """Fine-tune VGG16 on CIFAR-10, resuming from a checkpoint if present.

    Relies on module-level globals: BATCH_SIZE, IMG_W, IMG_H, N_CLASSES,
    IS_PRETRAIN, learning_rate, MAX_STEP, config, input_data, VGG, tools.
    """
    pre_trained_weights = './VGG16_pretrain/vgg16.npy'
    data_dir = config.dataPath
    train_log_dir = './logs2/train/'
    val_log_dir = './logs2/val/'

    with tf.name_scope('input'):
        train_image_batch, train_label_batch = input_data.read_cifar10(
            data_dir, is_train=True, batch_size=BATCH_SIZE, shuffle=True)

        val_image_batch, val_label_batch = input_data.read_cifar10(
            data_dir, is_train=False, batch_size=BATCH_SIZE, shuffle=False)

    # BUG FIX: build the network on the placeholders.  Previously
    # logits/loss/accuracy were wired directly to train_image_batch /
    # train_label_batch, so every feed_dict below was silently ignored and
    # the "validation" numbers were computed on training batches.
    x = tf.placeholder(dtype=tf.float32, shape=[BATCH_SIZE, IMG_H, IMG_W, 3])
    y_ = tf.placeholder(dtype=tf.int32, shape=[BATCH_SIZE, N_CLASSES])
    tf.summary.image('input', x, 10)

    logits = VGG.VGG16(x, N_CLASSES, IS_PRETRAIN)
    loss = tools.loss(logits, y_)
    accuracy = tools.accuracy(logits, y_)
    my_global_step = tf.Variable(0, trainable=False, name='global_step')

    # Run batch-norm (and any other) update ops alongside the train step.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = tools.optimize(loss, learning_rate, my_global_step)

    saver = tf.train.Saver(tf.global_variables())

    summary_op = tf.summary.merge_all()

    # Resume from an existing checkpoint if one is found; otherwise start
    # from the pretrained npy weights (fc layers re-initialized).
    print('Reading checkpoint...')
    ckpt = tf.train.get_checkpoint_state(train_log_dir)
    sess = tf.Session()
    if ckpt and ckpt.model_checkpoint_path:
        global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        saver.restore(sess, ckpt.model_checkpoint_path)
        print('Load success, global step: %s' % global_step)
    else:
        init = tf.global_variables_initializer()
        sess.run(init)
        # load pretrain weights
        tools.load_with_skip(pre_trained_weights, sess, ['fc6', 'fc7', 'fc8'])
        print('Load pre_trained_weights success!!!')

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    train_summary_writer = tf.summary.FileWriter(train_log_dir, sess.graph)
    val_summary_writer = tf.summary.FileWriter(val_log_dir, sess.graph)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break

            train_images, train_labels = sess.run(
                [train_image_batch, train_label_batch])
            _, train_loss, train_accuracy = sess.run(
                [train_op, loss, accuracy],
                feed_dict={
                    x: train_images,
                    y_: train_labels
                })
            if step % 50 == 0 or (step + 1) == MAX_STEP:
                print("Step: %d, loss: %.4f, accuracy: %.4f%%" %
                      (step, train_loss, train_accuracy))
                summary_str = sess.run(summary_op,
                                       feed_dict={
                                           x: train_images,
                                           y_: train_labels
                                       })
                train_summary_writer.add_summary(summary_str, step)

            if step % 200 == 0 or (step + 1) == MAX_STEP:
                val_images, val_labels = sess.run(
                    [val_image_batch, val_label_batch])
                val_loss, val_accuracy = sess.run([loss, accuracy],
                                                  feed_dict={
                                                      x: val_images,
                                                      y_: val_labels
                                                  })
                print("** Step: %d, loss: %.4f, accuracy: %.4f%%" %
                      (step, val_loss, val_accuracy))
                # BUG FIX: the validation summary was fed the TRAINING batch;
                # feed the validation batch instead.
                summary_str = sess.run(summary_op,
                                       feed_dict={
                                           x: val_images,
                                           y_: val_labels
                                       })
                val_summary_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(train_log_dir, 'model.ckpt')
                saver.save(sess, save_path=checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limited reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
Пример #16
0
        yield random.randint(0, 100)


def generate_ix():
    """Yield random batch indices in [0, 10000 // batch_size] forever.

    BUG FIX: the original body computed the index but never yielded it, so
    calling the function just spun in an infinite loop producing nothing.
    Also uses floor division: under Python 3, ``10000 / batch_size`` is a
    float and random.randint requires integers.  ``batch_size`` is a
    module-level global defined elsewhere in this file.
    """
    while True:
        yield random.randint(0, 10000 // batch_size)


# Script-level training entry for the deep-matting network.  `is_train`,
# `config`, `batch_size`, `tools` and `np` are defined earlier in the file.
if is_train:
    with tf.Session(config=config) as sess:
        init = tf.global_variables_initializer()
        merged = tf.summary.merge_all()
        writer = tf.summary.FileWriter('/disk3/Graduate-design/train_log/',
                                       sess.graph)
        sess.run(init)
        # Load pretrained VGG16 weights, skipping the listed layers.
        # NOTE(review): 'conv1_1' and 'fc5' in the skip list look unusual for
        # a stock VGG16 .npy file -- confirm against the weight file's keys.
        tools.load_with_skip('/tmp/deep_matting/vgg16.npy', sess,
                             ['conv1_1', 'fc6', 'fc7', 'fc5', 'fc8'])
        # Fixed held-out set: the first 1000 records of test shard 000.
        test_data = np.load("/disk3/Graduate-design/test/{:0>3}.npy".format(0))
        F_test = np.stack([np.array(x["F"]) for x in test_data[0:1000]])
        B_test = np.stack([np.array(x["B"]) for x in test_data[0:1000]])
        I_test = np.stack([np.array(x["I"]) for x in test_data[0:1000]])
        alpha_diff_target_test = np.stack(
            [np.array([x["alpha_diff"]]) for x in test_data[0:1000]])
        # Iterate over 1000 training shards, batching within each shard.
        for idx_f in range(1000):
            # pics = np.load("/disk3/Graduate-design/data/{:0>3}.npy".format(idx_f))
            pics = np.load(
                "/disk3/Graduate-design/data/{:0>3}.npy".format(idx_f))
            for ix in range(0, 1000, batch_size):
                # NOTE(review): the slice end `ix + batch_size + 1` yields
                # batch_size + 1 items -- possible off-by-one; confirm the
                # intended batch size.
                F_train = np.stack(
                    [np.array(x["F"]) for x in pics[ix:ix + batch_size + 1]])
                B_train = np.stack(
                    [np.array(x["B"]) for x in pics[ix:ix + batch_size + 1]])
def train_aid():
    """Fine-tune VGG16 on the AID scene-classification dataset.

    Restores ImageNet-pretrained VGG16 weights (skipping the
    fully-connected head 'fc6'-'fc8'), trains with a queue-based input
    pipeline, logs train/val summaries, and checkpoints periodically.
    Relies on module-level ``config``, ``BATCH_SIZE``, ``CAPACITY``,
    ``IS_PRETRAIN``, ``learning_rate`` and ``MAX_STEP``.
    """
    pre_trained_weights = r'/media/jsl/ubuntu/pretrain_weight/vgg16.npy'
    data_train_dir = os.path.join(config.aid_data_root_path, 'train')
    data_test_dir = os.path.join(config.aid_data_root_path, 'val')
    train_log_dir = os.path.join(config.aid_log_root_path, 'train')
    val_log_dir = os.path.join(config.aid_log_root_path, 'val')

    with tf.name_scope('input'):
        # Queue-fed batch pipelines for the train and validation splits.
        image_train_list, label_train_list = get_files(data_train_dir)
        image_val_list, label_val_list = get_files(data_test_dir)
        image_batch, label_batch = get_batch(image_train_list,
                                             label_train_list,
                                             config.aid_img_weight,
                                             config.aid_img_height, BATCH_SIZE,
                                             CAPACITY)
        val_image_batch, val_label_batch = get_batch(image_val_list,
                                                     label_val_list,
                                                     config.aid_img_weight,
                                                     config.aid_img_height,
                                                     BATCH_SIZE, CAPACITY)

    # Placeholders let one graph evaluate either split through feed_dict.
    # NOTE(review): 'aid_img_weight' presumably means image *width* —
    # confirm against the config module.
    x = tf.placeholder(
        tf.float32,
        shape=[BATCH_SIZE, config.aid_img_weight, config.aid_img_height, 3])
    y_ = tf.placeholder(tf.int16, shape=[BATCH_SIZE, config.aid_n_class])

    logits = VGG.VGG16N(x, config.aid_n_class, IS_PRETRAIN)
    loss = tools.loss(logits, y_)
    accuracy = tools.accuracy(logits, y_)
    my_global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = tools.optimize(loss, learning_rate, my_global_step)

    saver = tf.train.Saver(tf.global_variables())
    summary_op = tf.summary.merge_all()

    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    start_time = time.strftime('%Y-%m-%d %H-%M-%S',
                               time.localtime(time.time()))
    print('start_time:', start_time)

    # load the parameter file, assign the parameters, skip the specific layers
    tools.load_with_skip(pre_trained_weights, sess, ['fc6', 'fc7', 'fc8'])
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    tra_summary_writer = tf.summary.FileWriter(train_log_dir, sess.graph)
    val_summary_writer = tf.summary.FileWriter(val_log_dir, sess.graph)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break

            # Pull one training batch from the queue, then feed it to the
            # placeholder-built graph for the optimization step.
            tra_images, tra_labels = sess.run([image_batch, label_batch])
            _, tra_loss, tra_acc = sess.run([train_op, loss, accuracy],
                                            feed_dict={
                                                x: tra_images,
                                                y_: tra_labels
                                            })
            if step % 50 == 0 or (step + 1) == MAX_STEP:
                print('Step: %d, loss: %.4f, accuracy: %.4f%%' %
                      (step, tra_loss, tra_acc))
                summary_str = sess.run(summary_op,
                                       feed_dict={
                                           x: tra_images,
                                           y_: tra_labels
                                       })
                tra_summary_writer.add_summary(summary_str, step)

            if step % 200 == 0 or (step + 1) == MAX_STEP:
                # Periodic validation on one held-out batch.
                val_images, val_labels = sess.run(
                    [val_image_batch, val_label_batch])
                val_loss, val_acc = sess.run([loss, accuracy],
                                             feed_dict={
                                                 x: val_images,
                                                 y_: val_labels
                                             })
                print(
                    '**  Step %d, val loss = %.2f, val accuracy = %.2f%%  **' %
                    (step, val_loss, val_acc))

                summary_str = sess.run(summary_op,
                                       feed_dict={
                                           x: val_images,
                                           y_: val_labels
                                       })
                val_summary_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                # Checkpoint every 2000 steps and at the final step.
                checkpoint_path = os.path.join(train_log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        # Always stop the queue threads, even on error/interrupt.
        coord.request_stop()

    coord.join(threads)
    sess.close()
    end_time = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime(time.time()))
    print('end_time:', end_time)
Пример #18
0
def train():
    """Train a fast style-transfer network against one fixed style image.

    Builds content/style loss on VGG feature maps (Gram matrices for
    style), optimizes the transform network with Adam and an
    exponentially decaying learning rate, periodically saves sample
    outputs and model checkpoints. Relies on module-level ``BATCH_SIZE``,
    ``num_epochs``, ``IMG_W``, ``IMG_H``, ``CONTENT_LAYER``,
    ``style_weight`` and ``content_weight``.
    """
    pre_trained_weights = './/vgg16.npy'
    style_image_dir = 'style_image/mosaic.jpg'
    image_file_dir = 'train2014'
    save_dir = 'myModel'
    exp_dir = 'myGenerate'
    now_exp_dir = myutil.makeExpDir(exp_dir)

## Read one fixed style image (actual pixel data)
    #styImgReaded = data_io.get_style_batch(style_image_dir)
    styImgReaded = np.expand_dims(misc.imread(style_image_dir), axis=0)# --- fed into the style_image placeholder
    styImgReadedsize = styImgReaded.shape
    styImg_W = styImgReadedsize[1]
    styImg_H = styImgReadedsize[2]


## Build the content-image batch pipeline (a graph tensor, no actual data yet)
    ContImgReaded = data_io.get_cont_batch(image_file_dir, 256, 256, BATCH_SIZE, num_epochs)# --- fed into the content_images placeholder


## Placeholder for the single fixed style image
    style_image = tf.placeholder(tf.float32, shape=[1, styImg_W, styImg_H, 3])

## Placeholder for one batch of content images
    content_images = tf.placeholder(tf.float32, shape=[BATCH_SIZE, IMG_W, IMG_H, 3])

## Images produced by the style-transfer network for this batch
#    generated_image = transform.net(content_images)
    generated_image = model.net(content_images, training=True)

## Style loss
    style_gram_dict = myutil.img2Gram(style_image)# dict of Gram matrices for the style image
    gen_gram_dict = myutil.img2Gram(generated_image)# dict of Gram matrices for the generated images
    style_loss = tools.style_loss(gen_gram_dict, style_gram_dict, style_weight)

## Content loss
    content_map_dict = myutil.img2ContMap(content_images)# content-feature dict of the content images
    generated_feature_maps = myutil.img2ContMap(generated_image)# content-feature dict of the generated images
    content_loss = tools.content_loss(generated_feature_maps[CONTENT_LAYER], content_map_dict[CONTENT_LAYER], content_weight)

## Total-variation loss (currently disabled)
#    tv_loss = tools.tv_loss(generated_image, tv_weight)

## Total loss
#    loss = content_loss + style_loss + tv_loss
    loss = content_loss + style_loss
## Exponentially decaying learning rate
    global_step = tf.Variable(0, trainable=False)

    learning_rate = tf.train.exponential_decay(learning_rate=1e-3, global_step=global_step,
                                           decay_steps=300, decay_rate=0.98, staircase=True)

## Training op
    train_op = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss, global_step=global_step)

    saver = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=1)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        # Load VGG weights, skipping the layers in the list (they are not
        # needed for the loss features)
        tools.load_with_skip(pre_trained_weights, sess, ['conv5_1','conv5_2','conv5_3','fc6','fc7','fc8'])

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        print('in')
        img_index = 0
        try:
            while True:
                img_index += 1

                # Fetch one batch of content images from the input queue
                ContImgBatch = sess.run(ContImgReaded)

                ## One optimization step with the fixed style image and this content batch
                sess.run(train_op, feed_dict={style_image:styImgReaded, content_images:ContImgBatch})

                if img_index % 10 == 0:
                    # Log loss/lr and save one generated sample image.
                    tra_loss, res, now_lr = sess.run([loss,generated_image,learning_rate], feed_dict={style_image:styImgReaded, content_images:ContImgBatch})
                    misc.imsave(now_exp_dir+'/res'+str(img_index)+'.jpg', res[0])

                    print ('img_index: %d, loss: %.4f, lr: %.3f*e-3' % (img_index, tra_loss, now_lr*1000))


                if img_index % 100 == 0:# save the model checkpoint
                    checkpoint_path = os.path.join(save_dir, 'myModel.ckpt')
                    saver.save(sess, checkpoint_path, global_step=img_index)

                print('img_index : ', img_index)

        except tf.errors.OutOfRangeError:
            # Raised by the input queue after num_epochs passes over the data.
            print("done")

        finally:
            coord.request_stop()


        coord.join(threads)
        sess.close()
Пример #19
0
def train():
    """Fine-tune VGG16 on CIFAR-10 from ImageNet-pretrained weights.

    Builds the network on feed_dict placeholders, restores all VGG16
    weights except the fully-connected head ('fc6'-'fc8'), then runs a
    queue-driven train/val loop with summary logging and periodic
    checkpoints. Relies on module-level ``BATCH_SIZE``, ``IMG_W``,
    ``IMG_H``, ``N_CLASSES``, ``IS_PRETRAIN``, ``learning_rate`` and
    ``MAX_STEP``.
    """
    pre_trained_weights = './/vgg16_pretrain//vgg16.npy'
    data_dir = './/data//cifar-10-batches-bin//'
    train_log_dir = './/logs//train//'
    val_log_dir = './/logs//val//'

    with tf.name_scope('input'):
        # Queue-fed pipelines for the train and validation splits.
        tra_image_batch, tra_label_batch = input_data.read_cifar10(
            data_dir=data_dir,
            is_train=True,
            batch_size=BATCH_SIZE,
            shuffle=True)
        val_image_batch, val_label_batch = input_data.read_cifar10(
            data_dir=data_dir,
            is_train=False,
            batch_size=BATCH_SIZE,
            shuffle=False)

    # BUG FIX: build the graph on placeholders so the feed_dict actually
    # selects the data. Previously `logits` was wired directly to
    # tra_image_batch and the x/y_ placeholders (created afterwards) were
    # never part of the graph, so every feed was silently ignored and the
    # "validation" loss/accuracy were computed on fresh *training*
    # batches. This now mirrors the correct pattern used by train_aid().
    x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, IMG_W, IMG_H, 3])
    y_ = tf.placeholder(tf.int16, shape=[BATCH_SIZE, N_CLASSES])

    logits = VGG.VGG16N(x, N_CLASSES, IS_PRETRAIN)
    loss = tools.loss(logits, y_)
    accuracy = tools.accuracy(logits, y_)
    my_global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = tools.optimize(loss, learning_rate, my_global_step)

    saver = tf.train.Saver(tf.global_variables())
    summary_op = tf.summary.merge_all()

    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    # load the parameter file, assign the parameters, skip the specific layers
    tools.load_with_skip(pre_trained_weights, sess, ['fc6', 'fc7', 'fc8'])

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    tra_summary_writer = tf.summary.FileWriter(train_log_dir, sess.graph)
    val_summary_writer = tf.summary.FileWriter(val_log_dir, sess.graph)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break

            # Pull one training batch from the queue and feed it in.
            tra_images, tra_labels = sess.run(
                [tra_image_batch, tra_label_batch])
            _, tra_loss, tra_acc = sess.run([train_op, loss, accuracy],
                                            feed_dict={
                                                x: tra_images,
                                                y_: tra_labels
                                            })
            if step % 50 == 0 or (step + 1) == MAX_STEP:
                print('Step: %d, loss: %.4f, accuracy: %.4f%%' %
                      (step, tra_loss, tra_acc))
                # The summaries now depend on the placeholders, so they
                # must be fed too (previously run with no feed_dict).
                summary_str = sess.run(summary_op,
                                       feed_dict={
                                           x: tra_images,
                                           y_: tra_labels
                                       })
                tra_summary_writer.add_summary(summary_str, step)

            if step % 200 == 0 or (step + 1) == MAX_STEP:
                # Periodic validation on one held-out batch.
                val_images, val_labels = sess.run(
                    [val_image_batch, val_label_batch])
                val_loss, val_acc = sess.run([loss, accuracy],
                                             feed_dict={
                                                 x: val_images,
                                                 y_: val_labels
                                             })
                print(
                    '**  Step %d, val loss = %.2f, val accuracy = %.2f%%  **' %
                    (step, val_loss, val_acc))

                summary_str = sess.run(summary_op,
                                       feed_dict={
                                           x: val_images,
                                           y_: val_labels
                                       })
                val_summary_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(train_log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        # Always stop the queue threads, even on error/interrupt.
        coord.request_stop()

    coord.join(threads)
    sess.close()
Пример #20
0
def finetune_train():
    """Fine-tune a VGG16 classifier on a custom dataset from file lists.

    Restores ImageNet-pretrained VGG16 weights (skipping 'fc6'-'fc8'),
    trains on queue-fed batches with feed_dict placeholders, logs
    train/val summaries, and checkpoints periodically. Relies on
    module-level ``train_txt``, ``val_txt``, ``IMG_W``, ``IMG_H``,
    ``BATCH_SIZE``, ``CAPACITY``, ``N_CLASSES``, ``LEARNING_RATE``,
    ``MAX_STEP``, ``logs_train_dir``, ``logs_val_dir`` and
    ``finetune_model_dir``.
    """

    pre_trained_weights = './vgg16_pretrain/vgg16.npy'

    with tf.name_scope('input'):

        # Queue-based pipelines built from text-file listings of the
        # train and validation images.
        train_batch, train_label_batch, _ = input_data.get_batch(
            train_txt, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
        val_batch, val_label_batch, _ = input_data.get_batch(
            val_txt, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

    # Placeholders let one graph score either split via feed_dict.
    # Labels are int32 class indices here (not one-hot).
    x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, IMG_W, IMG_H, 3])
    y_ = tf.placeholder(tf.int32, shape=[BATCH_SIZE])

    model = models.model(x, N_CLASSES)
    model.VGG16()
    logits = model.fc8

    loss = tools.loss(logits, y_)
    acc = tools.accuracy(logits, y_)
    train_op = tools.optimize(loss, LEARNING_RATE)

    with tf.Session() as sess:

        saver = tf.train.Saver(tf.global_variables())
        sess.run(tf.global_variables_initializer())

        # load the parameter file, assign the parameters, skip the specific layers
        tools.load_with_skip(pre_trained_weights, sess, ['fc6', 'fc7', 'fc8'])

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        summary_op = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
        val_writer = tf.summary.FileWriter(logs_val_dir, sess.graph)

        try:
            for step in np.arange(MAX_STEP):
                if coord.should_stop():
                    break

                # One optimization step on a fresh training batch.
                tra_images, tra_labels = sess.run(
                    [train_batch, train_label_batch])
                _, tra_loss, tra_acc = sess.run([train_op, loss, acc],
                                                feed_dict={
                                                    x: tra_images,
                                                    y_: tra_labels
                                                })

                if step % 50 == 0:
                    print(
                        'Step %d, train loss = %.4f, train accuracy = %.2f%%' %
                        (step, tra_loss, tra_acc))
                    summary_str = sess.run(summary_op,
                                           feed_dict={
                                               x: tra_images,
                                               y_: tra_labels
                                           })
                    train_writer.add_summary(summary_str, step)
                #
                if step % 200 == 0 or (step + 1) == MAX_STEP:
                    # Periodic validation on one held-out batch.
                    val_images, val_labels = sess.run(
                        [val_batch, val_label_batch])
                    val_loss, val_acc = sess.run([loss, acc],
                                                 feed_dict={
                                                     x: val_images,
                                                     y_: val_labels
                                                 })

                    print(
                        '**  Step %d, val loss = %.4f, val accuracy = %.2f%%  **'
                        % (step, val_loss, val_acc))
                    summary_str = sess.run(summary_op,
                                           feed_dict={
                                               x: val_images,
                                               y_: val_labels
                                           })
                    val_writer.add_summary(summary_str, step)
                #
                if step % 2000 == 0 or (step + 1) == MAX_STEP:
                    # Checkpoint every 2000 steps and at the final step.
                    checkpoint_path = os.path.join(finetune_model_dir,
                                                   'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)

        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        finally:
            # Always stop the queue threads, even on error/interrupt.
            coord.request_stop()
        coord.join(threads)