Esempio n. 1
0
def train(args, sess, epoch, image_list, label_list, index_dequeue_op,
          enqueue_op, image_paths_placeholder, labels_placeholder,
          learning_rate_placeholder, phase_train_placeholder,
          batch_size_placeholder, global_step, loss, train_op, summary_op,
          summary_writer, regularization_losses, learning_rate_schedule_file):
    """Run one epoch of softmax/classification training.

    Dequeues the shuffled example order for this epoch, enqueues the
    corresponding image paths and labels into the input pipeline, then runs
    `args.epoch_size` training steps.  Every 100th batch also evaluates
    `summary_op` and writes the result; the epoch's total training time is
    written as a `time/total` summary at the end.

    Returns the global step after the last batch.
    """
    # A positive command-line learning rate wins; otherwise the rate for
    # this epoch is looked up in the schedule file.
    if args.learning_rate > 0.0:
        lr = args.learning_rate
    else:
        lr = facenet.get_learning_rate_from_file(learning_rate_schedule_file,
                                                 epoch)

    epoch_indices = sess.run(index_dequeue_op)
    epoch_labels = np.array(label_list)[epoch_indices]
    epoch_images = np.array(image_list)[epoch_indices]

    # Enqueue one epoch of image paths and labels as (N, 1) column vectors.
    sess.run(
        enqueue_op, {
            image_paths_placeholder: np.expand_dims(np.array(epoch_images), 1),
            labels_placeholder: np.expand_dims(np.array(epoch_labels), 1),
        })

    # Training loop.
    train_time = 0
    batch_number = 0
    while batch_number < args.epoch_size:
        start_time = time.time()
        feed_dict = {
            learning_rate_placeholder: lr,
            phase_train_placeholder: True,
            batch_size_placeholder: args.batch_size,
        }
        fetches = [loss, train_op, global_step, regularization_losses]
        if batch_number % 100 == 0:
            # Every 100th batch additionally fetches the merged summaries.
            err, _, step, reg_loss, summary_str = sess.run(
                fetches + [summary_op], feed_dict=feed_dict)
            summary_writer.add_summary(summary_str, global_step=step)
        else:
            err, _, step, reg_loss = sess.run(fetches, feed_dict=feed_dict)
        duration = time.time() - start_time
        print('Epoch: [%d][%d/%d]\tTime %.3f\tLoss %2.3f\tRegLoss %2.3f' %
              (epoch, batch_number + 1, args.epoch_size, duration, err,
               np.sum(reg_loss)))
        batch_number += 1
        train_time += duration
    # Record the total epoch time.
    summary = tf.Summary()
    #pylint: disable=maybe-no-member
    summary.value.add(tag='time/total', simple_value=train_time)
    summary_writer.add_summary(summary, step)
    return step
def train(args, sess, src_dataset, tgt_dataset, epoch, image_paths_placeholder,
          labels_placeholder, labels_batch, batch_size_placeholder,
          learning_rate_placeholder, phase_train_placeholder, enqueue_op,
          input_queue, global_step, embeddings, total_loss, triplet_loss,
          adv_loss, reg_loss, train_op, summary_writer,
          learning_rate_schedule_file, embedding_size):
    """Run one epoch of adversarial quadruplet training.

    Each training example is a quadruplet: a source-domain triplet
    (anchor, positive, negative) plus one target-domain image consumed by
    the adversarial loss.  Per outer iteration: sample both domains, run a
    forward pass to compute embeddings, select hard quadruplets via
    `adversarialloss.select_quadruplets`, then train on them until
    `args.epoch_size` batches have been processed in total.

    Returns the global step after the last batch.
    """
    batch_number = 0

    # A positive command-line learning rate overrides the schedule file.
    if args.learning_rate > 0.0:
        lr = args.learning_rate
    else:
        lr = facenet.get_learning_rate_from_file(learning_rate_schedule_file,
                                                 epoch)
    step = 0  # robust return value even when args.epoch_size == 0
    while batch_number < args.epoch_size:
        # Sample people randomly from both domains.  One target image is
        # paired with each source triplet, so a third as many are needed.
        src_image_paths, src_num_per_class = sample_people(
            src_dataset, args.people_per_batch, args.images_per_person)
        tgt_image_paths, tgt_num_per_class = sample_people(
            tgt_dataset,
            args.people_per_batch,
            args.images_per_person,
            # BUGFIX: integer division -- `/` yields a float under Python 3,
            # which is not a valid image count.
            nb_images=len(src_image_paths) // 3)

        print('Running forward pass on sampled images: ', end='')
        start_time = time.time()

        # Arrange paths as rows of [anchor, positive, negative, target].
        src_image_paths_array = np.reshape(
            np.expand_dims(np.array(src_image_paths), 1), (-1, 3))
        tgt_image_paths_array = np.expand_dims(np.array(tgt_image_paths),
                                               axis=0).T
        image_paths_array = np.concatenate(
            (src_image_paths_array, tgt_image_paths_array), axis=1)

        nrof_examples = image_paths_array.size
        labels_array = np.reshape(np.arange(nrof_examples), (-1, 4))

        sess.run(
            enqueue_op, {
                image_paths_placeholder: image_paths_array,
                labels_placeholder: labels_array
            })
        # Forward pass: embed every enqueued image.
        emb_array = np.zeros((nrof_examples, embedding_size))
        nrof_batches = int(np.ceil(nrof_examples / args.batch_size))
        for i in range(nrof_batches):
            batch_size = min(nrof_examples - i * args.batch_size,
                             args.batch_size)
            emb, lab = sess.run(
                [embeddings, labels_batch],
                feed_dict={
                    batch_size_placeholder: batch_size,
                    learning_rate_placeholder: lr,
                    phase_train_placeholder: True
                })
            # `lab` carries the example indices, so out-of-order dequeues
            # still land in the right rows.
            emb_array[lab, :] = emb
        # View as (n_quadruplets, 4, embedding_size); columns 0-2 hold the
        # source triplet, column 3 the target image.  The original called
        # np.lib.stride_tricks.as_strided without strides, which for a
        # contiguous array is exactly this reshape -- made explicit here.
        emb_array = emb_array.reshape(nrof_examples // 4, 4, embedding_size)
        src_emb_array = np.reshape(emb_array[:, 0:3], (-1, embedding_size))
        tgt_emb_array = emb_array[:, 3]
        print('%.3f' % (time.time() - start_time))

        # Select quadruplets based on the embeddings
        print('Selecting suitable triplets for training')
        quadruplets, nrof_random_negs, nrof_quadruplets = adversarialloss.select_quadruplets(
            src_emb_array, tgt_emb_array, src_num_per_class, src_image_paths,
            tgt_image_paths, args.people_per_batch, args.alpha)
        selection_time = time.time() - start_time
        print(
            '(nrof_random_negs, nrof_triplets) = (%d, %d): time=%.3f seconds' %
            (nrof_random_negs, nrof_quadruplets, selection_time))

        # Perform training on the selected quadruplets
        nrof_batches = int(np.ceil(nrof_quadruplets * 4 / args.batch_size))
        quadruplets_paths = list(itertools.chain(*quadruplets))
        labels_array = np.reshape(np.arange(len(quadruplets_paths)), (-1, 4))
        quadruplets_paths_array = np.reshape(
            np.expand_dims(np.array(quadruplets_paths), 1), (-1, 4))
        sess.run(
            enqueue_op, {
                image_paths_placeholder: quadruplets_paths_array,
                labels_placeholder: labels_array
            })
        nrof_examples = len(quadruplets_paths)
        train_time = 0
        i = 0
        summary = tf.Summary()
        while i < nrof_batches:
            start_time = time.time()
            batch_size = min(nrof_examples - i * args.batch_size,
                             args.batch_size)
            feed_dict = {
                batch_size_placeholder: batch_size,
                learning_rate_placeholder: lr,
                phase_train_placeholder: True
            }
            triplet_err, adv_err, total_err, _, step, emb, lab = sess.run(
                [
                    triplet_loss, adv_loss, total_loss, train_op, global_step,
                    embeddings, labels_batch
                ],
                feed_dict=feed_dict)
            duration = time.time() - start_time
            print(
                'Epoch: [%d][%d/%d]\tTime %.3f\nTotal loss %2.3f\nTriplet loss %2.3f\nAdv loss %2.3f'
                % (epoch, batch_number + 1, args.epoch_size, duration,
                   total_err, triplet_err, adv_err))
            batch_number += 1
            i += 1
            train_time += duration
            # Accumulate per-batch loss values into this epoch's summary.
            summary.value.add(tag='loss/total_loss', simple_value=total_err)
            summary.value.add(tag='loss/triplet_loss',
                              simple_value=triplet_err)
            summary.value.add(tag='loss/adv_loss', simple_value=adv_err)

        # Add the quadruplet-selection time to the summary and flush it.
        #pylint: disable=maybe-no-member
        summary.value.add(tag='time/selection', simple_value=selection_time)
        summary_writer.add_summary(summary, step)

    return step
def train(args, sess, epoch, image_list, label_list, index_dequeue_op,
          enqueue_op, image_paths_placeholder, labels_placeholder,
          learning_rate_placeholder, phase_train_placeholder,
          batch_size_placeholder, control_placeholder, step, loss, train_op,
          summary_op, summary_writer, reg_losses, learning_rate_schedule_file,
          stat, cross_entropy_mean, accuracy, learning_rate, prelogits,
          prelogits_center_loss, random_rotate, random_crop, random_flip,
          prelogits_norm, prelogits_hist_max, use_fixed_image_standardization):
    """Run one epoch of classification training with per-step statistics.

    Enqueues one epoch worth of image paths/labels together with a
    per-image augmentation control word, runs `args.epoch_size` training
    steps, and records losses/accuracy/learning-rate into the `stat`
    arrays.  Every 100th batch also evaluates `summary_op` and writes the
    summary.

    Returns False (without training) when the scheduled learning rate is
    non-positive -- the caller uses this as a stop signal -- and True
    otherwise.
    """
    batch_number = 0

    # A positive command-line learning rate overrides the schedule file.
    if args.learning_rate > 0.0:
        lr = args.learning_rate
    else:
        lr = facenet.get_learning_rate_from_file(learning_rate_schedule_file,
                                                 epoch)

    # A non-positive scheduled rate signals the end of training.
    if lr <= 0:
        return False

    index_epoch = sess.run(index_dequeue_op)
    label_epoch = np.array(label_list)[index_epoch]
    image_epoch = np.array(image_list)[index_epoch]

    # Enqueue one epoch of image paths and labels.  The control word packs
    # the augmentation flags into a bit mask understood by the input
    # pipeline.
    labels_array = np.expand_dims(np.array(label_epoch), 1)
    image_paths_array = np.expand_dims(np.array(image_epoch), 1)
    control_value = (facenet.RANDOM_ROTATE * random_rotate +
                     facenet.RANDOM_CROP * random_crop +
                     facenet.RANDOM_FLIP * random_flip +
                     facenet.FIXED_STANDARDIZATION *
                     use_fixed_image_standardization)
    control_array = np.ones_like(labels_array) * control_value
    sess.run(
        enqueue_op, {
            image_paths_placeholder: image_paths_array,
            labels_placeholder: labels_array,
            control_placeholder: control_array
        })

    # Training loop
    train_time = 0
    while batch_number < args.epoch_size:
        start_time = time.time()
        feed_dict = {
            learning_rate_placeholder: lr,
            phase_train_placeholder: True,
            batch_size_placeholder: args.batch_size
        }
        tensor_list = [
            loss, train_op, step, reg_losses, prelogits, cross_entropy_mean,
            learning_rate, prelogits_norm, accuracy, prelogits_center_loss
        ]
        if batch_number % 100 == 0:
            # Every 100th batch additionally fetches the merged summaries.
            loss_, _, step_, reg_losses_, prelogits_, cross_entropy_mean_, lr_, prelogits_norm_, accuracy_, center_loss_, summary_str = sess.run(
                tensor_list + [summary_op], feed_dict=feed_dict)
            summary_writer.add_summary(summary_str, global_step=step_)
        else:
            loss_, _, step_, reg_losses_, prelogits_, cross_entropy_mean_, lr_, prelogits_norm_, accuracy_, center_loss_ = sess.run(
                tensor_list, feed_dict=feed_dict)

        # BUGFIX: compute the step duration once, right after sess.run().
        # The original recomputed it a second time after the stat
        # bookkeeping below, inflating the reported per-step time.
        duration = time.time() - start_time
        stat['loss'][step_ - 1] = loss_
        stat['center_loss'][step_ - 1] = center_loss_
        stat['reg_loss'][step_ - 1] = np.sum(reg_losses_)
        stat['xent_loss'][step_ - 1] = cross_entropy_mean_
        stat['prelogits_norm'][step_ - 1] = prelogits_norm_
        stat['learning_rate'][epoch - 1] = lr_
        stat['accuracy'][step_ - 1] = accuracy_
        # Accumulate a per-epoch histogram of |prelogits| clipped at
        # prelogits_hist_max.
        stat['prelogits_hist'][epoch - 1, :] += \
        np.histogram(np.minimum(np.abs(prelogits_), prelogits_hist_max), bins=1000, range=(0.0, prelogits_hist_max))[0]

        print(
            'Epoch: [%d][%d/%d]\tTime %.3f\tLoss %2.3f\tXent %2.3f\tRegLoss %2.3f\tAccuracy %2.3f\tLr %2.5f\tCl %2.3f'
            % (epoch, batch_number + 1,
               args.epoch_size, duration, loss_, cross_entropy_mean_,
               np.sum(reg_losses_), accuracy_, lr_, center_loss_))
        batch_number += 1
        train_time += duration
    # Record the total epoch time.
    summary = tf.Summary()
    # pylint: disable=maybe-no-member
    summary.value.add(tag='time/total', simple_value=train_time)
    summary_writer.add_summary(summary, global_step=step_)
    return True
Esempio n. 4
0
def train(args, sess, dataset, epoch, image_paths_placeholder,
          labels_placeholder, labels_batch, batch_size_placeholder,
          learning_rate_placeholder, phase_train_placeholder, enqueue_op,
          input_queue, global_step, embeddings, loss, train_op, summary_writer,
          learning_rate_schedule_file, embedding_size):
    """Run one epoch of still-to-video triplet training.

    Per outer iteration: sample video and still images, run two separate
    forward passes to embed both sets, select triplets with
    `tripletloss.select_triplets_cox`, then train on the selected triplets
    until `args.epoch_size` batches have been processed in total.

    Returns the global step after the last processed batch.
    """
    batch_number = 0

    # A positive command-line learning rate overrides the schedule file.
    if args.learning_rate > 0.0:
        lr = args.learning_rate
    else:
        lr = facenet.get_learning_rate_from_file(learning_rate_schedule_file,
                                                 epoch)
    while batch_number < args.epoch_size:
        # Sample people randomly from the dataset
        video_paths, still_paths, num_per_class = sample_people(
            dataset, args.people_per_batch, args.images_per_person)

        # Video samples forward pass
        print('Running forward pass on video sampled images: ', end='')
        start_time = time.time()
        nrof_examples = args.people_per_batch * args.images_per_person
        # Labels are the example indices, laid out in triplet rows.
        labels_array = np.reshape(np.arange(nrof_examples), (-1, 3))
        image_paths_array = np.reshape(
            np.expand_dims(np.array(video_paths), 1), (-1, 3))
        sess.run(
            enqueue_op, {
                image_paths_placeholder: image_paths_array,
                labels_placeholder: labels_array
            })
        video_emb_array = np.zeros((nrof_examples, embedding_size))
        nrof_batches = int(np.ceil(nrof_examples / args.batch_size))
        for i in range(nrof_batches):
            batch_size = min(nrof_examples - i * args.batch_size,
                             args.batch_size)
            # NOTE(review): phase_train is True even for this
            # embedding-only forward pass -- confirm this is intended.
            video_emb, lab = sess.run(
                [embeddings, labels_batch],
                feed_dict={
                    batch_size_placeholder: batch_size,
                    learning_rate_placeholder: lr,
                    phase_train_placeholder: True
                })
            # `lab` carries the example indices, so out-of-order dequeues
            # still land in the right rows.
            video_emb_array[lab, :] = video_emb
        print('%.3f' % (time.time() - start_time))

        # Still samples forward pass
        print('Running forward pass on still sampled images: ', end='')
        start_time = time.time()
        # NOTE(review): one still image per person is assumed here, yet the
        # arrays below are reshaped to (-1, 3) -- confirm sample_people
        # returns a still-path count that is a multiple of 3.
        nrof_examples = args.people_per_batch  # * args.images_per_person
        labels_array = np.reshape(np.arange(nrof_examples), (-1, 3))
        still_paths_array = np.reshape(
            np.expand_dims(np.array(still_paths), 1), (-1, 3))
        sess.run(
            enqueue_op, {
                image_paths_placeholder: still_paths_array,
                labels_placeholder: labels_array
            })
        still_emb_array = np.zeros((nrof_examples, embedding_size))
        nrof_batches = int(np.ceil(nrof_examples / args.batch_size))
        for i in range(nrof_batches):
            batch_size = min(nrof_examples - i * args.batch_size,
                             args.batch_size)
            still_emb, lab = sess.run(
                [embeddings, labels_batch],
                feed_dict={
                    batch_size_placeholder: batch_size,
                    learning_rate_placeholder: lr,
                    phase_train_placeholder: True
                })
            still_emb_array[lab, :] = still_emb
        print('%.3f' % (time.time() - start_time))

        # Select triplets based on the embeddings
        print('Selecting suitable triplets for training')
        triplets, nrof_random_negs, nrof_triplets = tripletloss.select_triplets_cox(
            video_emb_array, still_emb_array, num_per_class, video_paths,
            still_paths, args.people_per_batch, args.alpha)
        # NOTE(review): start_time was last reset before the *still* forward
        # pass, so selection_time also includes that pass.
        selection_time = time.time() - start_time
        print(
            '(nrof_random_negs, nrof_triplets) = (%d, %d): time=%.3f seconds' %
            (nrof_random_negs, nrof_triplets, selection_time))

        # Perform training on the selected triplets
        nrof_batches = int(np.ceil(nrof_triplets * 3 / args.batch_size))
        triplet_paths = list(itertools.chain(*triplets))
        labels_array = np.reshape(np.arange(len(triplet_paths)), (-1, 3))
        triplet_paths_array = np.reshape(
            np.expand_dims(np.array(triplet_paths), 1), (-1, 3))
        sess.run(
            enqueue_op, {
                image_paths_placeholder: triplet_paths_array,
                labels_placeholder: labels_array
            })
        nrof_examples = len(triplet_paths)
        train_time = 0
        i = 0
        # emb_array / loss_array are only written by the commented-out lines
        # below; kept for parity with the original.
        emb_array = np.zeros((nrof_examples, embedding_size))
        loss_array = np.zeros((nrof_triplets, ))
        summary = tf.Summary()
        step = 0
        while i < nrof_batches:
            start_time = time.time()
            batch_size = min(nrof_examples - i * args.batch_size,
                             args.batch_size)
            feed_dict = {
                batch_size_placeholder: batch_size,
                learning_rate_placeholder: lr,
                phase_train_placeholder: True
            }
            err, _, step, emb, lab = sess.run(
                [loss, train_op, global_step, embeddings, labels_batch],
                feed_dict=feed_dict)
            #emb_array[lab,:] = emb
            #loss_array[i] = err
            duration = time.time() - start_time
            print('Epoch: [%d][%d/%d]\tTime %.3f\tLoss %2.3f' %
                  (epoch, batch_number + 1, args.epoch_size, duration, err))
            batch_number += 1
            i += 1
            train_time += duration
            # Accumulate the per-batch loss into this epoch's summary.
            summary.value.add(tag='loss', simple_value=err)

        # Add the triplet-selection time to the summary and flush it.
        #pylint: disable=maybe-no-member
        summary.value.add(tag='time/selection', simple_value=selection_time)
        summary_writer.add_summary(summary, step)
    return step
def train(args, sess, dataset, epoch, image_paths_placeholder,
          labels_placeholder, labels_batch, batch_size_placeholder,
          learning_rate_placeholder, phase_train_placeholder, enqueue_op,
          input_queue, global_step, embeddings, loss, train_op, summary_op,
          summary_writer, learning_rate_schedule_file, embedding_size, anchor,
          positive, negative, triplet_loss):
    """Run one epoch of triplet-loss training.

    Per outer iteration: sample people, run a forward pass to embed the
    sampled images, select hard triplets with `select_triplets`, then train
    on them until `args.epoch_size` batches have been processed in total.

    Returns the global step after the last processed batch.
    """
    batch_number = 0

    # A positive command-line learning rate overrides the schedule file.
    if args.learning_rate > 0.0:
        lr = args.learning_rate
    else:
        lr = facenet.get_learning_rate_from_file(learning_rate_schedule_file,
                                                 epoch)
        # Each pass of the while loop below processes one batch.
    while batch_number < args.epoch_size:
        # Sample people randomly from the dataset
        # With the default arguments, sample_people returns 1800 image paths.
        image_paths, num_per_class = sample_people(dataset,
                                                   args.people_per_batch,
                                                   args.images_per_person)
        # Many sampled classes may contribute only a single image, which
        # cannot serve as an anchor/positive pair; the triplet selection
        # below is responsible for filtering those out.
        print('Running forward pass on sampled images: ', end='')
        start_time = time.time()
        nrof_examples = args.people_per_batch * args.images_per_person
        # With the defaults the 1800 images become a 600x3 array; if fewer
        # than 1800 images are available this may need changing.
        labels_array = np.reshape(np.arange(nrof_examples),
                                  (-1, 3))  # reshape(a, (-1, 3)): fix 3 columns, infer the row count
        image_paths_array = np.reshape(
            np.expand_dims(np.array(image_paths), 1), (-1, 3))
        # Enqueue the paths; a separate input-pipeline thread loads the
        # images into memory (per the original author's note).
        sess.run(
            enqueue_op, {
                image_paths_placeholder: image_paths_array,
                labels_placeholder: labels_array
            })
        # With the defaults, nrof_examples is 1800 and embedding_size 128.
        emb_array = np.zeros((nrof_examples, embedding_size))
        # nrof_batches defaults to 1800 / 90 = 20.
        nrof_batches = int(np.ceil(nrof_examples / args.batch_size))
        # Compute embeddings batch by batch (20 batches by default).
        for i in range(nrof_batches):
            batch_size = min(nrof_examples - i * args.batch_size,
                             args.batch_size)
            emb, lab = sess.run(
                [embeddings, labels_batch],
                feed_dict={
                    batch_size_placeholder: batch_size,
                    learning_rate_placeholder: lr,
                    phase_train_placeholder: True
                })
            # `lab` carries the example indices, so out-of-order dequeues
            # still land in the right rows.
            emb_array[lab, :] = emb
        print('%.3f' % (time.time() - start_time))

        # Select triplets based on the embeddings
        print('Selecting suitable triplets for training')
        triplets, nrof_random_negs, nrof_triplets = select_triplets(
            emb_array, num_per_class, image_paths, args.people_per_batch,
            args.alpha)
        selection_time = time.time() - start_time
        print(
            '(nrof_random_negs, nrof_triplets) = (%d, %d): time=%.3f seconds' %
            (nrof_random_negs, nrof_triplets, selection_time))

        # Perform training on the selected triplets

        nrof_batches = int(np.ceil(nrof_triplets * 3 / args.batch_size))
        triplet_paths = list(itertools.chain(*triplets))
        labels_array = np.reshape(np.arange(len(triplet_paths)), (-1, 3))
        triplet_paths_array = np.reshape(
            np.expand_dims(np.array(triplet_paths), 1), (-1, 3))
        # Enqueue the selected triplet paths for the training pass.
        sess.run(
            enqueue_op, {
                image_paths_placeholder: triplet_paths_array,
                labels_placeholder: labels_array
            })
        nrof_examples = len(triplet_paths)
        train_time = 0
        i = 0
        emb_array = np.zeros((nrof_examples, embedding_size))
        loss_array = np.zeros((nrof_triplets, ))
        # Compute the triplet loss on these examples and optimize.
        summary = tf.Summary()
        while i < nrof_batches:
            start_time = time.time()
            batch_size = min(nrof_examples - i * args.batch_size,
                             args.batch_size)
            feed_dict = {
                batch_size_placeholder: batch_size,
                learning_rate_placeholder: lr,
                phase_train_placeholder: True
            }
            # sess.run fetches five tensors: `loss` is the forward loss and
            # `train_op` applies the gradient update derived from it.
            err, _, step, emb, lab = sess.run(
                [loss, train_op, global_step, embeddings, labels_batch],
                feed_dict=feed_dict)
            emb_array[lab, :] = emb
            loss_array[i] = err
            duration = time.time() - start_time
            print('Epoch: [%d][%d/%d]\tTime %.3f\tLoss %2.3f' %
                  (epoch, batch_number + 1, args.epoch_size, duration, err))
            batch_number += 1
            i += 1
            train_time += duration
            # Accumulate the per-batch loss into this epoch's summary.
            summary.value.add(tag='loss', simple_value=err)

        # Add the triplet-selection time to the summary and flush it.
        #pylint: disable=maybe-no-member
        summary.value.add(tag='time/selection', simple_value=selection_time)
        summary_writer.add_summary(summary, step)
    return step
Esempio n. 6
0
def train(args, sess, dataset, epoch, image_paths_placeholder, labels_placeholder, labels_batch,
          batch_size_placeholder, learning_rate_placeholder, phase_train_placeholder, enqueue_op, input_queue, global_step,
          embeddings, loss, train_op, summary_op, summary_writer, learning_rate_schedule_file,
          embedding_size, anchor, positive, negative, triplet_loss):
    """Run one epoch of triplet-loss training.

    Per outer iteration: sample people, run a forward pass to embed the
    sampled images, select hard triplets (this variant's `select_triplets`
    also takes `args.batch_size`), then train on them until
    `args.epoch_size` batches have been processed in total.

    Returns the global step after the last processed batch.
    """
    batch_number = 0

    # A positive command-line learning rate overrides the schedule file.
    if args.learning_rate > 0.0:
        lr = args.learning_rate
    else:
        lr = facenet.get_learning_rate_from_file(learning_rate_schedule_file, epoch)
    while batch_number < args.epoch_size:
        # Sample people randomly from the dataset
        image_paths, num_per_class = sample_people(dataset, args.people_per_batch, args.images_per_person)

        print('Running forward pass on sampled images: ', end='')
        start_time = time.time()
        nrof_examples = args.people_per_batch * args.images_per_person
        # Labels are the example indices, laid out in triplet rows.
        labels_array = np.reshape(np.arange(nrof_examples), (-1, 3))
        image_paths_array = np.reshape(np.expand_dims(np.array(image_paths), 1), (-1, 3))
        sess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: labels_array})
        emb_array = np.zeros((nrof_examples, embedding_size))
        nrof_batches = int(np.ceil(nrof_examples / args.batch_size))
        # BUGFIX: `xrange` does not exist in Python 3 (this code already
        # relies on the Python 3 print function); use `range`.
        for i in range(nrof_batches):
            batch_size = min(nrof_examples - i * args.batch_size, args.batch_size)
            emb, lab = sess.run([embeddings, labels_batch],
                                feed_dict={batch_size_placeholder: batch_size,
                                           learning_rate_placeholder: lr,
                                           phase_train_placeholder: True})
            # `lab` carries the example indices, so out-of-order dequeues
            # still land in the right rows.
            emb_array[lab, :] = emb
        print('time=%.3f seconds' % (time.time() - start_time))

        # Select triplets based on the embeddings
        print('Selecting suitable triplets for training')
        triplets, nrof_random_negs, nrof_triplets = select_triplets(
            emb_array, num_per_class, image_paths, args.people_per_batch,
            args.alpha, args.batch_size)
        selection_time = time.time() - start_time
        print('(nrof_random_negs, nrof_triplets) = (%d, %d): time=%.3f seconds' %
              (nrof_random_negs, nrof_triplets, selection_time))

        # Perform training on the selected triplets
        nrof_batches = int(np.ceil(nrof_triplets * 3 / args.batch_size))
        triplet_paths = list(itertools.chain(*triplets))
        labels_array = np.reshape(np.arange(len(triplet_paths)), (-1, 3))
        triplet_paths_array = np.reshape(np.expand_dims(np.array(triplet_paths), 1), (-1, 3))
        sess.run(enqueue_op, {image_paths_placeholder: triplet_paths_array, labels_placeholder: labels_array})
        nrof_examples = len(triplet_paths)
        train_time = 0
        i = 0
        # (The unused emb_array/loss_array buffers flagged "no use" in the
        # original have been removed.)
        while i < nrof_batches:
            start_time = time.time()
            batch_size = min(nrof_examples - i * args.batch_size, args.batch_size)
            feed_dict = {batch_size_placeholder: batch_size, learning_rate_placeholder: lr, phase_train_placeholder: True}
            err, _, step, emb, lab = sess.run([loss, train_op, global_step, embeddings, labels_batch], feed_dict=feed_dict)
            duration = time.time() - start_time
            print('Epoch: [%d][%d/%d]\tTime %.3f\tLoss %2.5f' %
                  (epoch, batch_number + 1, args.epoch_size, duration, err))
            batch_number += 1
            i += 1
            train_time += duration

        # Add the triplet-selection time to the summary and flush it.
        summary = tf.Summary()
        #pylint: disable=maybe-no-member
        summary.value.add(tag='time/selection', simple_value=selection_time)
        summary_writer.add_summary(summary, step)
    return step