Code Example #1
import numpy as np
import tensorflow as tf  # TF 1.x API (tf.python_io, tf.train.Example)

from TFFusions.toolkits.dataloader import getTrainItems, concurrent_get_items


def make_tfrecord(recordfilename):
    # serialize the first 64 training items into one TFRecord file
    trainitems = getTrainItems()
    trainitems = trainitems[:64]

    writer = tf.python_io.TFRecordWriter(recordfilename)

    cnt = 100  # running record id written into each example
    for item in trainitems:
        frame_len, features, labellst = concurrent_get_items(item,
                                                             kind='train')
        # zero-pad the frame features along the time axis to a fixed 600 frames
        features = np.pad(features, ((0, 600 - frame_len), (0, 0)), 'constant')
        # multi-hot label vector over the 500-class vocabulary
        labels = np.zeros(500, dtype=np.int32)
        for label in labellst:
            labels[label] = 1

        example = tf.train.Example(features=tf.train.Features(
            feature={
                'cnt': _int64_feature(cnt),
                'frame_len': _int64_feature(frame_len),
                'features': _bytes_feature(features.tostring()),
                'labels': _bytes_feature(labels.tostring())
            }))

        cnt += 1

        writer.write(example.SerializeToString())

    writer.close()
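
The `_int64_feature` and `_bytes_feature` helpers are not shown in this example; a minimal sketch, assuming the standard TFRecord feature wrappers:

def _int64_feature(value):
    # wrap a Python int in a tf.train.Feature
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))


def _bytes_feature(value):
    # wrap raw bytes in a tf.train.Feature
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))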
Code Example #2
import tensorflow as tf

from TFFusions.toolkits.dataloader import getTrainItems


def test():

    kind = 'train'    # unused in this smoke test
    items = getTrainItems()
    item = items[0]

    filenamequeue = tf.train.string_input_producer(
        ['/mnt/md0/LSVC/inc_tfrecords/val_tf_0_10239.tfrecord'])

    a, b, c, d = read_and_decode(filenamequeue, 10)
    a1, b1, c1, d1 = read_and_decode(filenamequeue, 10)  # second decode pipeline, exercised only in the commented-out run below

    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())

    with tf.Session() as sess:
        sess.run(init_op)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord, sess=sess)

        for i in range(20):
            A, B, C, D = sess.run([a, b, c, d])
            print('-->', D)
            # A,B,C,D = sess.run([a1,b1,c1,d1])

        coord.request_stop()
        coord.join(threads)
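
`read_and_decode` is defined elsewhere in the project. A minimal sketch consistent with the record layout written in Code Example #1, assuming float32 features padded to 600 frames of 4096 dimensions and a 500-way int32 label vector (the shapes are assumptions taken from the other examples):

def read_and_decode(filename_queue, batchsize):
    # read one serialized example from the queue and batch the decoded tensors (TF1 queue API)
    reader = tf.TFRecordReader()
    _, serialized = reader.read(filename_queue)
    parsed = tf.parse_single_example(
        serialized,
        features={
            'cnt': tf.FixedLenFeature([], tf.int64),
            'frame_len': tf.FixedLenFeature([], tf.int64),
            'features': tf.FixedLenFeature([], tf.string),
            'labels': tf.FixedLenFeature([], tf.string),
        })
    features = tf.reshape(tf.decode_raw(parsed['features'], tf.float32), (600, 4096))
    labels = tf.reshape(tf.decode_raw(parsed['labels'], tf.int32), (500,))
    # tf.train.batch requires fully defined shapes, which the reshapes above provide
    return tf.train.batch(
        [parsed['cnt'], parsed['frame_len'], features, labels],
        batch_size=batchsize)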
Code Example #3
import random
import numpy as np

from TFFusions.toolkits.dataloader import getTrainItems, getValItems, Load_Features_SENET, concurrent_get_items
from TFFusions.SecondLevel.GCForest import *

train_items = getTrainItems()

# smoke-test the loader on one item
Load_Features_SENET(train_items[1][0], kind='train', limitlen=10)

# sample 1000 item indices (with replacement)
rid = random.choices(list(range(len(train_items))), k=1000)

train_X = list()
train_Y = list()

for i in range(100):    # only the first 100 sampled indices are used

    try:
        videolen, feature, label = concurrent_get_items(
            train_items[rid[i]], kind='train', load_func=Load_Features_SENET)
    except Exception as E:
        continue    # skip items whose features fail to load

    id = random.randint(0, videolen)    # unused; note it also shadows the builtin
    randlen = random.choices(list(range(videolen)), k=1)    # one random frame index

    f = list()
    for j in randlen:
        f.append(feature[j, :])
    f = np.array(f)
    f = np.reshape(f, (-1))

    # presumably the sampled feature and its label are then collected for
    # the second-level model (completion inferred from the otherwise unused
    # train_X/train_Y lists above)
    train_X.append(f)
    train_Y.append(label)
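
Once the loop has collected samples, they can be stacked for the second-level model; a short continuation sketch (since k=1 frame is sampled per video, every flattened feature has the same length):

train_X = np.array(train_X)    # shape: (num_collected, feature_dim)
print(train_X.shape, len(train_Y))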
Code Example #4
import numpy as np
from numba import jit

# Config and the get*Items helpers come from the surrounding TFFusions project


def Load_Features_VGG19(videoname, kind='train'):
    # hypothetical name and signature; the original def line is not in the snippet
    if kind == 'train':
        prefix = Config.DATA_PATH + 'trainval/'
    elif kind == 'val':
        prefix = Config.DATA_PATH + 'trainval/'
    elif kind == 'test':
        prefix = Config.DATA_PATH + 'test/'
    else:
        raise NotImplementedError
    # videoname example : lsvc000000
    filename = prefix + '{}_fc6_vgg19_frame.binary'.format(videoname)
    frame_features = np.fromfile(filename, dtype='float32')  # flat array; one 4096-d fc6 vector per frame
    return frame_features


valitems = getValItems()
trainitems = getTrainItems()
testitems = getTestItems()


@jit
def online_variance(data):
    # Welford's single-pass algorithm: returns population variance and mean
    n = 0
    mean = 0.0
    M2 = 0.0
    for x in data:
        n += 1
        delta = x - mean
        mean += delta / n
        delta2 = x - mean
        M2 += delta * delta2
    return M2 / n, mean
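
A quick sanity check of `online_variance` against NumPy (illustrative usage only; `np.var` defaults to the same population variance, ddof=0):

data = np.random.randn(100000)
var, mean = online_variance(data)
assert abs(var - np.var(data)) < 1e-8
assert abs(mean - np.mean(data)) < 1e-8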
Code Example #5
def main(config_yaml=None):
    train_config = config_yaml or Config.TRAIN_SCRIPT + 'lstm-memory-cell1024.yaml'
    LOAD_YAML_TO_FLAG(train_config)
    FLAGS = Get_GlobalFLAG()

    if not os.path.exists(FLAGS.train_dir):
        print('mk train dir {}'.format(FLAGS.train_dir))
        os.mkdir(FLAGS.train_dir)

    train_items = getTrainItems()
    val_items = getValItems()
    batchsize = FLAGS.batchsize

    inputs = tf.placeholder(dtype=tf.float32, shape=(None, 600, 4096))
    num_frames = tf.placeholder(dtype=tf.int32, shape=(None,))
    target_labels = tf.placeholder(dtype=tf.int32, shape=(None, FLAGS.vocab_size))

    model = GetFrameModel(FLAGS.frame_level_model)()
    lossfunc = SoftmaxLoss()

    predict_labels = model.create_model(model_input=inputs, vocab_size=FLAGS.vocab_size, num_frames=num_frames)
    predict_labels = predict_labels['predictions']
    loss = lossfunc.calculate_loss(predict_labels, target_labels)

    global_step = tf.Variable(0, trainable=False)
    decayed_learning_rate = tf.train.exponential_decay(FLAGS.base_learning_rate,
                                                       global_step,
                                                       FLAGS.decay_at_epoch,
                                                       FLAGS.learning_rate_decay,
                                                       staircase=True)

    optimizer_class = find_class_by_name(FLAGS.optimize, [tf.train])
    # pass global_step so it is incremented and the decayed rate actually decays
    train_op = optimizer_class(decayed_learning_rate).minimize(loss, global_step=global_step)

    # LOG
    log_prefix_name = '{}_{}'.format(FLAGS.name, FLAGS.EX_ID)
    # python's logging
    pylog = logging.getLogger(log_prefix_name)
    pylog.setLevel(logging.DEBUG)
    fh = logging.FileHandler(FLAGS.train_dir + '/' + log_prefix_name + '.log')
    ch = logging.StreamHandler()
    formatter = logging.Formatter('%(asctime)s-%(name)s-%(levelname)s: %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    pylog.addHandler(fh)
    pylog.addHandler(ch)
    # tfboard's log
    logger = Logger(FLAGS.train_dir + '/' + log_prefix_name)

    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    tf_config.allow_soft_placement = True
    tf_config.log_device_placement = True
    sess = tf.Session(config=tf_config)
    sess.run(tf.global_variables_initializer())

    # Load Queue
    pq_train = PictureQueue(kind='train', batchsize=batchsize, worker=5)
    pq_test = PictureQueue(kind='val', batchsize=batchsize, worker=3)

    # save ( after session )
    Saver = tf.train.Saver(max_to_keep=20, keep_checkpoint_every_n_hours=2)

    if getattr(FLAGS, 'model_checkpoint_path', None) is not None:
        print('load model from {} ...'.format(FLAGS.model_checkpoint_path))
        Saver.restore(sess=sess, save_path=FLAGS.model_checkpoint_path)
        print('Success !!!')

    cnt = 0

    for epoch in range(FLAGS.num_epochs):
        loop = len(train_items) // batchsize
        for i in range(loop):

            # l = i*batchsize
            # r = l+batchsize
            # items = train_items[l:r]
            # features, video_frames, target_label = gen_tf_input(items,'train')

            features, video_frames, target_label = pq_train.Get()

            video_frames = np.array(video_frames)

            fd = {inputs: features, target_labels: target_label, num_frames: video_frames}
            loss_value, _ = sess.run([loss, train_op], feed_dict=fd)

            logger.scalar_summary(log_prefix_name + '/train_loss', loss_value, cnt)
            pylog.info('cnt: {} train_loss: {}'.format(cnt, loss_value))

            if cnt % 50 == 0:
                predict = sess.run(predict_labels, feed_dict=fd)
                train_meanap = mean_ap(predict, target_label)
                acc = accuracy(predict, target_label, topk=(1, 5, 10))  # target_label (numpy batch), not the placeholder

                logger.scalar_summary(log_prefix_name + '/train_mAP', train_meanap, cnt)
                logger.scalar_summary(log_prefix_name + '/train_acc@1', acc[0], cnt)
                logger.scalar_summary(log_prefix_name + '/train_acc@5', acc[1], cnt)
                logger.scalar_summary(log_prefix_name + '/train_acc@10', acc[2], cnt)

                pylog.info('cnt: {} train_mAP: {}'.format(cnt, train_meanap))
                pylog.info('cnt: {} train_acc@1: {}'.format(cnt, acc[0]))
                pylog.info('cnt: {} train_acc@5: {}'.format(cnt, acc[1]))
                pylog.info('cnt: {} train_acc@10: {}'.format(cnt, acc[2]))

                # items = random.choices(val_items,k=FLAGS.batchsize)
                # features, video_frames, target_label = gen_tf_input(items,'val')
                features, video_frames, target_label = pq_test.Get()

                fd = {inputs: features, target_labels: target_label, num_frames: video_frames}
                predict, test_loss = sess.run([predict_labels, loss], feed_dict=fd)
                test_meanap = mean_ap(predict, target_label)
                acc = accuracy(predict, target_label, topk=(1, 5, 10))

                logger.scalar_summary(log_prefix_name + '/test_mAP', test_meanap, cnt)
                logger.scalar_summary(log_prefix_name + '/test_acc@1', acc[0], cnt)
                logger.scalar_summary(log_prefix_name + '/test_acc@5', acc[1], cnt)
                logger.scalar_summary(log_prefix_name + '/test_acc@10', acc[2], cnt)
                logger.scalar_summary(log_prefix_name + '/test_loss', test_loss, cnt)

                pylog.info('cnt: {} test_mAP: {}'.format(cnt, test_meanap))
                pylog.info('cnt: {} test_loss: {}'.format(cnt, test_loss))
                pylog.info('cnt: {} test_acc@1: {}'.format(cnt, acc[0]))
                pylog.info('cnt: {} test_acc@5: {}'.format(cnt, acc[1]))
                pylog.info('cnt: {} test_acc@10: {}'.format(cnt, acc[2]))

            if cnt % 2000 == 0:
                savepath = FLAGS.train_dir + '/' + log_prefix_name + '_save{:03}.ckpt'.format(cnt)
                Saver.save(sess, savepath, cnt)
                pylog.info('save model:{} at {}.'.format(FLAGS.name, savepath))

            cnt += 1
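
`PictureQueue` is the project's prefetching loader; only its `Get()` method is used above. A minimal sketch of that interface, assuming worker threads that prefetch (features, num_frames, labels) batches via the `gen_tf_input` helper referenced in the commented-out lines (the internals here are hypothetical):

import queue
import threading
import random


class PictureQueue(object):
    def __init__(self, kind='train', batchsize=32, worker=4, maxsize=20):
        self.kind = kind
        self.batchsize = batchsize
        self.q = queue.Queue(maxsize=maxsize)   # bounded buffer of ready batches
        self.items = getTrainItems() if kind == 'train' else getValItems()
        for _ in range(worker):
            threading.Thread(target=self._work, daemon=True).start()

    def _work(self):
        while True:
            batch = random.choices(self.items, k=self.batchsize)
            # gen_tf_input is assumed to return (features, num_frames, labels)
            # shaped for the placeholders: (B, 600, 4096), (B,), (B, vocab_size)
            self.q.put(gen_tf_input(batch, self.kind))

    def Get(self):
        return self.q.get()   # blocks until a batch is ready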
Code Example #6
import tensorflow as tf
from TFFusions.toolkits.dataloader import getTrainItems, getValItems

# distill tf record
feature_file = '/mnt/md0/LSVC/feat_senet'  # unused below

cnt = 0

second_level = []
with open('', 'r') as f:   # path elided in the original
    for line in f:
        second_level.append(line.strip())   # one second-level label per line (inferred)

for item in getTrainItems():
    labels = item[1]   # label list of each training item (collected but unused here)

for item in getValItems():
    cnt += 1   # count validation items
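
A hedged guess at where this stub is headed: filter the training items whose labels intersect the second-level list (the str() normalization and the intersection logic are assumptions):

second_level_set = set(second_level)
distill_items = [item for item in getTrainItems()
                 if second_level_set & {str(l) for l in item[1]}]
print('train items to distill:', len(distill_items), 'val items:', cnt)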