Example #1
def process_video(data_info, name, mode, is_training=False):
    """ Get video clip and label from data info list."""
    data = Action_Dataset(name, mode, [data_info])
    if is_training:
        clip_seq, label_seq = data.next_batch(1, _CLIP_SIZE)
    else:
        clip_seq, label_seq = data.get_element(1, _CLIP_SIZE)
    clip_seq = 2 * (clip_seq / 255) - 1
    clip_seq = np.array(clip_seq, dtype='float32')
    return clip_seq, label_seq
Example #2
def process_video(data_info, name, mode, is_training=True):
    """ Get video clip and label from data info list."""
    data = Action_Dataset(name, mode, [data_info])
    if is_training:
        clip_seq, label_seq = data.next_batch(1, _CLIP_SIZE)
    else:
        clip_seq, label_seq = data.next_batch(
            1, _EACH_VIDEO_TEST_SIZE+1, shuffle=False, data_augment=False)
    clip_seq = 2*(clip_seq/255) - 1
    clip_seq = np.array(clip_seq, dtype='float32')
    return clip_seq, label_seq
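Both variants of process_video wrap Action_Dataset and return a clip normalized to [-1, 1] plus its label. A minimal call-site sketch, assuming a data_info record shaped like the one described in the comment of Example #4 (name, frame directory, frame count, label) and the module-level constants (_CLIP_SIZE, _EACH_VIDEO_TEST_SIZE) already defined; the path and values below are purely illustrative:

# Hypothetical record; layout taken from the comment in Example #4
data_info = ['v_ApplyEyeMakeup_g08_c01',
             '/path/to/jpegs_256/v_ApplyEyeMakeup_g08_c01', '121', '0']
# Training: one randomly sampled clip of _CLIP_SIZE frames
clip, label = process_video(data_info, name='ucf101', mode='rgb',
                            is_training=True)
# clip is a float32 array scaled to [-1, 1]; label is the clip-level class index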
Example #3
def main(dataset='clipped_data', mode='rgb', split=1, investigate=0):
    assert mode in ['rgb', 'flow'], 'Only RGB data and flow data are supported'
    log_dir = os.path.join(_LOG_ROOT,
                           'finetune-%s-%s-%d' % (dataset, mode, split))
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    logging.basicConfig(level=logging.INFO,
                        filename=os.path.join(log_dir, 'log.txt'),
                        filemode='w',
                        format='%(message)s')

    ###  Data Preload  ###
    train_info, test_info = split_data(
        os.path.join('./data', dataset, mode + '.csv'),
        os.path.join('./data', dataset, 'testlist%02d' % split + '.txt'))
    train_data = Action_Dataset(dataset, mode, train_info)
    test_data = Action_Dataset(dataset, mode, test_info)

    num_train_sample = len(train_info)
    train_info_tensor = tf.constant(train_info)
    test_info_tensor = tf.constant(test_info)

    train_info_dataset = tf.data.Dataset.from_tensor_slices(
        (train_info_tensor))
    train_info_dataset = train_info_dataset.shuffle(
        buffer_size=num_train_sample)
    train_dataset = train_info_dataset.map(
        lambda x: _get_data_label_from_info(x, dataset, mode),
        num_parallel_calls=_NUM_PARALLEL_CALLS)
    train_dataset = train_dataset.repeat().batch(_BATCH_SIZE)
    train_dataset = train_dataset.prefetch(buffer_size=_PREFETCH_BUFFER_SIZE)

    test_info_dataset = tf.data.Dataset.from_tensor_slices((test_info_tensor))
    test_dataset = test_info_dataset.map(
        lambda x: _get_data_label_from_info(x, dataset, mode),
        num_parallel_calls=_NUM_PARALLEL_CALLS)
    test_dataset = test_dataset.batch(1).repeat()
    test_dataset = test_dataset.prefetch(buffer_size=_PREFETCH_BUFFER_SIZE)

    # iterator = dataset.make_one_shot_iterator()
    # clip_holder, label_holder = iterator.get_next()
    iterator = tf.data.Iterator.from_structure(train_dataset.output_types,
                                               train_dataset.output_shapes)
    train_init_op = iterator.make_initializer(train_dataset)
    test_init_op = iterator.make_initializer(test_dataset)

    clip_holder, label_holder = iterator.get_next()
    clip_holder = tf.squeeze(clip_holder, [1])
    label_holder = tf.squeeze(label_holder, [1])
    clip_holder.set_shape(
        [None, None, _FRAME_SIZE, _FRAME_SIZE, _CHANNEL[mode]])
    dropout_holder = tf.placeholder(tf.float32)
    is_train_holder = tf.placeholder(tf.bool)

    # Inference module
    model = r2plus1d.R2Plus1D()
    # the call below outputs the final logits;
    # __call__ goes through _template, which calls _build on first use
    logits = model(clip_holder, is_training=is_train_holder)
    logits_dropout = tf.nn.dropout(logits, dropout_holder)
    # To change the 400 classes to the ucf101 or hmdb classes
    fc_out = tf.layers.dense(logits_dropout,
                             _CLASS_NUM[dataset],
                             use_bias=True)
    #print(fc_out.shape)
    # compute top-1 hits over the whole batch
    is_in_top_1_op = tf.nn.in_top_k(fc_out, label_holder, 1)

    # Loss calculation, including L2-norm
    variable_map = {}
    for variable in tf.global_variables():
        tmp = variable.name.split('/')
        variable_map[variable.name.replace('R2Plus1D/', '').replace(
            '/', '_').replace(':0', '').replace('gamma', 's').replace(
                'beta', 'b').replace('moving_mean', 'rm').replace(
                    'moving_variance',
                    'riv').replace('_1_conv', '_conv_1').replace(
                        '_2_conv',
                        '_conv_2').replace('conv_2_1', '2_conv_1').replace(
                            '_1_spatbn_m', '_spatbn_1_m').replace(
                                '_2_spatbn_m', '_spatbn_2_m').replace(
                                    'comp_3_shortcut_projection',
                                    'shortcut_projection_3').replace(
                                        'comp_7_shortcut_projection',
                                        'shortcut_projection_7').replace(
                                            'comp_13_shortcut_projection',
                                            'shortcut_projection_13').replace(
                                                'kernel',
                                                'w').replace('bias',
                                                             'b')] = variable
        if tmp[-1] == 'w:0' or tmp[-1] == 'kernel:0':
            weight_l2 = tf.nn.l2_loss(variable)
            tf.add_to_collection('weight_l2', weight_l2)

    loss_weight = tf.add_n(tf.get_collection('weight_l2'), 'loss_weight')
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label_holder,
                                                       logits=fc_out))
    total_loss = loss + _WEIGHT_OF_LOSS_WEIGHT * loss_weight
    #tf.summary.scalar('loss', loss)
    #tf.summary.scalar('loss_weight', loss_weight)
    #tf.summary.scalar('total_loss', total_loss)

    # Import pre-trained model
    #saver = tf.train.Saver(var_list=variable_map, reshape=True)
    saver2 = tf.train.Saver(max_to_keep=9999)
    # Specific hyperparameters
    # per_epoch_step: the number of batches per epoch
    per_epoch_step = int(np.ceil(train_data.size / _BATCH_SIZE))
    # global step constant
    if mode == 'flow':
        _GLOBAL_EPOCH = 45
        boundaries = [20000, 30000, 35000, 40000]
        values = [1e-3, 8e-4, 5e-4, 3e-4, 1e-4]
    else:
        _GLOBAL_EPOCH = 20
        boundaries = [900, 1500, 2000, 2500, 3000]
        values = [1e-3, 8e-4, 5e-4, 3e-4, 1e-4, 5e-5]
    global_step = _GLOBAL_EPOCH * per_epoch_step
    # global step counting
    global_index = tf.Variable(0, trainable=False)

    # Learning rate schedule set by hand; a built-in decay schedule could be used instead
    learning_rate = tf.train.piecewise_constant(global_index, boundaries,
                                                values)

    #tf.summary.scalar('learning_rate', learning_rate)

    # Optimizer set-up
    # For batch norm, the update ops must run before the train op
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        optimizer = tf.train.MomentumOptimizer(
            learning_rate, _MOMENTUM).minimize(total_loss,
                                               global_step=global_index)
    '''
    with tf.control_dependencies(update_ops):
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(total_loss, global_step=global_index)
    '''
    sess = tf.Session()
    #merged_summary = tf.summary.merge_all()
    #train_writer = tf.summary.FileWriter(log_dir, sess.graph)
    sess.run(tf.global_variables_initializer())
    sess.run(train_init_op)

    # Load pretrained weight
    if mode == 'rgb':
        weights_file = _CHECKPOINT_PATHS['r21d_rgb']
    else:
        weights_file = _CHECKPOINT_PATHS['r21d_flow']

    with open(weights_file, 'rb') as fopen:
        blobs = pickle.load(fopen, encoding='latin-1')['blobs']

    print("len of blobs %d" % (len(blobs)))

    for k, v in sorted(blobs.items()):
        if k in variable_map:
            print('loading -- %s' % (k))
            if len(v.shape) == 2:
                sess.run(tf.assign(variable_map[k], tf.transpose(v)))
            elif len(v.shape) == 5:
                sess.run(
                    tf.assign(variable_map[k],
                              tf.transpose(v, perm=[2, 3, 4, 1, 0])))
            else:
                sess.run(tf.assign(variable_map[k], v))
    #saver.restore(sess, _CHECKPOINT_PATHS[train_data.mode+'_imagenet'])

    print('----Here we start!----')
    print('Output writes to ' + log_dir)
    # logging.info('----Here we start!----')
    step = 0

    true_count = 0
    epoch_completed = 0

    start_time = time.time()

    while step <= global_step:
        step += 1
        #start_time = time.time()
        _, is_in_top_1 = sess.run([optimizer, is_in_top_1_op],
                                  feed_dict={
                                      dropout_holder: _DROPOUT,
                                      is_train_holder: True
                                  })
        #duration = time.time() - start_time
        if (investigate == 1) or (epoch_completed == _GLOBAL_EPOCH - 1):
            tmp = np.sum(is_in_top_1)
            true_count += tmp

        #train_writer.add_summary(summary, step)

        if step % per_epoch_step == 0:
            epoch_completed += 1
            if (investigate == 1) or (epoch_completed == _GLOBAL_EPOCH):
                train_accuracy = true_count / (per_epoch_step * _BATCH_SIZE)
                true_count = 0

                sess.run(test_init_op)
                true_count = 0
                # start test process
                for i in range(test_data.size):
                    # print(i,true_count)
                    is_in_top_1 = sess.run(is_in_top_1_op,
                                           feed_dict={
                                               dropout_holder: 1,
                                               is_train_holder: False
                                           })
                    true_count += np.sum(is_in_top_1)
                test_accuracy = true_count / test_data.size
                true_count = 0
                # to ensure every test procedure has the same test size
                test_data.index_in_epoch = 0
                print('Epoch%d - train: %.3f   test: %.3f   time: %d' %
                      (epoch_completed, train_accuracy, test_accuracy,
                       time.time() - start_time))
                logging.info('Epoch%d,train,%.3f,test,%.3f   time: %d' %
                             (epoch_completed, train_accuracy, test_accuracy,
                              time.time() - start_time))
                # save a checkpoint after each evaluation
                saver2.save(
                    sess,
                    os.path.join(log_dir,
                                 test_data.name + '_' + train_data.mode),
                    epoch_completed)
                sess.run(train_init_op)
            else:
                print('Epoch%d - time: %d' %
                      (epoch_completed, time.time() - start_time))
                logging.info('Epoch%d time: %d' %
                             (epoch_completed, time.time() - start_time))
            start_time = time.time()
    #train_writer.close()
    sess.close()
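The deeply nested .replace() chain above builds variable_map by renaming each TensorFlow variable into the blob name used in the pickled R(2+1)D checkpoint (the 'blobs' dict loaded later). The same mapping can be written as an ordered list of substitutions, which is easier to audit; this is only an equivalent restatement of the chain above (the rules must be applied in this exact order), not part of the original code:

_RENAME_RULES = [
    ('R2Plus1D/', ''), ('/', '_'), (':0', ''),
    ('gamma', 's'), ('beta', 'b'),
    ('moving_mean', 'rm'), ('moving_variance', 'riv'),
    ('_1_conv', '_conv_1'), ('_2_conv', '_conv_2'), ('conv_2_1', '2_conv_1'),
    ('_1_spatbn_m', '_spatbn_1_m'), ('_2_spatbn_m', '_spatbn_2_m'),
    ('comp_3_shortcut_projection', 'shortcut_projection_3'),
    ('comp_7_shortcut_projection', 'shortcut_projection_7'),
    ('comp_13_shortcut_projection', 'shortcut_projection_13'),
    ('kernel', 'w'), ('bias', 'b'),
]

def to_checkpoint_blob_name(tf_name):
    # Apply the substitutions in order, mirroring the chained .replace() calls.
    for old, new in _RENAME_RULES:
        tf_name = tf_name.replace(old, new)
    return tf_name

# variable_map[to_checkpoint_blob_name(v.name)] = v reproduces the dictionary built above.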
Example #4
def main(dataset='ucf101', mode='rgb', split=1):
    assert mode in ['rgb', 'flow'], 'Only RGB data and flow data are supported'
    log_dir = os.path.join(_LOG_ROOT,
                           'finetune-%s-%s-%d' % (dataset, mode, split))
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    logging.basicConfig(level=logging.INFO,
                        filename=os.path.join(log_dir, 'log.txt'),
                        filemode='w',
                        format='%(message)s')

    ###  Data Preload  ###
    train_info, test_info = split_data(
        os.path.join('./data', dataset, mode + '.csv'),
        os.path.join('./data', dataset, 'testlist%02d' % split + '.txt'))
    #        os.path.join('/data1/yunfeng/i3d_test/data', dataset, mode+'.txt'),
    #        os.path.join('/data1/yunfeng/i3d_test/data', dataset, 'testlist%02d' % split+'.txt'))
    train_data = Action_Dataset(dataset, mode, train_info)
    test_data = Action_Dataset(dataset, mode, test_info)

    num_train_sample = len(train_info)
    # Every element in train_info is shown as below:
    # ['v_ApplyEyeMakeup_g08_c01',
    # '/data4/zhouhao/dataset/ucf101/jpegs_256/v_ApplyEyeMakeup_g08_c01',
    # '121', '0']
    train_info_tensor = tf.constant(train_info)
    test_info_tensor = tf.constant(test_info)

    # Dataset building
    # Phase 1 Training
    # one element in this dataset is (train_info list)
    train_info_dataset = tf.data.Dataset.from_tensor_slices(
        (train_info_tensor))
    train_info_dataset = train_info_dataset.shuffle(
        buffer_size=num_train_sample)
    # one element in this dataset is (single image_postprocess, single label)
    train_dataset = train_info_dataset.map(
        lambda x: _get_data_label_from_info(x, dataset, mode),
        num_parallel_calls=_NUM_PARALLEL_CALLS)
    # one element in this dataset is (batch image_postprocess, batch label)
    train_dataset = train_dataset.repeat().batch(_BATCH_SIZE)
    train_dataset = train_dataset.prefetch(buffer_size=_PREFETCH_BUFFER_SIZE)

    # Phase 2 Testing
    # one element in this dataset is (train_info list)
    test_info_dataset = tf.data.Dataset.from_tensor_slices((test_info_tensor))
    # one element in this dataset is (single image_postprocess, single label)
    test_dataset = test_info_dataset.map(
        lambda x: _get_data_label_from_info(x, dataset, mode),
        num_parallel_calls=_NUM_PARALLEL_CALLS)
    # one element in this dataset is (batch image_postprocess, batch label)
    test_dataset = test_dataset.batch(1).repeat()
    test_dataset = test_dataset.prefetch(buffer_size=_PREFETCH_BUFFER_SIZE)

    # iterator = dataset.make_one_shot_iterator()
    # clip_holder, label_holder = iterator.get_next()
    iterator = tf.data.Iterator.from_structure(train_dataset.output_types,
                                               train_dataset.output_shapes)
    train_init_op = iterator.make_initializer(train_dataset)
    test_init_op = iterator.make_initializer(test_dataset)

    clip_holder, label_holder = iterator.get_next()
    clip_holder = tf.squeeze(clip_holder, [1])
    label_holder = tf.squeeze(label_holder, [1])
    clip_holder.set_shape(
        [None, None, _FRAME_SIZE, _FRAME_SIZE, _CHANNEL[mode]])
    dropout_holder = tf.placeholder(tf.float32)
    is_train_holder = tf.placeholder(tf.bool)

    # Inference module
    with tf.variable_scope(_SCOPE[train_data.mode]):
        # insert i3d model
        model = i3d.InceptionI3d(400,
                                 spatial_squeeze=True,
                                 final_endpoint='Logits')
        # the call below outputs the final logits;
        # __call__ goes through _template, which calls _build on first use
        logits, _ = model(clip_holder,
                          is_training=is_train_holder,
                          dropout_keep_prob=dropout_holder)
        logits_dropout = tf.nn.dropout(logits, dropout_holder)
        # To change the 400 classes to the ucf101 or hmdb classes
        fc_out = tf.layers.dense(logits_dropout,
                                 _CLASS_NUM[dataset],
                                 use_bias=True)
        # compute top-1 hits over the whole batch
        is_in_top_1_op = tf.nn.in_top_k(fc_out, label_holder, 1)

    # Loss calculation, including L2-norm
    variable_map = {}
    train_var = []
    for variable in tf.global_variables():
        tmp = variable.name.split('/')
        if tmp[0] == _SCOPE[train_data.mode] and 'dense' not in tmp[1]:
            variable_map[variable.name.replace(':0', '')] = variable
        if tmp[-1] == 'w:0' or tmp[-1] == 'kernel:0':
            weight_l2 = tf.nn.l2_loss(variable)
            tf.add_to_collection('weight_l2', weight_l2)
    loss_weight = tf.add_n(tf.get_collection('weight_l2'), 'loss_weight')
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label_holder,
                                                       logits=fc_out))
    total_loss = loss + _WEIGHT_OF_LOSS_WEIGHT * loss_weight
    tf.summary.scalar('loss', loss)
    tf.summary.scalar('loss_weight', loss_weight)
    tf.summary.scalar('total_loss', total_loss)

    # Import pre-trained model
    saver = tf.train.Saver(var_list=variable_map, reshape=True)
    saver2 = tf.train.Saver(max_to_keep=_SAVER_MAX_TO_KEEP)
    # Specific hyperparameters
    # per_epoch_step: the number of batches per epoch
    per_epoch_step = int(np.ceil(train_data.size / _BATCH_SIZE))
    # global step constant
    global_step = _GLOBAL_EPOCH * per_epoch_step
    # global step counting
    global_index = tf.Variable(0, trainable=False)

    # A hand-set piecewise schedule could be used instead of the cyclical one below
    #boundaries = [10000, 20000, 30000, 40000, 50000]
    #values = [_LEARNING_RATE, 0.0008, 0.0005, 0.0003, 0.0001, 5e-5]
    #learning_rate = tf.train.piecewise_constant(
    #    global_index, boundaries, values)

    STEP_SIZE = per_epoch_step * _STEP_SIZE_FACTOR
    learning_rate = clr.clr(_BASE_LR, _MAX_LR, STEP_SIZE, global_index)

    #learning_rate = tf.train.exponential_decay(
    #        learning_rate=0.000001, global_step=global_index, decay_steps=1, decay_rate=1.001)
    tf.summary.scalar('learning_rate', learning_rate)

    # Optimizer set-up
    # For batch norm, the update ops must run before the train op
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        optimizer = tf.train.MomentumOptimizer(
            learning_rate, _MOMENTUM).minimize(total_loss,
                                               global_step=global_index)
    sess = tf.Session()
    merged_summary = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(log_dir, sess.graph)
    sess.run(tf.global_variables_initializer())
    sess.run(train_init_op)
    saver.restore(sess, _CHECKPOINT_PATHS[train_data.mode + '_imagenet'])

    print('----Here we start!----')
    print('Output writes to ' + log_dir)
    # logging.info('----Here we start!----')
    step = 0
    # accumulated over one epoch
    true_count = 0
    # accumulated over _OUTPUT_STEP batches (used by the disabled block below)
    tmp_count = 0
    accuracy_tmp = 0
    epoch_completed = 0
    while step <= global_step:
        step += 1
        #start_time = time.time()
        _, loss_now, loss_plus, is_in_top_1, summary = sess.run(
            [
                optimizer, total_loss, loss_weight, is_in_top_1_op,
                merged_summary
            ],
            feed_dict={
                dropout_holder: _DROPOUT,
                is_train_holder: True
            })
        #duration = time.time() - start_time
        tmp = np.sum(is_in_top_1)
        true_count += tmp
        tmp_count += tmp
        train_writer.add_summary(summary, step)
        # periodic progress printing (kept disabled in the block below)
        '''if step % _OUTPUT_STEP == 0:
            accuracy = tmp_count / (_OUTPUT_STEP * _BATCH_SIZE)
            print('step: %-4d, loss: %-.4f, accuracy: %.3f (%.2f sec/batch)' %
                  (step, loss_now, accuracy, float(duration)))
            logging.info('step: % -4d, loss: % -.4f,\
                             accuracy: % .3f ( % .2f sec/batch)' %
                         (step, loss_now, accuracy, float(duration)))
            tmp_count = 0'''
        if step % per_epoch_step == 0:
            epoch_completed += 1
            accuracy = true_count / (per_epoch_step * _BATCH_SIZE)
            print('Epoch%d, train accuracy: %.3f' %
                  (epoch_completed, accuracy))
            logging.info('Epoch%d, train accuracy: %.3f' %
                         (epoch_completed, accuracy))
            true_count = 0
            if (step == global_step) or (step % (2 * STEP_SIZE) == 0):
                sess.run(test_init_op)
                true_count = 0
                # start test process
                print(test_data.size)
                for i in range(test_data.size):
                    # print(i,true_count)
                    is_in_top_1 = sess.run(is_in_top_1_op,
                                           feed_dict={
                                               dropout_holder: 1,
                                               is_train_holder: False
                                           })
                    true_count += np.sum(is_in_top_1)
                accuracy = true_count / test_data.size
                true_count = 0
                # to ensure every test procedure has the same test size
                test_data.index_in_epoch = 0
                print('Epoch%d, test accuracy: %.3f' %
                      (epoch_completed, accuracy))
                logging.info('Epoch%d, test accuracy: %.3f' %
                             (epoch_completed, accuracy))
                # save a checkpoint after each evaluation
                if (epoch_completed > 0):
                    saver2.save(
                        sess,
                        os.path.join(log_dir,
                                     test_data.name + '_' + train_data.mode),
                        epoch_completed)
                sess.run(train_init_op)
    train_writer.close()
    sess.close()
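Example #4 replaces the hand-tuned piecewise schedule of Example #3 with a cyclical learning rate taken from a local clr module, stepping over per_epoch_step * _STEP_SIZE_FACTOR iterations. That module is not shown here; what follows is only a minimal sketch of a triangular cyclical schedule with the same (base_lr, max_lr, step_size, global_step) signature, under the assumption that clr.clr follows the triangular policy, which may differ from the actual helper:

import tensorflow as tf

def triangular_clr(base_lr, max_lr, step_size, global_step):
    # Triangular cyclical learning rate: ramp linearly from base_lr to max_lr
    # over step_size steps, then back down, repeating every 2 * step_size steps.
    step = tf.cast(global_step, tf.float32)
    half_cycle = tf.cast(step_size, tf.float32)
    cycle = tf.floor(1.0 + step / (2.0 * half_cycle))
    x = tf.abs(step / half_cycle - 2.0 * cycle + 1.0)
    return base_lr + (max_lr - base_lr) * tf.maximum(0.0, 1.0 - x)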
Example #5
def main(dataset='ucf101', mode='mixed', split=1):
    assert mode in ['rgb', 'flow',
                    'mixed'], 'Only RGB, flow and mixed data are supported'
    log_dir = os.path.join(_LOG_ROOT, 'test')
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    logging.basicConfig(level=logging.INFO,
                        filename=os.path.join(log_dir, 'log-%d.txt' % (split)),
                        filemode='w',
                        format='%(message)s')

    ###  Data Preload  ###
    rgb_test_info = get_each_frame_test_info(
        os.path.join('./data', dataset, 'rgb.csv'),
        os.path.join('./data', dataset, 'testlist%02d' % split + '.txt'),
        mode='rgb')
    #        os.path.join('/data1/yunfeng/i3d_test/data', dataset, mode+'.txt'),
    #        os.path.join('/data1/yunfeng/i3d_test/data', dataset, 'testlist%02d' % split+'.txt'))
    flow_test_info = get_each_frame_test_info(
        os.path.join('./data', dataset, 'flow.csv'),
        os.path.join('./data', dataset, 'testlist%02d' % split + '.txt'),
        mode='flow')
    #        os.path.join('/data1/yunfeng/i3d_test/data', dataset, mode+'.txt'),
    #        os.path.join('/data1/yunfeng/i3d_test/data', dataset, 'testlist%02d' % split+'.txt'))
    rgb_data = Action_Dataset(dataset, mode, rgb_test_info)
    flow_data = Action_Dataset(dataset, mode, flow_test_info)

    num_rgb_sample = len(rgb_test_info)
    num_flow_sample = len(flow_test_info)
    print(num_rgb_sample)
    print(rgb_data.size)
    print(num_flow_sample)
    print(flow_data.size)

    # Every element in rgb_test_info is shown as below:
    # ['v_ApplyEyeMakeup_g08_c01',
    # '/data4/zhouhao/dataset/ucf101/jpegs_256/v_ApplyEyeMakeup_g08_c01',
    # '121', '0']
    #print(rgb_test_info)
    rgb_info_tensor = tf.constant(rgb_test_info)
    flow_info_tensor = tf.constant(flow_test_info)

    # Dataset building
    # one element in each dataset is (test_info list)
    rgb_info_dataset = tf.data.Dataset.from_tensor_slices((rgb_info_tensor))
    flow_info_dataset = tf.data.Dataset.from_tensor_slices((flow_info_tensor))

    test_dataset = tf.data.Dataset.zip((rgb_info_dataset, flow_info_dataset))
    # one element in this dataset is (single image_postprocess, single label)
    test_dataset = test_dataset.map(
        lambda x, y: _get_data_label_from_info(x, y, dataset),
        num_parallel_calls=_NUM_PARALLEL_CALLS)
    # one element in this dataset is (batch image_postprocess, batch label)
    test_dataset = test_dataset.repeat().batch(_BATCH_SIZE)
    test_dataset = test_dataset.prefetch(buffer_size=_PREFETCH_BUFFER_SIZE)

    # iterator = dataset.make_one_shot_iterator()
    # clip_holder, label_holder = iterator.get_next()
    iterator = tf.data.Iterator.from_structure(test_dataset.output_types,
                                               test_dataset.output_shapes)
    test_init_op = iterator.make_initializer(test_dataset)

    (rgb_clip_holder, rgb_label_holder,
     flow_clip_holder, flow_label_holder) = iterator.get_next()

    rgb_clip_holder = tf.squeeze(rgb_clip_holder, [1])
    rgb_label_holder = tf.squeeze(rgb_label_holder, [1])
    rgb_clip_holder.set_shape(
        [None, None, _FRAME_SIZE, _FRAME_SIZE, _CHANNEL['rgb']])

    flow_clip_holder = tf.squeeze(flow_clip_holder, [1])
    flow_label_holder = tf.squeeze(flow_label_holder, [1])
    flow_clip_holder.set_shape(
        [None, None, _FRAME_SIZE, _FRAME_SIZE, _CHANNEL['flow']])

    dropout_holder = tf.placeholder(tf.float32)
    is_train_holder = tf.placeholder(tf.bool)

    # Inference module
    with tf.variable_scope(_SCOPE['rgb']):
        # insert i3d model
        rgb_model = i3d.InceptionI3d(400,
                                     spatial_squeeze=True,
                                     final_endpoint='Logits')
        # the call below outputs the final logits;
        # __call__ goes through _template, which calls _build on first use
        rgb_logits, _ = rgb_model(rgb_clip_holder,
                                  is_training=is_train_holder,
                                  dropout_keep_prob=dropout_holder)
        rgb_logits_dropout = tf.nn.dropout(rgb_logits, dropout_holder)
        # To change the 400 classes to the ucf101 or hmdb classes
        rgb_fc_out = tf.layers.dense(rgb_logits_dropout,
                                     _CLASS_NUM[dataset],
                                     use_bias=True)

    with tf.variable_scope(_SCOPE['flow']):
        # insert i3d model
        flow_model = i3d.InceptionI3d(400,
                                      spatial_squeeze=True,
                                      final_endpoint='Logits')
        # the call below outputs the final logits;
        # __call__ goes through _template, which calls _build on first use
        flow_logits, _ = flow_model(flow_clip_holder,
                                    is_training=is_train_holder,
                                    dropout_keep_prob=dropout_holder)
        flow_logits_dropout = tf.nn.dropout(flow_logits, dropout_holder)
        # To change the 400 classes to the ucf101 or hmdb classes
        flow_fc_out = tf.layers.dense(flow_logits_dropout,
                                      _CLASS_NUM[dataset],
                                      use_bias=True)

    mixed_fc_out = _MIX_WEIGHT_OF_RGB * rgb_fc_out + _MIX_WEIGHT_OF_FLOW * flow_fc_out

    rgb_softmax_op = tf.nn.softmax(rgb_fc_out)
    flow_softmax_op = tf.nn.softmax(flow_fc_out)
    mixed_softmax_op = tf.nn.softmax(mixed_fc_out)

    rgb_in_top_1_op = tf.nn.in_top_k(rgb_softmax_op, rgb_label_holder, 1)
    flow_in_top_1_op = tf.nn.in_top_k(flow_softmax_op, flow_label_holder, 1)
    mixed_in_top_1_op = tf.nn.in_top_k(mixed_softmax_op, rgb_label_holder, 1)
    # Build variable maps for restoring the two streams
    variable_map = {}
    for variable in tf.global_variables():
        tmp = variable.name.split('/')
        if tmp[0] == _SCOPE['rgb']:
            variable_map[variable.name.replace(':0', '')] = variable
    rgb_saver = tf.train.Saver(var_list=variable_map)
    variable_map = {}
    for variable in tf.global_variables():
        tmp = variable.name.split('/')
        if tmp[0] == _SCOPE['flow']:
            variable_map[variable.name.replace(':0', '')] = variable
    flow_saver = tf.train.Saver(var_list=variable_map, reshape=True)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    rgb_saver.restore(sess, _CHECKPOINT_PATHS_RGB[int(split) - 1])
    flow_saver.restore(sess, _CHECKPOINT_PATHS_FLOW[int(split) - 1])

    print('----Here we start!----')
    print('Output writes to ' + log_dir)
    # logging.info('----Here we start!----')
    # accuracy counters over the test set
    rgb_true_count = 0
    flow_true_count = 0
    mixed_true_count = 0
    sess.run(test_init_op)

    rgb_outs = []
    flow_outs = []
    labels = []

    for i in range(rgb_data.size):
        rgb_in_top_1, flow_in_top_1, mixed_in_top_1, rgb_out, flow_out, label = sess.run(
            [
                rgb_in_top_1_op, flow_in_top_1_op, mixed_in_top_1_op,
                rgb_fc_out, flow_fc_out, rgb_label_holder
            ],
            feed_dict={
                dropout_holder: 1,
                is_train_holder: False
            })

        rgb_true_count += np.sum(rgb_in_top_1)
        flow_true_count += np.sum(flow_in_top_1)
        mixed_true_count += np.sum(mixed_in_top_1)

        rgb_outs.append(rgb_out[0])
        flow_outs.append(flow_out[0])
        labels.append(label[0])

        print('rgb: %7d   flow: %7d   mixed:%7d   total: %7d/%d' %
              (rgb_true_count, flow_true_count, mixed_true_count, i,
               rgb_data.size))
        logging.info('rgb: %7d   flow: %7d   mixed:%7d' %
                     (rgb_true_count, flow_true_count, mixed_true_count))

    rgb_accuracy = rgb_true_count / rgb_data.size
    flow_accuracy = flow_true_count / rgb_data.size
    mixed_accuracy = mixed_true_count / rgb_data.size

    rgb_outs = np.asarray(rgb_outs)
    flow_outs = np.asarray(flow_outs)
    labels = np.asarray(labels)

    result_data = pd.concat([
        pd.DataFrame(rgb_outs,
                     columns=['r1', 'r2', 'r3', 'r4', 'r5', 'r6', 'r7', 'r8']),
        pd.DataFrame(flow_outs,
                     columns=['f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8']),
        pd.DataFrame(labels, columns=['label'])
    ], axis=1)

    result_data.to_csv(
        os.path.join(_LOG_ROOT, 'test', 'result-%d.csv' % (split)))

    print('Accuracy:  rgb-%.3f   flow-%.3f   mixed-%.3f' %
          (rgb_accuracy, flow_accuracy, mixed_accuracy))
    logging.info('Accuracy:  rgb-%.3f   flow-%.3f   mixed-%.3f' %
                 (rgb_accuracy, flow_accuracy, mixed_accuracy))

    sess.close()
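The per-clip logits that Example #5 writes to result-%d.csv make it possible to re-run the late fusion offline and sweep the stream weights without rebuilding the graph. A small sketch under the assumption that the CSV keeps the r1..r8 / f1..f8 / label columns written above (the 8-way output is specific to this example's dataset); the path in the usage line is purely illustrative:

import numpy as np
import pandas as pd

def fused_accuracy(csv_path, w_rgb=0.5, w_flow=0.5):
    # Recompute mixed-stream top-1 accuracy from the saved per-clip logits.
    df = pd.read_csv(csv_path)
    rgb = df[['r%d' % i for i in range(1, 9)]].values
    flow = df[['f%d' % i for i in range(1, 9)]].values
    labels = df['label'].values
    mixed = w_rgb * rgb + w_flow * flow
    return np.mean(np.argmax(mixed, axis=1) == labels)

# e.g. fused_accuracy('./log/test/result-1.csv', w_rgb=0.5, w_flow=0.5)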