Example #1
import json
import os

import tensorflow as tf

# VideoVGGExtractor is provided elsewhere in the project.


def extract_vgg(video_directory, max_num_captions):
    """Extract VGG features for each captioned segment of every training video."""
    vgg_features = list()
    # Session config: grow GPU memory on demand and use GPU 0 only.
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    sess_config.gpu_options.visible_device_list = '0'

    # Caption metadata (per-video segment timestamps) from the ActivityNet QA file.
    resFile_video = '/home/yulia/github/Activity_net_whole/train10_qa.json'
    with open(resFile_video) as f:
        data = json.load(f)
    caption_info = data['caption_info']

    with tf.Graph().as_default(), tf.Session(config=sess_config) as sess:
        extractor = VideoVGGExtractor(max_num_captions, sess)
        with open("video_id_train.txt", "r") as video_id_train:
            for line in video_id_train:
                video_id = line.strip()
                video_id_cap = 'v_' + video_id
                if video_id_cap not in caption_info:
                    # No caption timestamps for this video; skip it rather than
                    # reuse timestamps from the previous iteration.
                    continue
                list_timestamps = caption_info[video_id_cap]['timestamps']
                video_path = os.path.join(video_directory, video_id + '.avi')
                print('[VGG]', video_path)
                vgg_features.append(
                    extractor.extract(video_path, list_timestamps,
                                      max_num_captions))
                # print(vgg_features[-1])
    return vgg_features
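
A minimal driver sketch for this variant; the video directory path and the caption cap below are placeholders, not values from the original project:

# Hypothetical usage; '/path/to/activitynet/videos' and max_num_captions=5 are illustrative.
if __name__ == '__main__':
    features = extract_vgg('/path/to/activitynet/videos', max_num_captions=5)
    print('Extracted VGG features for %d videos' % len(features))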
Example #2
import os

import tensorflow as tf

# VideoVGGExtractor is provided elsewhere in the project.


def extract_vgg(video_directory):
    """Extract VGG features for videos vid1.avi through vid1970.avi."""
    vgg_features = list()
    # Session config: grow GPU memory on demand and use GPU 0 only.
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    sess_config.gpu_options.visible_device_list = '0'

    with tf.Graph().as_default(), tf.Session(config=sess_config) as sess:
        # One extractor instance is shared across all videos in this session.
        extractor = VideoVGGExtractor(20, sess)
        for i in range(1, 1971):
            video_path = os.path.join(video_directory, 'vid' + str(i) + '.avi')
            print('[VGG]', video_path)
            vgg_features.append(extractor.extract(video_path))
            # print(vgg_features[-1])
    return vgg_features
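
A rough sketch of how the returned features might be persisted, assuming NumPy is available and each per-video feature array has the same shape; the directory and output filename are placeholders:

import numpy as np

# Hypothetical usage; '/path/to/videos' and 'vgg_features.npy' are illustrative names.
features = extract_vgg('/path/to/videos')
np.save('vgg_features.npy', np.asarray(features))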