Example #1
    def parse(self, url, config):
        """ 解析视频信息,获取下载链接 """

        CONFIG.update(config)
        title = self.get_title(url)
        print("[Info] 视频标题:{}".format(title))

        # Create the directory structure
        CONFIG["video_dir"] = touch_dir(
            repair_filename(os.path.join(CONFIG['dir'], title + "-bilibili")))
        if not CONFIG["video_dir"]:
            print('[Error] Video already exists!')
            return []
        # CONFIG["video_dir"] = touch_dir(os.path.join(CONFIG['base_dir'], "Videos"))

        print("[Info] 保存路径:{}".format(
            os.path.dirname(os.path.abspath(__file__)) + '\\' +
            CONFIG["video_dir"]))
        print("[Info] 正在解析视频信息...")
        videos = self.get_videos(url)

        print("[Info] 视频共有{}P".format(len(videos)))

        CONFIG["videos"] = videos
        for video in videos:
            danmaku_path = os.path.splitext(video.path)[0] + ".xml"
            self.get_danmaku(video.cid, danmaku_path)
            self.get_segment(video)
        return videos


# if __name__ == "__main__":
#     parser = BilibiliParser()
#     avid, bvid, cid, qn = [882566744, 2, 172274931, 80]

#     info = parser.info_api(avid, bvid)
#     parse = parser.parse_api(avid, bvid, cid, qn)
#     subtitle = parser.subtitle_api(cid, avid, bvid)
#     danmaku = parser.danmaku_api(cid)
#     print(info, parse, subtitle, danmaku, sep='\n')
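
The snippet above assumes two helpers, repair_filename and touch_dir, that are not shown. A minimal sketch of plausible implementations, assuming touch_dir returns an empty string when the directory already exists (which is how the "video already exists" branch reads the result) and repair_filename strips characters that are illegal in file names; neither is taken from the actual project:

import os
import re

def repair_filename(path):
    # Sanitize only the last path component; leave the parent directory untouched.
    parent, name = os.path.split(path)
    return os.path.join(parent, re.sub(r'[\\/:*?"<>|]', '_', name))

def touch_dir(path):
    # Create the directory if it does not exist; return "" (falsy) when it is
    # already there, which the caller interprets as "video already exists".
    if os.path.exists(path):
        return ""
    os.makedirs(path)
    return path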
Example #2
import utils
import os
import numpy as np

if __name__ == '__main__':
    path = utils.get_dataset_path('UCF50')
    dirs = utils.get_dirs(path)
    train_list = []
    val_list = []
    test_list = []
    train_set_percentage = 0.7
    val_set_percentage_from_training = 0.2
    for d in dirs:
        current_dir = os.path.join(path, d)
        files = utils.get_files(current_dir, '.avi', append_base=False)
        files = [os.path.join(d, f) for f in files]
        np.random.shuffle(files)

        num_train = int(len(files) * train_set_percentage)
        num_val = int(num_train * val_set_percentage_from_training)
        train_list.extend(files[0:num_train - num_val])
        val_list.extend(files[num_train - num_val:num_train])
        test_list.extend(files[num_train:])
        print(len(files), len(train_list), len(val_list), len(test_list))

    save_path = path + '_lists'
    utils.touch_dir(save_path)
    utils.txt_write(os.path.join(save_path, 'trainlist.txt'), train_list)
    utils.txt_write(os.path.join(save_path, 'vallist.txt'), val_list)
    utils.txt_write(os.path.join(save_path, 'testlist.txt'), test_list)
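
The script depends on a small utils module that is not included in this example. A rough sketch of what the helpers used above might look like; the signatures are inferred from the calls, not copied from the real module:

import os

def get_dirs(path):
    # Immediate subdirectories of `path` (one per activity class in UCF50).
    return sorted(d for d in os.listdir(path)
                  if os.path.isdir(os.path.join(path, d)))

def get_files(path, extension, append_base=False):
    # Files in `path` with the given extension, optionally prefixed with `path`.
    files = sorted(f for f in os.listdir(path) if f.endswith(extension))
    return [os.path.join(path, f) for f in files] if append_base else files

def touch_dir(path):
    # Create the directory if it is missing.
    os.makedirs(path, exist_ok=True)

def txt_write(file_path, lines):
    # Write one entry per line.
    with open(file_path, 'w') as f:
        f.write('\n'.join(lines) + '\n')

def get_dataset_path(name):
    # Hypothetical: resolve a dataset name to a local directory.
    return os.path.join(os.path.expanduser('~/datasets'), name)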
Example #3
def _save_img_sequence(output_dir, img_seq, rescale=False):
    utils.touch_dir(output_dir)

    for i in range(img_seq.shape[0]):
        _save_img(osp.join(output_dir, '%06d.png' % i), img_seq[i], rescale=rescale)
    pass
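
_save_img itself is not shown. A minimal sketch, assuming it writes a single image to disk and that rescale first maps a float image (e.g. a [-1, 1] network output) back to [0, 1]; imageio is used here purely for illustration:

import imageio
import numpy as np

def _save_img(path, img, rescale=False):
    img = np.asarray(img, dtype=np.float32)
    if rescale:
        # Map e.g. a [-1, 1] network output back to [0, 1].
        img = (img - img.min()) / max(float(img.max() - img.min()), 1e-8)
    imageio.imwrite(path, np.clip(img * 255.0, 0, 255).astype(np.uint8))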
Example #4
def main():
    np.random.seed(1)
    parser = ArgumentParser()
    parser.add_argument('--config', type=str, required=True, help='path of the configuration file')
    parser.add_argument('--checkpoint_stage1', type=str, required=True, help='path of the stage1 checkpoint')
    parser.add_argument('--checkpoint_stage2', type=str, required=True, help='path of the stage2 checkpoint')
    parser.add_argument('--save_dir', type=str, required=False, help='root dir to save results', default='results/eval')
    args = parser.parse_args()

    config = utils.load_config(args.config)
    model_config = config['model']
    paths_config = config['paths']
    data_dir = paths_config['data_dir']
    n_points = model_config['n_pts']
    n_action = model_config['n_action']
    batch_size = 1

    session_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
    session_config.gpu_options.allow_growth = True

    mse_list = []

    # directory setup

    if not _checkpoint_exist(args.checkpoint_stage1):
        raise Exception('checkpoint not found at %s' % args.checkpoint_stage1)

    if not _checkpoint_exist(args.checkpoint_stage2):
        raise Exception('checkpoint not found at %s' % args.checkpoint_stage2)

    # start session
    with tf.Session(config=session_config) as sess:
        # import dataset
        test_loader = SequenceDataLoader(data_dir, 'test',
                                         n_points=n_points, n_action=n_action,
                                         random_order=False,
                                         randomness=False,
                                         with_image_seq=True)
        test_dataset = test_loader.get_dataset(batch_size,
                                               repeat=False,
                                               shuffle=False,
                                               num_preprocess_threads=12)

        # setup inputs
        training_pl = tf.placeholder(tf.bool)
        handle_pl = tf.placeholder(tf.string, shape=[])
        base_iterator = tf.data.Iterator.from_string_handle(handle_pl, test_dataset.output_types,
                                                            test_dataset.output_shapes)
        inputs = base_iterator.get_next()

        # initializing models
        model = FinalModel(config)
        print('model initialized')
        model.build(inputs)

        # variables initialization
        tf.logging.set_verbosity(tf.logging.INFO)
        global_init = tf.global_variables_initializer()
        local_init = tf.local_variables_initializer()
        sess.run([global_init, local_init])

        # data iterator initialization
        test_iterator = test_dataset.make_initializable_iterator()
        test_handle = sess.run(test_iterator.string_handle())

        # checkpoint restoration
        model.restore(sess, args.checkpoint_stage1)
        model.restore(sess, args.checkpoint_stage2)

        # iterator initialization
        sess.run(test_iterator.initializer)

        # running on test dataset
        sample_idx = 0
        n_iters = utils.training.get_n_iterations(test_loader.length(), batch_size)
        feed_dict = {handle_pl: test_handle, training_pl: False}
        for j in range(90):
            min_mse = 1.0
            for s in range(20):
                outputs = model.run(sess, feed_dict)
                cur_mse = np.mean(np.abs(outputs['fut_pt_raw'] - outputs['real_seq']))

                if cur_mse < min_mse:
                    min_mse = cur_mse
                    # saving outputs
                    outputs_im = outputs['im']
                    outputs_real_im_seq = outputs['real_im_seq']
                    outputs_pred_im_seq = outputs['pred_im_seq']
                    # print(outputs['z'][0,:10])

                    outputs_mask = outputs['mask']
                    outputs_pred_im_crude = outputs['pred_im_crude']
                    outputs_current_points = outputs['current_points']
                    outputs_future_points = outputs['future_points']

                    batch_dim = outputs['im'].shape[0]
                    for batch_idx in range(batch_dim):
                        sample_save_dir = osp.join(args.save_dir, '%04d' % sample_idx)
                        utils.touch_dir(sample_save_dir)

                        _save_img(osp.join(sample_save_dir, 'input_im.png'),
                                outputs_im[batch_idx],
                                rescale=True)
                        _save_img(osp.join(sample_save_dir, 'current_points.png'),
                                outputs_current_points[batch_idx],
                                rescale=False)

                        _save_img_sequence(osp.join(sample_save_dir, 'real_seq'),
                                        outputs_real_im_seq[batch_idx],
                                        rescale=True)
                        _save_img_sequence(osp.join(sample_save_dir, 'pred_seq'),
                                        outputs_pred_im_seq[batch_idx],
                                        rescale=True)
                        _save_img_sequence(osp.join(sample_save_dir, 'mask'),
                                        outputs_mask[batch_idx],
                                        rescale=False)
                        _save_img_sequence(osp.join(sample_save_dir, 'crude'),
                                        outputs_pred_im_crude[batch_idx],
                                        rescale=False)
                        _save_img_sequence(osp.join(sample_save_dir, 'crude'),
                                        outputs_pred_im_crude[batch_idx],
                                        rescale=True)
                        _save_img_sequence(osp.join(sample_save_dir, 'pred_points'),
                                        outputs_future_points[batch_idx],
                                        rescale=False)

                        # next sample idx
                pass
            sample_idx += 1
            print(j, min_mse)
            pass
        print('iteration through test set finished')
    # return np.array(mse_list)
    pass
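
_checkpoint_exist is not defined in this excerpt. Example #6 below performs the same test inline with tf.gfile.Exists, so a plausible sketch is:

import tensorflow as tf

def _checkpoint_exist(checkpoint_path):
    # A TF1 checkpoint is either a single file or a prefix with an accompanying .index file.
    return tf.gfile.Exists(checkpoint_path) or tf.gfile.Exists(checkpoint_path + '.index')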
Example #5
        print('**** Train has ', len(files_list))
        save_dir = os.path.join(save_dir, 'train')
        max_num_tuplex = 2000  ##200000
    elif (current_subset == const.Subset.VAL.value):
        files_list = utils.txt_read(
            os.path.join(dataset_path + '_lists', 'vallist.txt'))
        print('**** Val has ', len(files_list))
        save_dir = os.path.join(save_dir, 'val')
        max_num_tuplex = 100  ## 20000
    elif (current_subset == const.Subset.TEST.value):
        files_list = utils.txt_read(
            os.path.join(dataset_path + '_lists', 'testlist.txt'))
        print('*** Test has ', len(files_list))
        save_dir = os.path.join(save_dir, 'test')

    utils.touch_dir(save_dir)
    # As a precaution, regenerate the last 5 tuples.
    tuple_idx = max(0, utils.last_tuple_idx(save_dir) - 5)
    activity_list = sorted(utils.get_dirs(dataset_path))
    print('activity_list ', len(activity_list), activity_list)

    lbls_file = os.path.join(save_dir, 'lbl.pkl')

    if (tuple_idx == 0):
        lbls_ary = np.ones(max_num_tuplex, dtype=np.int32) * -1
        ## Invalid Activity
    else:
        lbls_ary = utils.pkl_read(lbls_file)
        tuple_idx = 0
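
last_tuple_idx and pkl_read are again utils helpers that this excerpt does not define. A rough sketch under the assumption that generated tuples are saved as numbered files inside save_dir and the labels as a pickled array; the file-naming scheme is a guess:

import os
import pickle
import re

def last_tuple_idx(save_dir):
    # Highest index among files named like '000123*.ext' already in save_dir,
    # so tuple generation can resume where it stopped.
    indices = [int(m.group(1))
               for f in os.listdir(save_dir)
               for m in [re.match(r'(\d+)', f)] if m]
    return max(indices) if indices else 0

def pkl_read(path):
    with open(path, 'rb') as f:
        return pickle.load(f)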
Example #6
def main():
    parser = ArgumentParser()
    parser.add_argument('--config', type=str, required=True, help='path of the configuration file')
    parser.add_argument('--checkpoint', type=str, required=True, help='path of the pretrained keypoints detector')
    args = parser.parse_args()

    config = utils.load_config(args.config)
    paths_config = config['paths']
    data_dir = paths_config['data_dir']
    batch_size = 1
    keypoints_root_dir = osp.join(data_dir, 'pseudo_labels')

    session_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
    session_config.gpu_options.allow_growth = True

    # directory setup
    utils.touch_dir(keypoints_root_dir)

    if not tf.gfile.Exists(args.checkpoint) and not tf.gfile.Exists(args.checkpoint + '.index'):
        raise Exception('checkpoint not found at %s' % args.checkpoint)

    # start session
    with tf.Session(config=session_config) as sess:
        # import dataset
        train_loader = KeypointDataLoader(data_dir, 'train')
        test_loader = KeypointDataLoader(data_dir, 'test')
        train_dataset = train_loader.get_dataset(batch_size,
                                                 repeat=False,
                                                 shuffle=False,
                                                 num_preprocess_threads=12)
        test_dataset = test_loader.get_dataset(batch_size,
                                               repeat=False,
                                               shuffle=False,
                                               num_preprocess_threads=12)

        # setup inputs
        training_pl = tf.placeholder(tf.bool)
        handle_pl = tf.placeholder(tf.string, shape=[])
        base_iterator = tf.data.Iterator.from_string_handle(handle_pl, train_dataset.output_types,
                                                            train_dataset.output_shapes)
        inputs = base_iterator.get_next()

        # initializing models
        model = KeypointModel(config)
        print('model initialized')
        model.build(inputs)

        # variables initialization
        tf.logging.set_verbosity(tf.logging.INFO)
        global_init = tf.global_variables_initializer()
        local_init = tf.local_variables_initializer()
        sess.run([global_init, local_init])

        # data iterator initialization
        train_iterator = train_dataset.make_initializable_iterator()
        test_iterator = test_dataset.make_initializable_iterator()
        train_handle = sess.run(train_iterator.string_handle())
        test_handle = sess.run(test_iterator.string_handle())

        # checkpoint restoration
        model.restore(sess, args.checkpoint)

        # iterator initialization
        sess.run(train_iterator.initializer)
        sess.run(test_iterator.initializer)

        # running on train dataset
        n_iters = utils.training.get_n_iterations(train_loader.length(), batch_size)
        feed_dict = {handle_pl: train_handle, training_pl: False}
        for _ in range(n_iters):
            outputs = model.run(sess, feed_dict)
            _save_output(keypoints_root_dir, outputs)
            pass
        print('iteration through train set finished')

        # running on test dataset
        n_iters = utils.training.get_n_iterations(test_loader.length(), batch_size)
        feed_dict = {handle_pl: test_handle, training_pl: False}
        for _ in range(n_iters):
            outputs = model.run(sess, feed_dict)
            _save_output(keypoints_root_dir, outputs)
            pass
        print('iteration through test set finished')
    pass
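
_save_output is also outside this excerpt. Assuming the detected keypoints are dumped per sample under keypoints_root_dir, a minimal sketch might look like the following; the output key and file layout are assumptions, not the project's actual format:

import os.path as osp
import numpy as np

_sample_counter = 0

def _save_output(root_dir, outputs):
    # Persist one batch of predicted keypoints as .npy files, one file per sample.
    global _sample_counter
    points = outputs['current_points']  # assumed key; shape (batch, n_pts, 2)
    for sample in points:
        np.save(osp.join(root_dir, '%06d.npy' % _sample_counter), sample)
        _sample_counter += 1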