Example #1
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--yml_cfg_file", type=str, default="./cfgs/bsn.yml")
    args = parser.parse_args()
    cfg = load_yml(args.yml_cfg_file)

    anet = cfg.anet
    train_dataset = get_pgm_feature_dataset(cfg.tem.tem_csv_dir, cfg.pgm.proposal_csv_path, anet.json_path,
                                            anet.video_info_new_csv_path, "training")
    val_dataset = get_pgm_feature_dataset(cfg.tem.tem_csv_dir, cfg.pgm.proposal_csv_path, anet.json_path,
                                          anet.video_info_new_csv_path, "validation")

    queue = mp.Queue()
    procs = []

    mkdir_p(cfg.pgm.pgm_feature_path)
    for i in range(len(train_dataset)):
        queue.put(('training', i))

    for i in range(len(val_dataset)):
        queue.put(('validation', i))

    for i in range(cfg.pgm.pgm_feature_workers):
        proc = mp.Process(target=proc_cb, args=(queue, cfg, {"training": train_dataset, "validation": val_dataset}))
        procs.append(proc)
        proc.start()

    # Poll until the queue drains. Note: an empty queue only means every task
    # has been dequeued; a worker may still be finishing its last video.
    t = 0
    while not queue.empty():
        log.log_info('{}s: {} videos remaining.'.format(t, queue.qsize()))
        time.sleep(1)
        t += 1
    log.log_warn("All videos processed.")
    for proc in procs:
        proc.terminate()
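The worker `proc_cb` is not part of this example. A minimal sketch of what a compatible worker could look like; the body is an assumption, and only the `(subset, index)` task format and the `(queue, cfg, datasets)` argument order come from the code above:

import queue as queue_lib

def proc_cb(task_queue, cfg, datasets):
    # Drain (subset, index) tasks until the queue is exhausted.
    while True:
        try:
            subset, idx = task_queue.get(timeout=1)
        except queue_lib.Empty:
            return
        sample = datasets[subset][idx]
        # ... compute and dump the PGM features for this video under
        # cfg.pgm.pgm_feature_path; the actual feature code lives elsewhere ...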
Example #2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--yml_cfg_file", default="./cfgs/bsn.yml")
    args = parser.parse_args()
    cfg = load_yml(args.yml_cfg_file)
    trainer = PemTrainer(cfg, cfg.pem)
    trainer.train()
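All of these examples rely on `load_yml` returning a config object with attribute access (cfg.pem, cfg.anet, and so on). A minimal sketch of such a helper, assuming PyYAML; the project's real implementation may differ:

import yaml

class AttrDict(dict):
    # Dict with recursive attribute-style access: cfg.pem.lr == cfg["pem"]["lr"].
    def __getattr__(self, name):
        try:
            value = self[name]
        except KeyError:
            raise AttributeError(name)
        return AttrDict(value) if isinstance(value, dict) else value

def load_yml(path):
    with open(path) as f:
        return AttrDict(yaml.safe_load(f))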
Example #3
def main():
    # Parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--yml_cfg_file", type=str)
    args = parser.parse_args()
    cfg = load_yml(args.yml_cfg_file)

    # Load the model
    tem_model = Tem(cfg.input_features)
    # The info file stores the checkpoint path on its first line and the epoch
    # on its second.
    model_info_path = os.path.join(cfg.save_root, 'model_model_info.txt')
    with open(model_info_path) as f:
        model_path = f.readline().strip()
        epoch = int(f.readline().strip())
    state_dicts = torch.load(model_path)
    tem_model.load_state_dict(state_dicts['models'][0])

    # Build the datasets
    anet = cfg.anet_dataset
    val_dataset = ActivityNetDataset.get_ltw_feature_dataset(
        anet.csv_path, anet.json_path, anet.video_info_new_csv_path,
        anet.class_name_path, 'validation')
    train_dataset = ActivityNetDataset.get_ltw_feature_dataset(
        anet.csv_path, anet.json_path, anet.video_info_new_csv_path,
        anet.class_name_path, 'training')
    val_dataloader = DataLoader(val_dataset,
                                batch_size=cfg.batch_size,
                                shuffle=False,
                                num_workers=16)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=cfg.batch_size,
                                  shuffle=False,
                                  num_workers=16)
    tem_model.cuda()
    tem_model.eval()

    log.log(log.INFO, "Start processing train dataset.")
    results = []
    for i, (features, _) in enumerate(val_dataloader):
        features = features.cuda()
        result = tem_model(features)
        results.append(result.cpu().detach().numpy())
    val_results = np.concatenate(results)

    log.log(log.INFO, "Start processing validation dataset.")
    results = []
    for i, (features, _) in enumerate(train_dataloader):
        features = features.cuda()
        result = tem_model(features)
        results.append(result.cpu().detach().numpy())

    train_results = np.concatenate(results)

    mkdir_p(os.path.split(cfg.tem_results_file)[0])

    log.log(log.INFO, "Dumping to {}".format(cfg.tem_results_file))
    with open(cfg.tem_results_file, 'wb') as f:
        results = {"training": train_results, "validation": val_results}
        pickle.dump(results, f)
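Reading the dump back later is symmetric; a short usage sketch, where cfg is the same config object as above:

import pickle

with open(cfg.tem_results_file, 'rb') as f:
    tem_results = pickle.load(f)

train_results = tem_results["training"]    # np.ndarray of TEM outputs
val_results = tem_results["validation"]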
Example #4
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--yml_cfg_file", type=str, default="./cfgs/bsn.yml")
    args = parser.parse_args()
    cfg = load_yml(args.yml_cfg_file)

    anet = cfg.anet
    train_dataset = get_tem_dataset(cfg.tem.tem_csv_dir, anet.json_path,
                                    anet.video_info_new_csv_path, "training")
    val_dataset = get_tem_dataset(cfg.tem.tem_csv_dir, anet.json_path,
                                  anet.video_info_new_csv_path, "validation")
    mkdir_p(cfg.pgm.proposal_csv_path)

    # prepare workers
    queue = mp.Queue()
    procs = []

    # enqueue one task per video
    for i in range(len(train_dataset)):
        queue.put(('training', i))

    for i in range(len(val_dataset)):
        queue.put(('validation', i))

    for i in range(cfg.pgm.proposal_workers):
        proc = mp.Process(target=sub_proc,
                          args=(queue, cfg, {
                              "training": train_dataset,
                              "validation": val_dataset
                          }))
        proc.start()
        procs.append(proc)

    # Poll until the queue drains; an empty queue does not guarantee that
    # every worker has finished its current video.
    t = 0
    while not queue.empty():
        remain = queue.qsize()
        log.log_info("Time: {}s, {} videos remaining.".format(t, remain))
        time.sleep(1)
        t += 1

    log.log_info("All videos handled.")
    for proc in procs:
        proc.terminate()
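Polling queue.empty() and then calling terminate() can kill a worker in the middle of writing its last CSV. A safer shutdown, sketched here under the assumption that the worker is changed to exit when it receives None, uses sentinels plus join():

# One sentinel per worker, enqueued after the real tasks.
for _ in range(cfg.pgm.proposal_workers):
    queue.put(None)

# The worker's loop would then do `if task is None: return`.
for proc in procs:
    proc.join()  # returns only once every worker has exited cleanly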
Example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--yml_cfg_file", default="./cfgs/bsn.yml")
    args = parser.parse_args()
    cfg = load_yml(args.yml_cfg_file)
    anet = cfg.anet
    val_dataset = get_post_processing_dataset(cfg.pem.pem_csv_dir,
                                              anet.json_path,
                                              anet.video_info_new_csv_path,
                                              'validation')

    queue = mp.Queue()
    mp_dict = mp.Manager().dict()

    # Enqueue every validation video (shrink this range when debugging).
    for i in range(len(val_dataset)):
        queue.put(i)

    procs = []
    for i in range(cfg.post_processing.pp_workers):
        proc = mp.Process(target=sub_proc,
                          args=(queue, mp_dict, val_dataset, cfg))
        procs.append(proc)
        proc.start()

    for proc in procs:
        proc.join()

    output_dict = {
        "version": "VERSION 1.3",
        "results": dict(mp_dict),
        "external_data": {}
    }
    log.log_info("Collected results for {} videos.".format(len(mp_dict)))
    mkdir_p(os.path.split(cfg.eval.results_json)[0])
    with open(cfg.eval.results_json, "w") as f:
        json.dump(output_dict, f)
    log.log_info("Dump results to {}.".format(cfg.eval.results_json))
Example #6
def plot_metric(average_nr_proposals, average_recall, recall, fig_save_path, fn_size=12):
    # Head inferred from the call site below; the start of the body is truncated.
    ...
    plt.setp(plt.gca().get_yticklabels(), fontsize=fn_size)
    mkdir_p(os.path.split(fig_save_path)[0])
    plt.savefig(fig_save_path)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--gt_cfg_file",
        default="/home/dechao_meng/datasets/activitynet/annotations/activity_net_1_3_new.json")
    parser.add_argument(
        "--yml_cfg_file",
        default="/tmp/pycharm_project_580/action_proposals/trainer/bsn_trainer/cfgs/bsn.yml")
    args = parser.parse_args()
    cfg = load_yml(args.yml_cfg_file)
    # Strip the ".json" suffix and append a human-readable timestamp.
    fig_save_path = cfg.eval.results_json[:-5] + time.asctime() + '.png'
    uniform_average_nr_proposals_valid, uniform_average_recall_valid, uniform_recall_valid = run_evaluation(
        args.gt_cfg_file,
        cfg.eval.results_json,
        max_avg_nr_proposals=100,
        tiou_thresholds=np.linspace(0.5, 0.95, 10),
        subset='validation')

    plot_metric(uniform_average_nr_proposals_valid,
                uniform_average_recall_valid, uniform_recall_valid,
                fig_save_path)
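The body of plot_metric is truncated in Example #6. A possible body, sketched on the assumption that it draws the standard AR@AN curve (average recall across the tIoU thresholds against the average number of proposals per video); the figure size and axis labels are illustrative, and mkdir_p is the project's own helper:

import os
import matplotlib.pyplot as plt

def plot_metric(average_nr_proposals, average_recall, recall, fig_save_path, fn_size=12):
    plt.figure(figsize=(8, 6))
    # `recall` holds the per-tIoU recall curves; only the average is drawn here.
    plt.plot(average_nr_proposals, average_recall, linewidth=2)
    plt.xlabel('Average number of proposals', fontsize=fn_size)
    plt.ylabel('Average recall', fontsize=fn_size)
    plt.grid(True)
    plt.setp(plt.gca().get_yticklabels(), fontsize=fn_size)
    mkdir_p(os.path.split(fig_save_path)[0])  # project helper, like `mkdir -p`
    plt.savefig(fig_save_path)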