def main():
    args = args_parser()
    update_config(args.cfg)
    if cfg.BASIC.SHOW_CFG:
        pprint.pprint(cfg)
    # prepare running environment for the whole project
    # prepare_env(cfg)

    cas_dir = "/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/thumos14/000_thumos_29.13_save_model_Frame_wise_accuracy/save_for_post_process"
    epoch = 901

    datas = list()
    file_name_list = os.listdir(cas_dir)
    for file_name in file_name_list:
        data = np.load(os.path.join(cas_dir, file_name))
        datas.append(data)

    output_json_file_cas, test_acc_cas = evaluate_from_offline_cas(
        cfg, datas, epoch)
    if cfg.BASIC.VERBOSE:
        print('test_acc, cas %f' % (test_acc_cas))
    mAP, average_mAP = evaluate_mAP(
        cfg, output_json_file_cas,
        os.path.join(cfg.BASIC.CKPT_DIR, cfg.DATASET.GT_FILE),
        cfg.BASIC.VERBOSE)
def main():
    args = args_parser()
    update_config(args.cfg)
    if cfg.BASIC.SHOW_CFG:
        pprint.pprint(cfg)
    # prepare running environment for the whole project
    # prepare_env(cfg)
    base_branch_json = '/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/thumos14/thumos_ablation_only_cas_only_cam_separate_weight_save_model_debug/075_cas.json'
    cam_branch_json = '/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/thumos14/thumos_ablation_only_cas_only_cam_separate_weight_save_model_debug/089_cam.json'

    evaluate_mAP(cfg, base_branch_json, os.path.join(cfg.BASIC.CKPT_DIR, cfg.DATASET.GT_FILE), cfg.BASIC.VERBOSE)
    evaluate_mAP(cfg, cam_branch_json, os.path.join(cfg.BASIC.CKPT_DIR, cfg.DATASET.GT_FILE), cfg.BASIC.VERBOSE)

    with open(base_branch_json, 'r') as f:
        base_branch_results = json.load(f)['results']

    with open(cam_branch_json, 'r') as f:
        cam_branch_results = json.load(f)['results']
    for vid_name in cam_branch_results.keys():
        cam_branch_results[vid_name].extend(base_branch_results[vid_name])

    output_dict = {'version': 'VERSION 1.3', 'results': cam_branch_results, 'external_data': {}}

    result_file = '/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/thumos14/thumos_ablation_only_cas_only_cam_separate_weight_save_model_debug/cat.json'
    with open(result_file, 'w') as outfile:
        json.dump(output_dict, outfile)

    evaluate_mAP(cfg, result_file, os.path.join(cfg.BASIC.CKPT_DIR, cfg.DATASET.GT_FILE), cfg.BASIC.VERBOSE)
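# For orientation: the result files merged above follow the ActivityNet-style
# detection layout that the code itself builds ('version' / 'results' /
# 'external_data'). A hypothetical illustration; the video name, label, and
# numbers are made up, and the per-proposal keys are an assumption:
example_result = {
    'version': 'VERSION 1.3',
    'results': {
        'video_test_0000004': [
            {'label': 'Diving', 'score': 0.87, 'segment': [12.3, 18.9]},
        ],
    },
    'external_data': {},
}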
Example #3
def main():
    args = args_parser()
    update_config(args.cfg)
    if cfg.BASIC.SHOW_CFG:
        pprint.pprint(cfg)
    cfg.BASIC.ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')

    # dataloader
    val_dset = WtalDataset(cfg, cfg.DATASET.VAL_SPLIT)
    val_loader = DataLoader(val_dset,
                            batch_size=cfg.TEST.BATCH_SIZE,
                            shuffle=False,
                            num_workers=cfg.BASIC.WORKERS,
                            pin_memory=cfg.BASIC.PIN_MEMORY)

    # network
    model = LocNet(cfg)
    model.cuda()

    if not os.path.exists(args.res_dir):
        os.makedirs(args.res_dir)

    model = load_weights(model, args.weight_file)

    evaluate_vis_cas(cfg, val_loader, model, args.res_dir, args.is_minmax_norm)
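# Several of these examples import a load_weights helper from utils.utils that
# is not shown. A minimal sketch of what it might look like, assuming the
# checkpoints nest the parameters under a 'state_dict' key (the key name and
# the loading convention are assumptions, not confirmed by the source):
import torch

def load_weights(model, weight_file):
    checkpoint = torch.load(weight_file, map_location='cpu')
    # Checkpoints that also store optimizer state typically nest the weights.
    state_dict = checkpoint.get('state_dict', checkpoint)
    model.load_state_dict(state_dict)
    return model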
Example #4
def main():
    args = parse_args()
    update_config(cfg, args)
    extra = cfg.MODEL.EXTRA
    print(cfg.MODEL.NAME)
    print(extra.FINAL_CONV_KERNEL)
    print(cfg.MODEL.EXTRA.STAGE2.NUM_CHANNELS)
Example #5
def main():
    args = args_parser()
    update_config(args.cfg)
    if cfg.BASIC.SHOW_CFG:
        pprint.pprint(cfg)
    # prepare running environment for the whole project
    # prepare_env(cfg)

    # dataloader
    val_dset = WtalDataset(cfg, cfg.DATASET.VAL_SPLIT)
    val_loader = DataLoader(val_dset,
                            batch_size=cfg.TEST.BATCH_SIZE,
                            shuffle=False,
                            num_workers=cfg.BASIC.WORKERS,
                            pin_memory=cfg.BASIC.PIN_MEMORY)

    # network
    model = LocNet(cfg)
    # model.apply(weight_init)

    model.cuda()

    # weight_file = ""
    # weight_file = "/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/0_NeurIPS2020_code_ok/results_and_model/thumos14_checkpoint_best_cas_epoch125_iou0.5__0.2928.pth"
    # weight_file = "/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/0_NeurIPS2020_code_ok/results_and_model/anet12_checkpoint_best_cas_epoch30_map_0.2394.pth"
    # weight_file = "/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/0_NeurIPS2020_code_ok/results_and_model/anet13_checkpoint_best_cas_epoch35_map_0.2348.pth"
    weight_file = "/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/thumos14/000_thumos_29.13_save_model_Frame_wise_accuracy/checkpoint_best_cas.pth"

    epoch = 801
    from utils.utils import load_weights
    model = load_weights(model, weight_file)

    evaluate_save_for_post_process(cfg, val_loader, model, epoch)
Example #6
def main():
    args = args_parser()
    update_config(args.cfg)
    if cfg.BASIC.SHOW_CFG:
        pprint.pprint(cfg)
    # prepare running environment for the whole project
    # prepare_env(cfg)
    gt_json_file = '/disk3/zt/code/1_actloc/1_simple_cas/lib/dataset/materials_THUMOS14/gt_thumos14_augment.json'
    with open(gt_json_file, 'r') as f:
        gt_datas = json.load(f)
    gt_data = gt_datas['database']

    cas_dir = "/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/thumos14/000_thumos_29.13_save_model_Frame_wise_accuracy/save_for_post_process"
    datas = list()
    file_name_list = os.listdir(cas_dir)
    for file_name in file_name_list:
        data = np.load(os.path.join(cas_dir, file_name))
        datas.append(data)

    gts_list = []
    max_idx_list = []
    for data in datas:
        cas_base = data["cas_base"]
        vid_name = str(data["vid_name"])
        # score_np=data["score_np"]
        # cls_label_np=data["cls_label_np"]
        # frame_num=data["frame_num"]
        # fps_or_vid_duration=data["fps_or_vid_duration"]
        # print()

        temporal_len = cfg.DATASET.NUM_SEGMENTS
        gts = []

        if vid_name not in gt_data:
            # skip the two special-case videos missing from the ground truth
            continue

        for order, ann in enumerate(gt_data[vid_name]['annotations']):
            segment = ann['segment']
            label_idx = ann['idx'] + 1  # shift labels up by 1 so background can be class 0
            gt = segment2idx(temporal_len, float(segment[0]),
                             float(segment[1]), gt_data[vid_name]['duration'],
                             label_idx)
            gts.append(gt)
        gts = np.array(gts)
        gts = np.sum(gts, axis=0)  # (temporal_len,) array of shifted label indices, bg = 0

        max_idx = np.argmax(cas_base, axis=0)
        max_idx += 1
        max_score = np.max(cas_base, axis=0)
        max_idx[max_score <= 0.7] = 0

        gts_list.append(gts)
        max_idx_list.append(max_idx)
        # print()
    print(round(accuracy_wo_bg(gts_list, max_idx_list, bg_class=0), 3))
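# The example above relies on two helpers that are not shown. Plausible
# sketches, assuming segment2idx rasterizes a [start, end] interval in seconds
# onto temporal_len bins and accuracy_wo_bg scores only the positions whose
# ground truth is not background (both assumptions, not the confirmed code):
import numpy as np

def segment2idx(temporal_len, start_sec, end_sec, duration, label_idx):
    # Rasterize a second-level interval onto temporal_len discrete bins.
    gt = np.zeros(temporal_len, dtype=np.int64)
    start = int(np.floor(start_sec / duration * temporal_len))
    end = int(np.ceil(end_sec / duration * temporal_len))
    gt[max(start, 0):min(end, temporal_len)] = label_idx
    return gt

def accuracy_wo_bg(gts_list, preds_list, bg_class=0):
    # Frame-wise accuracy restricted to non-background ground-truth positions.
    gts = np.concatenate(gts_list)
    preds = np.concatenate(preds_list)
    mask = gts != bg_class
    return float(np.mean(preds[mask] == gts[mask]))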
def main():
    args = args_parser()
    update_config(args.cfg)
    if cfg.BASIC.SHOW_CFG:
        pprint.pprint(cfg)
    # prepare running environment for the whole project
    prepare_env(cfg)

    # dataloader
    val_dset = WtalDataset(cfg, cfg.DATASET.VAL_SPLIT)
    val_loader = DataLoader(val_dset,
                            batch_size=cfg.TEST.BATCH_SIZE,
                            shuffle=False,
                            num_workers=cfg.BASIC.WORKERS,
                            pin_memory=cfg.BASIC.PIN_MEMORY)

    # network
    model = LocNet(cfg)
    # model.apply(weight_init)

    model.cuda()

    # weight_file = '/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/0_NeurIPS2020_code_ok/results_and_model/thumos14_checkpoint_best_cas_epoch125_iou0.5__0.2928.pth'
    # weight_file = '/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/0_NeurIPS2020_code_ok/results_and_model/anet12_checkpoint_best_cas_epoch30_map_0.2545.pth'
    # weight_file = '/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/0_NeurIPS2020_code_ok/results_and_model/anet13_checkpoint_best_cas_epoch35_map_0.2348.pth'
    # weight_file = ''
    weight_file = '/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/thumos14/thumos_ablation_inv_0_save_model/checkpoint_best_cas_inv0_epoch69_0.2636.pth'
    # weight_file = '/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/thumos14/thumos_ablation_only_cas_save_model/checkpoint_best_cas_only_cas_epoch134_0.1957.pth'
    # weight_file = '/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/thumos14/thumos_ablation_individual_attention_2048k1_2048k1_2048k1_only_cam_svae_model/checkpoint_best_cas_only_cam_epoch96_0.1714.pth'
    res_dir = os.path.join(cfg.BASIC.CKPT_DIR, cfg.TEST.RESULT_DIR,
                           'vis/cas_gt_idx_minmax_norm_std')
    if not os.path.exists(res_dir):
        os.makedirs(res_dir)

    from utils.utils import load_weights
    model = load_weights(model, weight_file)

    epoch = 600
    output_json_file_cas, output_json_file_cam, test_acc_cas, test_acc_cam = evaluate(
        cfg, val_loader, model, epoch)
    evaluate_mAP(cfg, output_json_file_cas,
                 os.path.join(cfg.BASIC.CKPT_DIR, cfg.DATASET.GT_FILE),
                 cfg.BASIC.VERBOSE)
    evaluate_mAP(cfg, output_json_file_cam,
                 os.path.join(cfg.BASIC.CKPT_DIR, cfg.DATASET.GT_FILE),
                 cfg.BASIC.VERBOSE)

    is_minmax_norm = True
    evaluate_vis_cas_minmax_norm_std(cfg, val_loader, model, res_dir,
                                     is_minmax_norm)
Example #8
def main():
    args = args_parser()
    update_config(args.cfg)
    # path configuration
    cfg.BASIC.LOG_DIR = os.path.join(cfg.BASIC.CKPT_DIR,
                                     cfg.BASIC.TIME + cfg.BASIC.SUFFIX, 'log')
    cfg.BASIC.BACKUP_DIR = os.path.join(cfg.BASIC.CKPT_DIR,
                                        cfg.BASIC.TIME + cfg.BASIC.SUFFIX,
                                        'codes_backup')
    cfg.TRAIN.OUTPUT_DIR = os.path.join(cfg.BASIC.CKPT_DIR,
                                        cfg.BASIC.TIME + cfg.BASIC.SUFFIX,
                                        'output')
    cfg.TEST.RESULT_DIR = os.path.join(cfg.BASIC.CKPT_DIR,
                                       cfg.BASIC.TIME + cfg.BASIC.SUFFIX,
                                       'results')
    cfg.BASIC.CKPT_DIR = os.path.join(os.path.dirname(__file__), '..')
    if cfg.BASIC.SHOW_CFG:
        pprint.pprint(cfg)

    # log
    writer = SummaryWriter(
        log_dir=os.path.join(cfg.BASIC.CKPT_DIR, cfg.BASIC.LOG_DIR))

    best_mAP = -1
    info = list()

    # Note: the final epoch is never evaluated, since the loop waits for epoch t+1's file
    for epoch in range(1, cfg.TRAIN.EPOCH_NUM):
        # check whether t+1 CAS exists
        actions_json_file_next = os.path.join(
            cfg.BASIC.CKPT_DIR, cfg.TEST.RESULT_DIR,
            str(epoch + 1).zfill(3) + '_' + args.name + '.json')
        while not os.path.exists(actions_json_file_next):
            time.sleep(1)

        # dispose the json files at time step t
        actions_json_file_cas = os.path.join(
            cfg.BASIC.CKPT_DIR, cfg.TEST.RESULT_DIR,
            str(epoch).zfill(3) + '_' + args.name + '.json')
        writer, best_mAP, info = post_process(cfg, actions_json_file_cas,
                                              writer, best_mAP, info, epoch,
                                              args.name)
        # if best_mAP >0.2 and best_mAP == info[2][4]:
        #     save_best_record_txt(cfg, info, os.path.join(cfg.BASIC.CKPT_DIR, cfg.TEST.RESULT_DIR, "best_record_{}_{}.txt".format(args.name, str(epoch).zfill(3))))
    save_best_record_txt(
        cfg, info,
        os.path.join(cfg.BASIC.CKPT_DIR, cfg.TEST.RESULT_DIR,
                     "best_record_{}.txt".format(args.name)))

    writer.close()
def main():
    args = args_parser()
    update_config(args.cfg)
    if cfg.BASIC.SHOW_CFG:
        pprint.pprint(cfg)
    # prepare running environment for the whole project
    # prepare_env(cfg)

    # dataloader
    val_dset = WtalDataset(cfg, cfg.DATASET.VAL_SPLIT)
    val_loader = DataLoader(val_dset,
                            batch_size=cfg.TEST.BATCH_SIZE,
                            shuffle=False,
                            num_workers=cfg.BASIC.WORKERS,
                            pin_memory=cfg.BASIC.PIN_MEMORY)

    # network
    model_cas = LocNet(cfg)
    # model.apply(weight_init)
    model_cam = LocNet(cfg)

    model_cas.cuda()
    model_cam.cuda()

    # weight_file = ""
    # weight_file = "/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/0_NeurIPS2020_code_ok/results_and_model/thumos14_checkpoint_best_cas_epoch125_iou0.5__0.2928.pth"
    # weight_file = "/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/0_NeurIPS2020_code_ok/results_and_model/anet12_checkpoint_best_cas_epoch30_map_0.2394.pth"
    # weight_file = "/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/anet13/anet13_same_as_anet12_seed1_epoch45_TOPK_K_R_0.6_LR_DECAY26_save_every_model/checkpoint_best_cas_epoch35.pth"
    weight_file_cas = "/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/thumos14/thumos_ablation_only_cas_only_cam_separate_weight_save_model/checkpoint_best_cas_epoch75_0.2055.pth"

    weight_file_cam = "/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/thumos14/thumos_ablation_only_cas_only_cam_separate_weight_save_model/checkpoint_best_cam_epoch89_0.176.pth"
    from utils.utils import load_weights
    model_cas = load_weights(model_cas, weight_file_cas)
    model_cam = load_weights(model_cam, weight_file_cam)

    epoch = 911
    output_json_file_cas, test_acc_cas = evaluate_fuse_sequence(
        cfg, val_loader, model_cas, model_cam, epoch)
    # output_json_file_cas, test_acc_cas = evaluate(cfg, val_loader, model, epoch)
    if cfg.BASIC.VERBOSE:
        print('test_acc, cas %f' % (test_acc_cas))
    mAP, average_mAP = evaluate_mAP(
        cfg, output_json_file_cas,
        os.path.join(cfg.BASIC.CKPT_DIR, cfg.DATASET.GT_FILE),
        cfg.BASIC.VERBOSE)
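# evaluate_fuse_sequence is not shown; presumably it runs both branch models
# and fuses their class-activation sequences before localization. A purely
# hypothetical sketch of one such fusion step (simple averaging), assuming
# each model maps features to a single [T, num_classes] score tensor:
import torch

@torch.no_grad()
def fuse_cas(model_cas, model_cam, features):
    cas_a = model_cas(features)
    cas_b = model_cam(features)
    return (cas_a + cas_b) / 2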
def main():
    args = args_parser()
    update_config(args.cfg)
    if cfg.BASIC.SHOW_CFG:
        pprint.pprint(cfg)
    # prepare running environment for the whole project
    prepare_env(cfg)

    # dataloader
    val_dset = WtalDataset(cfg, cfg.DATASET.VAL_SPLIT)
    val_loader = DataLoader(val_dset,
                            batch_size=cfg.TEST.BATCH_SIZE,
                            shuffle=False,
                            num_workers=cfg.BASIC.WORKERS,
                            pin_memory=cfg.BASIC.PIN_MEMORY)

    # network
    model = LocNet(cfg)
    # model.apply(weight_init)

    model.cuda()

    # weight_file = ""
    weight_file = "/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/0_NeurIPS2020_code_ok/results_and_model/thumos14_checkpoint_best_cas_epoch125_iou0.5__0.2928.pth"
    # weight_file = "/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/0_NeurIPS2020_code_ok/results_and_model/anet12_checkpoint_best_cas_epoch30_map_0.2394.pth"
    # weight_file = "/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/0_NeurIPS2020_code_ok/results_and_model/anet13_checkpoint_best_cas_epoch35_map_0.2348.pth"

    epoch = 801
    from utils.utils import load_weights
    model = load_weights(model, weight_file)

    # actions_json_file = evaluate(cfg, val_loader, model, epoch)
    #
    # evaluate_mAP(cfg, actions_json_file, os.path.join(cfg.BASIC.CKPT_DIR, cfg.DATASET.GT_FILE))

    # output_json_file_cas, output_json_file_cam, test_acc_cas, test_acc_cam = evaluate(cfg, val_loader, model, epoch)
    output_json_file_cas, test_acc_cas = evaluate(cfg, val_loader, model,
                                                  epoch)
    if cfg.BASIC.VERBOSE:
        print('test_acc, cas %f' % (test_acc_cas))
    mAP, average_mAP = evaluate_mAP(
        cfg, output_json_file_cas,
        os.path.join(cfg.BASIC.CKPT_DIR, cfg.DATASET.GT_FILE),
        cfg.BASIC.VERBOSE)
def main():
    args = args_parser()
    update_config(args.cfg)
    if cfg.BASIC.SHOW_CFG:
        pprint.pprint(cfg)
    # # prepare running environment for the whole project
    # prepare_env(cfg)
    #
    #
    # # dataloader
    # val_dset = WtalDataset(cfg, cfg.DATASET.VAL_SPLIT)
    # val_loader = DataLoader(val_dset, batch_size=cfg.TEST.BATCH_SIZE, shuffle=False,
    #                         num_workers=cfg.BASIC.WORKERS, pin_memory=cfg.BASIC.PIN_MEMORY)
    #
    # # network
    # model = LocNet(cfg)
    # # model.apply(weight_init)
    #
    # model.cuda()
    #
    #
    # weight_file = '/disk3/zt/code/actloc/thumos/17_CAS_CAM_fast_tuning/output/20class_seed_0_save_model/checkpoint_best_cas.pth'
    # epoch = 800
    # from utils.utils import load_weights
    # model = load_weights(model, weight_file)
    #
    # # actions_json_file = evaluate(cfg, val_loader, model, epoch)
    # #
    # # evaluate_mAP(cfg, actions_json_file, os.path.join(cfg.BASIC.CKPT_DIR, cfg.DATASET.GT_FILE))
    #
    # output_json_file_cas, output_json_file_cam, test_acc_cas, test_acc_cam = evaluate(cfg, val_loader, model, epoch)
    # if cfg.BASIC.VERBOSE:
    #     print('test_acc, cas %f, cam %f' % (test_acc_cas, test_acc_cam))
    # output_json_file_cas = "/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/anet12/anet12_0.2350_cam_inv_1_seed7_epoch36_TOPK_K_R_0.25__save_model_LR_decay/030_cas.json"
    # output_json_file_cas = "/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/anet13/anet13_0.2348_ok/035_cas.json"
    output_json_file_cas = "/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/thumos14/thumos_ablation_only_cas/134_cas.json"
    output_json_file_cas = "/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/thumos14/thumos_ablation_only_cas_only_cam_separate_weight/089_cam.json"
    mAP, average_mAP = evaluate_mAP(
        cfg, output_json_file_cas,
        os.path.join(cfg.BASIC.CKPT_DIR, cfg.DATASET.GT_FILE),
        cfg.BASIC.VERBOSE)
Example #12
def main():
    args = args_parser()
    update_config(args.cfg)
    if cfg.BASIC.SHOW_CFG:
        pprint.pprint(cfg)
    # prepare running environment for the whole project
    # prepare_env(cfg)

    # dataloader
    val_dset = WtalDataset(cfg, cfg.DATASET.VAL_SPLIT)
    val_loader = DataLoader(val_dset,
                            batch_size=cfg.TEST.BATCH_SIZE,
                            shuffle=False,
                            num_workers=cfg.BASIC.WORKERS,
                            pin_memory=cfg.BASIC.PIN_MEMORY)

    # network
    model = LocNet(cfg)
    # model.apply(weight_init)

    model.cuda()

    # weight_file = '/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/anet12/anet12_0.2350_cam_inv_1_seed7_epoch36_TOPK_K_R_0.25__save_model_LR_decay/anet12_checkpoint_best_cas_0.2394.pth'
    weight_file = "/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/anet13/anet13_same_as_anet12_seed1_epoch45_TOPK_K_R_0.6_LR_DECAY26_save_every_model/anet13_checkpoint_best_cas_epoch28_0.2178.pth"
    epoch = 603
    from utils.utils import load_weights
    model = load_weights(model, weight_file)

    # actions_json_file = evaluate(cfg, val_loader, model, epoch)
    #
    # evaluate_mAP(cfg, actions_json_file, os.path.join(cfg.BASIC.CKPT_DIR, cfg.DATASET.GT_FILE))

    # output_json_file_cas, output_json_file_cam, test_acc_cas, test_acc_cam = evaluate(cfg, val_loader, model, epoch)
    output_json_file_cas, test_acc_cas = evaluate(cfg, val_loader, model,
                                                  epoch)
    if cfg.BASIC.VERBOSE:
        print('test_acc, cas %f' % (test_acc_cas))
    mAP, average_mAP = evaluate_mAP(
        cfg, output_json_file_cas,
        os.path.join(cfg.BASIC.CKPT_DIR, cfg.DATASET.GT_FILE),
        cfg.BASIC.VERBOSE)
Example #13
def main():
    args = args_parser()
    update_config(args.cfg)
    if cfg.BASIC.SHOW_CFG:
        pprint.pprint(cfg)
    # prepare running environment for the whole project
    prepare_env(cfg)


    # dataloader
    val_dset = WtalDataset(cfg, cfg.DATASET.VAL_SPLIT)
    val_loader = DataLoader(val_dset, batch_size=cfg.TEST.BATCH_SIZE, shuffle=False,
                            num_workers=cfg.BASIC.WORKERS, pin_memory=cfg.BASIC.PIN_MEMORY)

    # network
    model = LocNet(cfg)
    # model.apply(weight_init)

    model.cuda()


    # weight_file = '/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/0_NeurIPS2020_code_ok/results_and_model/thumos14_checkpoint_best_cas_epoch125_iou0.5__0.2928.pth'
    # weight_file = '/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/0_NeurIPS2020_code_ok/results_and_model/anet12_checkpoint_best_cas_epoch30_map_0.2545.pth'
    weight_file = '/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/0_NeurIPS2020_code_ok/results_and_model/anet13_checkpoint_best_cas_epoch35_map_0.2348.pth'
    res_dir = os.path.join(cfg.BASIC.CKPT_DIR, cfg.TEST.RESULT_DIR,'vis/cas_weight_gt_idx_iou_0.85_mul_instance')
    if not os.path.exists(res_dir):
        os.makedirs(res_dir)

    from utils.utils import load_weights
    model = load_weights(model, weight_file)

    # epoch = 600
    # actions_json_file, _ = evaluate(cfg, val_loader, model, epoch)
    # evaluate_mAP(cfg, actions_json_file, os.path.join(cfg.BASIC.CKPT_DIR, cfg.DATASET.GT_FILE), cfg.BASIC.VERBOSE)

    is_minmax_norm = True

    evaluate_vis_cas_select_specific(cfg, val_loader, model, res_dir, is_minmax_norm)
def main():
    args = args_parser()
    update_config(args.cfg)
    if cfg.BASIC.SHOW_CFG:
        pprint.pprint(cfg)
    # prepare running environment for the whole project
    prepare_env(cfg)

    # dataloader
    val_dset = WtalDataset(cfg, cfg.DATASET.VAL_SPLIT)
    val_loader = DataLoader(val_dset,
                            batch_size=cfg.TEST.BATCH_SIZE,
                            shuffle=False,
                            num_workers=cfg.BASIC.WORKERS,
                            pin_memory=cfg.BASIC.PIN_MEMORY)

    # network
    model = LocNet(cfg)
    # model.apply(weight_init)

    model.cuda()

    # weight_file = ''
    weight_file = '/disk/yangle/Short-Actions/ECM/output/thumos14/ECM_baseline/checkpoint_best_150.pth'
    res_dir = os.path.join(cfg.BASIC.CKPT_DIR, cfg.TEST.RESULT_DIR,
                           'vis/ECM_thumos_score')
    if not os.path.exists(res_dir):
        os.makedirs(res_dir)

    from utils.utils import load_weights
    model = load_weights(model, weight_file)

    epoch = 600
    # output_json_file_cas, test_acc_cas = evaluate(cfg, val_loader, model, epoch)
    output_json_file_cas = '/disk/yangle/Short-Actions/ECM/output/thumos14/ECM_baseline/vis/ecm.json'
    evaluate_mAP(cfg, output_json_file_cas,
                 os.path.join(cfg.BASIC.CKPT_DIR, cfg.DATASET.GT_FILE),
                 cfg.BASIC.VERBOSE)
def main():
    args = args_parser()
    update_config(args.cfg)
    if cfg.BASIC.SHOW_CFG:
        pprint.pprint(cfg)
    # prepare running environment for the whole project
    prepare_env(cfg)

    # dataloader
    val_dset = WtalDataset(cfg, cfg.DATASET.VAL_SPLIT)
    val_loader = DataLoader(val_dset,
                            batch_size=cfg.TEST.BATCH_SIZE,
                            shuffle=False,
                            num_workers=cfg.BASIC.WORKERS,
                            pin_memory=cfg.BASIC.PIN_MEMORY)

    # network
    model = LocNet(cfg)
    # model.apply(weight_init)

    model.cuda()

    weight_file = '/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/anet12/anet12_no_inv_base_LR_0.0002_BATCH_128_save_model/checkpoint_best_cas_0.2243.pth'
    epoch = 600

    res_dir = '/disk3/zt/code/4_a/1_ECM_no_inv_drop/output/anet12/anet12_no_inv_base_LR_0.0002_BATCH_128_save_model/vis/cas_minmax_norm'
    if not os.path.exists(res_dir):
        os.makedirs(res_dir)

    from utils.utils import load_weights
    model = load_weights(model, weight_file)
    is_minmax_norm = True
    # actions_json_file = evaluate(cfg, val_loader, model, epoch)
    #
    # evaluate_mAP(cfg, actions_json_file, os.path.join(cfg.BASIC.CKPT_DIR, cfg.DATASET.GT_FILE))

    # output_json_file_cas, output_json_file_cam, test_acc_cas, test_acc_cam = evaluate(cfg, val_loader, model, epoch)
    evaluate_vis_cas(cfg, val_loader, model, epoch, res_dir, is_minmax_norm)
Example #16
def main():
    args = args_parser()
    update_config(args.cfg)
    if cfg.BASIC.SHOW_CFG:
        pprint.pprint(cfg)
    # prepare running environment for the whole project
    # prepare_env(cfg)


    # dataloader
    val_dset = WtalDataset(cfg, cfg.DATASET.VAL_SPLIT)
    val_loader = DataLoader(val_dset, batch_size=cfg.TEST.BATCH_SIZE, shuffle=False,
                            num_workers=cfg.BASIC.WORKERS, pin_memory=cfg.BASIC.PIN_MEMORY)

    # network
    model = LocNet(cfg)
    # model.apply(weight_init)

    model.cuda()

    # weight_file = ""
    weight_file = "/disk/yangle/Short-Actions/ECM/output/thumos14/ECM_baseline/checkpoint_best_150.pth"

    epoch = 601
    from utils.utils import load_weights
    model = load_weights(model, weight_file)

    # actions_json_file = evaluate(cfg, val_loader, model, epoch)
    #
    # evaluate_mAP(cfg, actions_json_file, os.path.join(cfg.BASIC.CKPT_DIR, cfg.DATASET.GT_FILE))

    # output_json_file_cas, output_json_file_cam, test_acc_cas, test_acc_cam = evaluate(cfg, val_loader, model, epoch)
    output_json_file_cas, test_acc_cas = evaluate(cfg, val_loader, model, epoch)
    if cfg.BASIC.VERBOSE:
        print('test_acc, cas %f' % (test_acc_cas))
    mAP, average_mAP = evaluate_mAP(cfg, output_json_file_cas, os.path.join(cfg.BASIC.CKPT_DIR, cfg.DATASET.GT_FILE), cfg.BASIC.VERBOSE)
Example #17
        weight = torch.matmul(feature_uns, embedding)  # [1, N, 1]
        weight = torch.squeeze(weight, dim=0)  # [N, 1]
        # weight_norm = self.softmax(weight)

        feature_wei = feature * weight  # [N, D]
        feature_agg = torch.sum(feature_wei, dim=0, keepdim=True)  # [1, D]
        feature_cls = self.lrelu(self.fc_cls1(feature_agg))
        # dropout
        feature_per = self.dropout(feature_cls)

        score = self.fc_cls2(feature_per)  # [1, 1]
        score = torch.squeeze(score, dim=0)
        score = self.sigmoid(score)  # BCE loss expects probabilities, so apply a sigmoid activation
        return score
        
        
if __name__ == '__main__':
    import sys
    sys.path.insert(0, '../../lib')
    from config.default import config as cfg
    from config.default import update_config

    cfg_file = '../../experiments/IENet.yaml'
    update_config(cfg_file)

    data = torch.randn((10, 97)).cuda()
    network = IENet(cfg).cuda()

    score = network(data)
    print(score.size(), score)
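# The forward fragment above starts mid-method; the surrounding module is not
# shown. A hypothetical reconstruction of the IENet skeleton, with layer sizes
# and the learned embedding chosen only to match the [10, 97] input in the
# __main__ block (all of it an assumption, not the confirmed architecture):
import torch
import torch.nn as nn

class IENet(nn.Module):
    def __init__(self, cfg, feat_dim=97, hidden_dim=64):
        super().__init__()
        # Learned scoring vector that weights each of the N instances.
        self.embedding = nn.Parameter(torch.randn(feat_dim, 1))
        self.fc_cls1 = nn.Linear(feat_dim, hidden_dim)
        self.fc_cls2 = nn.Linear(hidden_dim, 1)
        self.lrelu = nn.LeakyReLU()
        self.dropout = nn.Dropout(0.5)
        self.sigmoid = nn.Sigmoid()

    def forward(self, feature):                             # feature: [N, D]
        feature_uns = feature.unsqueeze(0)                   # [1, N, D]
        weight = torch.matmul(feature_uns, self.embedding)   # [1, N, 1]
        weight = torch.squeeze(weight, dim=0)                # [N, 1]
        feature_wei = feature * weight                       # [N, D]
        feature_agg = torch.sum(feature_wei, dim=0, keepdim=True)  # [1, D]
        feature_cls = self.lrelu(self.fc_cls1(feature_agg))
        feature_per = self.dropout(feature_cls)
        score = self.fc_cls2(feature_per)                    # [1, 1]
        score = torch.squeeze(score, dim=0)
        return self.sigmoid(score)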
Example #18
                tabs.append('*')
        table.append(tabs)

    from tabulate import tabulate
    # tab_name = join(output_dir, 'table_fancy.txt')
    print(tabulate(table, headers, tablefmt='fancy_grid'))
    if savename is not None:
        with open(savename, 'w') as f:
            print(tabulate(table, headers, tablefmt='plain'), file=f)


if __name__ == "__main__":
    from config.cmdline import parse_args
    from config.default import update_config, cfg
    args = parse_args()
    update_config(cfg, args)
    if args.usecpp:
        pass
    else:
        import torch
        if torch.cuda.is_available():
            device = torch.device('cuda')
        else:
            device = torch.device('cpu')
        from smplmodel.smplx_utils import SMPLX
        from smplmodel.geometry import batch_rodrigues
        smpl_render = SMPLX(model_type=cfg.MODEL.model_type,
                            gender=cfg.MODEL.gender,
                            batch_size=cfg.MODEL.batch_size,
                            device=device,
                            model_folder=cfg.MODEL.model_folder)
Example #19
def main():
    args = args_parser()
    update_config(args.cfg)
    if cfg.BASIC.SHOW_CFG:
        pprint.pprint(cfg)
    # prepare running environment for the whole project
    prepare_env(cfg)

    # log
    writer = SummaryWriter(
        log_dir=os.path.join(cfg.BASIC.CKPT_DIR, cfg.BASIC.LOG_DIR))

    # dataloader
    train_dset = WtalDataset(cfg, cfg.DATASET.TRAIN_SPLIT)
    train_loader = DataLoader(train_dset,
                              batch_size=cfg.TRAIN.BATCH_SIZE,
                              shuffle=True,
                              num_workers=cfg.BASIC.WORKERS,
                              pin_memory=cfg.BASIC.PIN_MEMORY)
    val_dset = WtalDataset(cfg, cfg.DATASET.VAL_SPLIT)
    val_loader = DataLoader(val_dset,
                            batch_size=cfg.TEST.BATCH_SIZE,
                            shuffle=False,
                            num_workers=cfg.BASIC.WORKERS,
                            pin_memory=cfg.BASIC.PIN_MEMORY)

    # network
    model = LocNet(cfg)

    model.cuda()

    # optimizer
    optimizer = optim.Adam(model.parameters(),
                           lr=cfg.TRAIN.LR,
                           betas=cfg.TRAIN.BETAS,
                           weight_decay=cfg.TRAIN.WEIGHT_DECAY)
    # criterion
    criterion = BasNetLoss()

    for epoch in range(1, cfg.TRAIN.EPOCH_NUM + 1):
        print('Epoch: %d:' % epoch)
        loss_average_cas, loss_average_cam, loss_average_consistency, loss_average_norm, loss_average_cam_inv = train(
            cfg, train_loader, model, optimizer, criterion)

        writer.add_scalar('train_loss/cas', loss_average_cas, epoch)
        writer.add_scalar('train_loss/cam', loss_average_cam, epoch)
        writer.add_scalar('train_loss/consistency', loss_average_consistency,
                          epoch)
        writer.add_scalar('train_loss/norm', loss_average_norm, epoch)
        writer.add_scalar('train_loss/cam_inv', loss_average_cam_inv, epoch)
        if cfg.BASIC.VERBOSE:
            print(
                'loss: cas %f, cam %f, consistency %f, norm %f, cam_inv %f' %
                (loss_average_cas, loss_average_cam, loss_average_consistency,
                 loss_average_norm, loss_average_cam_inv))

        # decay learning rate
        if epoch in cfg.TRAIN.LR_DECAY_EPOCHS:
            decay_lr(optimizer, factor=cfg.TRAIN.LR_DECAY_FACTOR)

        if epoch % cfg.TEST.EVAL_INTERVAL == 0:
            _, test_acc_cas = evaluate(cfg, val_loader, model, epoch)
            if cfg.BASIC.VERBOSE:
                print('test_acc, cas %f' % (test_acc_cas))
            writer.add_scalar('test_acc/cas', test_acc_cas, epoch)

            save_best_model(cfg,
                            epoch=epoch,
                            model=model,
                            optimizer=optimizer,
                            name='cas')
    writer.close()
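# The training loop calls a decay_lr helper that is not shown. A minimal
# sketch, assuming it simply scales every parameter group's learning rate
# (mirroring the inline LR-step logic used in the opts-based example below):
def decay_lr(optimizer, factor=0.1):
    for param_group in optimizer.param_groups:
        param_group['lr'] *= factor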
Example #20
    opt.heads['depth'] = opt.num_output
    if opt.load_model == '':
        opt.load_model = '../models/fusion_3d_var.pth'
    if opt.gpus[0] >= 0:
        opt.device = torch.device('cuda:{}'.format(opt.gpus[0]))
    else:
        opt.device = torch.device('cpu')

    model, _, _ = create_model(cfg, opt)
    model = model.to(opt.device)
    model.eval()

    if os.path.isdir(opt.demo):
        ls = os.listdir(opt.demo)
        for file_name in sorted(ls):
            if is_image(file_name):
                image_name = os.path.join(opt.demo, file_name)
                print('Running {} ...'.format(image_name))
                image = cv2.imread(image_name)
                demo_image(image, image_name, model, opt)
    elif is_image(opt.demo):
        print('Running {} ...'.format(opt.demo))
        image = cv2.imread(opt.demo)
        demo_image(image, opt.demo, model, opt)


if __name__ == '__main__':
    opt = opts().parse()
    update_config(cfg, opt)
    main(opt)
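# The demo loop above depends on an is_image predicate that is not included.
# A plausible sketch, assuming it simply filters by file extension:
def is_image(file_name):
    ext = file_name.rsplit('.', 1)[-1].lower()
    return ext in ('jpg', 'jpeg', 'png', 'bmp')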
Example #21
def main(opt):

    update_config(cfg, opt)

    if opt.disable_cudnn:
        torch.backends.cudnn.enabled = False
        print('Cudnn is disabled.')

    logger = Logger(opt)
    opt.device = torch.device('cuda:{}'.format(opt.gpus[0]))

    Dataset = dataset_factory[opt.dataset]
    train, val = task_factory[opt.task]

    model, optimizer, start_epoch = create_model(cfg, opt)

    if len(opt.gpus) > 1:
        model = torch.nn.DataParallel(model,
                                      device_ids=opt.gpus).cuda(opt.device)
    else:
        model = model.cuda(opt.device)

    val_loader = torch.utils.data.DataLoader(Dataset(opt, 'val'),
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=1,
                                             pin_memory=True)

    if opt.test:
        log_dict_train, preds = val(0, opt, val_loader, model)
        sio.savemat(os.path.join(opt.save_dir, 'preds.mat'),
                    mdict={'preds': preds})
        return

    train_loader = torch.utils.data.DataLoader(
        Dataset(opt, 'train'),
        batch_size=opt.batch_size * len(opt.gpus),
        shuffle=True,  # if opt.debug == 0 else False,
        num_workers=opt.num_workers,
        pin_memory=True)

    best = -1
    for epoch in range(start_epoch, opt.num_epochs + 1):
        mark = epoch if opt.save_all_models else 'last'
        log_dict_train, _ = train(epoch, opt, train_loader, model, optimizer)
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                       epoch, model, optimizer)
            log_dict_val, preds = val(epoch, opt, val_loader, model)
            for k, v in log_dict_val.items():
                logger.scalar_summary('val_{}'.format(k), v, epoch)
                logger.write('{} {:8f} | '.format(k, v))
            if log_dict_val[opt.metric] > best:
                best = log_dict_val[opt.metric]
                save_model(os.path.join(opt.save_dir, 'model_best.pth'), epoch,
                           model)
        else:
            save_model(os.path.join(opt.save_dir, 'model_last.pth'), epoch,
                       model, optimizer)
        logger.write('\n')
        if epoch in opt.lr_step:
            lr = opt.lr * (0.1**(opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
    logger.close()
def main():
    args = args_parser()
    update_config(args.cfg)
    if cfg.BASIC.SHOW_CFG:
        pprint.pprint(cfg)
    # prepare running environment for the whole project
    prepare_env(cfg)

    # log
    writer = SummaryWriter(
        log_dir=os.path.join(cfg.BASIC.CKPT_DIR, cfg.BASIC.LOG_DIR))

    # dataloader
    train_dset = WtalDataset(cfg, cfg.DATASET.TRAIN_SPLIT)
    train_loader = DataLoader(train_dset,
                              batch_size=cfg.TRAIN.BATCH_SIZE,
                              shuffle=True,
                              num_workers=cfg.BASIC.WORKERS,
                              pin_memory=cfg.BASIC.PIN_MEMORY)
    val_dset = WtalDataset(cfg, cfg.DATASET.VAL_SPLIT)
    val_loader = DataLoader(val_dset,
                            batch_size=cfg.TEST.BATCH_SIZE,
                            shuffle=False,
                            num_workers=cfg.BASIC.WORKERS,
                            pin_memory=cfg.BASIC.PIN_MEMORY)

    # network
    model = LocNet(cfg)
    # model.apply(weight_init)

    model.cuda()

    # weight_file = "/disk3/zt/code/actloc/thumos/17_CAS_CAM_fast_tuning/output/20class_seed_0_save_model/checkpoint_best_cas_0.2701.pth"
    weight_file = '/disk3/zt/code/actloc/thumos/20_0.2701_try/output/debug_save_epoch30/checkpoint_best_cas.pth'

    from utils.utils import load_weights
    model = load_weights(model, weight_file)

    # optimizer
    optimizer = optim.Adam(model.parameters(),
                           lr=cfg.TRAIN.LR,
                           betas=cfg.TRAIN.BETAS,
                           weight_decay=cfg.TRAIN.WEIGHT_DECAY)

    optimizer.load_state_dict(torch.load(weight_file)['optimizer'])

    # criterion
    criterion = BasNetLoss()

    for epoch in range(1, cfg.TRAIN.EPOCH_NUM + 1):
        print('Epoch: %d:' % epoch)
        loss_average_cas, loss_average_cam, loss_average_consistency, loss_average_norm, loss_average_cas_inv, loss_average_cam_inv = train(
            cfg, train_loader, model, optimizer, criterion)

        writer.add_scalar('train_loss/cas', loss_average_cas, epoch)
        writer.add_scalar('train_loss/cam', loss_average_cam, epoch)
        writer.add_scalar('train_loss/consistency', loss_average_consistency,
                          epoch)
        writer.add_scalar('train_loss/norm', loss_average_norm, epoch)
        writer.add_scalar('train_loss/cas_inv', loss_average_cas_inv, epoch)
        writer.add_scalar('train_loss/cam_inv', loss_average_cam_inv, epoch)
        if cfg.BASIC.VERBOSE:
            print(
                'loss: cas %f, cam %f, consistency %f, norm %f, cas_inv %f, cam_inv %f'
                % (loss_average_cas, loss_average_cam,
                   loss_average_consistency, loss_average_norm,
                   loss_average_cas_inv, loss_average_cam_inv))

        # decay learning rate
        if epoch in cfg.TRAIN.LR_DECAY_EPOCHS:
            decay_lr(optimizer, factor=cfg.TRAIN.LR_DECAY_FACTOR)

        if epoch % cfg.TEST.EVAL_INTERVAL == 0:
            _, _, test_acc_cas, test_acc_cam = evaluate(
                cfg, val_loader, model, epoch)
            if cfg.BASIC.VERBOSE:
                print('test_acc, cas %f, cam %f' %
                      (test_acc_cas, test_acc_cam))
            writer.add_scalar('test_acc/cas', test_acc_cas, epoch)
            writer.add_scalar('test_acc/cam', test_acc_cam, epoch)

    writer.close()