Example #1
def BSN_inference_PEM(opt):
    # Build the PEM model and restore the best checkpoint, stripping the
    # "module." prefix that DataParallel prepends to state_dict keys.
    model = PEM(opt)
    checkpoint = torch.load(opt["checkpoint_path"] + "/" + opt["arch"] + "_pem_best.pth.tar")
    base_dict = {'.'.join(k.split('.')[1:]): v for k, v in checkpoint['state_dict'].items()}
    model.load_state_dict(base_dict)
    model = torch.nn.DataParallel(model, device_ids=GPU_IDs).cuda()  # GPU_IDs comes from the surrounding module
    model.eval()
    
    test_loader = torch.utils.data.DataLoader(ProposalDataSet(opt, subset=opt["pem_inference_subset"]),
                                              batch_size=model.module.batch_size, shuffle=False,
                                              num_workers=8, pin_memory=True, drop_last=False)
    
    for idx, (video_feature, video_xmin, video_xmax, video_xmin_score, video_xmax_score) in enumerate(test_loader):
        video_name = test_loader.dataset.video_list[idx]
        # Predicted confidence (IoU score) for every proposal of this video.
        video_conf = model(video_feature).view(-1).detach().cpu().numpy()
        video_xmin = video_xmin.view(-1).cpu().numpy()
        video_xmax = video_xmax.view(-1).cpu().numpy()
        video_xmin_score = video_xmin_score.view(-1).cpu().numpy()
        video_xmax_score = video_xmax_score.view(-1).cpu().numpy()
        
        # Save the scored proposals of this video as a CSV file.
        df = pd.DataFrame()
        df["xmin"] = video_xmin
        df["xmax"] = video_xmax
        df["xmin_score"] = video_xmin_score
        df["xmax_score"] = video_xmax_score
        df["iou_score"] = video_conf

        df.to_csv("./output/" + opt["arch"] + opt["fix_scale"] + "_PEM_results/" + video_name + ".csv", index=False)
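The dictionary comprehension above ('.'.join(k.split('.')[1:])) drops the "module." prefix that torch.nn.DataParallel prepends to every parameter name when a wrapped model's state_dict is saved, so the weights can be loaded into a bare model before wrapping it again. A minimal, self-contained sketch of that renaming step (the nn.Linear model here is only an illustrative stand-in for PEM):

import torch
import torch.nn as nn

model = nn.Linear(4, 2)              # stand-in for PEM(opt), purely illustrative
wrapped = nn.DataParallel(model)

saved_state = wrapped.state_dict()   # keys look like "module.weight", "module.bias"
print(list(saved_state.keys()))

# Drop the first dot-separated component ("module") so the keys match the bare model.
base_dict = {'.'.join(k.split('.')[1:]): v for k, v in saved_state.items()}
print(list(base_dict.keys()))        # "weight", "bias"

model.load_state_dict(base_dict)     # now loads cleanly into the unwrapped model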
Example #2
def BSN_inference_PEM(opt):
    '''
    step - 1. load the PEM model
    step - 2. load the dataset
    step - 3. run inference and write per-video CSV results
    '''

    # step - 1
    model = PEM(opt)
    checkpoint = torch.load(opt["checkpoint_path"]+"/pem_best.pth.tar")
    base_dict = {'.'.join(k.split('.')[1:]): v for k,v in list(checkpoint['state_dict'].items())}
    model.load_state_dict(base_dict)

    model = torch.nn.DataParallel(model, device_ids=[0]).cuda()

    model.eval()

    # pem_inference_subset = 'validation'
    test_loader = torch.utils.data.DataLoader(ProposalDataSet(opt, subset=opt["pem_inference_subset"]),
                                                batch_size=model.module.batch_size, shuffle=False,
                                                num_workers=8, pin_memory=True, drop_last=False)

    count = 0
    for idx, (video_feature, video_xmin, video_xmax, video_xmin_score, video_xmax_score) in enumerate(test_loader):

        video_name = test_loader.dataset.video_list[idx]
        video_conf = model(video_feature).view(-1).detach().cpu().numpy() # prob for proposal
        video_xmin = video_xmin.view(-1).cpu().numpy()
        video_xmax = video_xmax.view(-1).cpu().numpy()
        video_xmin_score = video_xmin_score.view(-1).cpu().numpy()
        video_xmax_score = video_xmax_score.view(-1).cpu().numpy()

        df = pd.DataFrame()
        df['xmin'] = video_xmin
        df['xmax'] = video_xmax
        df['xmin_score'] = video_xmin_score
        df['xmax_score'] = video_xmax_score
        df['iou_score'] = video_conf

        df.to_csv("./output/PEM_results/"+video_name+".csv",index=False)
        count += 1
    print('there are %5d results' % count)
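A hedged usage sketch for this variant: only the option keys that the function itself reads are shown, and the values are placeholders rather than the repository's defaults. It assumes PEM, ProposalDataSet, and the remaining options those classes read are importable from the surrounding project, and it creates the hard-coded output directory before the call.

import os

# Hypothetical option values; PEM(opt) and ProposalDataSet(opt, ...) read further keys
# from the same dictionary, so a real run needs the project's full option set.
opt = {
    "checkpoint_path": "./checkpoint",
    "pem_inference_subset": "validation",
}

os.makedirs("./output/PEM_results", exist_ok=True)  # the function writes here unconditionally
BSN_inference_PEM(opt)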
Example #3
def BSN_inference_PEM(opt):
    output_dir = os.path.join(opt['pem_inference_results_dir'],
                              opt['checkpoint_path'].split('/')[-1])
    checkpoint_epoch = opt['checkpoint_epoch']
    if checkpoint_epoch is not None:
        checkpoint_path = os.path.join(
            opt['checkpoint_path'], 'pem_checkpoint.%d.pth' % checkpoint_epoch)
        output_dir = os.path.join(output_dir, 'ckpt.%d' % checkpoint_epoch)
    else:
        checkpoint_path = os.path.join(opt['checkpoint_path'], 'pem_best.pth')
        output_dir = os.path.join(output_dir, 'ckpt.best')

    print('Checkpoint path is ', checkpoint_path)
    checkpoint = torch.load(checkpoint_path)
    base_dict = {
        '.'.join(k.split('.')[1:]): v
        for k, v in list(checkpoint['state_dict'].items())
    }

    model = PEM(opt)
    model.load_state_dict(base_dict)
    model = torch.nn.DataParallel(model).cuda()
    model.eval()

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    test_loader = torch.utils.data.DataLoader(
        ProposalDataSet(opt, subset=opt["pem_inference_subset"]),
        batch_size=model.module.batch_size,
        shuffle=False,
        num_workers=opt['data_workers'],
        pin_memory=True,
        drop_last=False)

    # Proposals arrive in flat batches that can span video boundaries, so buffer
    # rows per video and flush them to a CSV file whenever the video id changes.
    current_video = None
    current_data = None
    columns = ["xmin", "xmax", "xmin_score", "xmax_score", "iou_score"]
    for idx, (index_list, video_feature, video_xmin, video_xmax,
              video_xmin_score, video_xmax_score) in enumerate(test_loader):
        video_conf = model(video_feature).view(-1).detach().cpu().numpy()
        video_xmin = video_xmin.view(-1).cpu().numpy()
        video_xmax = video_xmax.view(-1).cpu().numpy()
        video_xmin_score = video_xmin_score.view(-1).cpu().numpy()
        video_xmax_score = video_xmax_score.view(-1).cpu().numpy()

        index_list = index_list.numpy()
        for batch_idx, full_idx in enumerate(index_list):
            video, frame = test_loader.dataset.indices[full_idx]
            if not current_video:
                print('First video: ', video, full_idx)
                current_video = video
                current_data = [[] for _ in range(len(columns))]
            elif video != current_video:
                print('Changing from video %s to video %s: %d' %
                      (current_video, video, full_idx))
                video_result = np.stack(current_data, axis=1)
                video_df = pd.DataFrame(video_result, columns=columns)
                path = os.path.join(output_dir, '%s.csv' % current_video)
                video_df.to_csv(path, index=False)
                current_video = video
                current_data = [[] for _ in range(len(columns))]

            current_data[0].append(video_xmin[batch_idx])
            current_data[1].append(video_xmax[batch_idx])
            current_data[2].append(video_xmin_score[batch_idx])
            current_data[3].append(video_xmax_score[batch_idx])
            current_data[4].append(video_conf[batch_idx])

    # Flush the last video's buffered rows (guarding against an empty loader).
    if current_data is not None and current_data[0]:
        video_result = np.stack(current_data, axis=1)
        video_df = pd.DataFrame(video_result, columns=columns)
        path = os.path.join(output_dir, '%s.csv' % current_video)
        video_df.to_csv(path, index=False)
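The buffer-and-flush pattern in this last variant can be exercised on its own. A minimal, self-contained sketch with made-up proposal rows (video ids, values, and file names are illustrative only): rows are consumed in the flat order a non-shuffled loader would produce, buffered per video, and written out whenever the video id changes, plus once at the end.

import numpy as np
import pandas as pd

# (video_id, xmin, xmax, xmin_score, xmax_score, iou_score) per proposal, made-up values.
rows = [
    ("video_a", 0.10, 0.30, 0.9, 0.8, 0.7),
    ("video_a", 0.25, 0.60, 0.7, 0.9, 0.6),
    ("video_b", 0.05, 0.40, 0.8, 0.6, 0.5),
]
columns = ["xmin", "xmax", "xmin_score", "xmax_score", "iou_score"]

current_video, current_data = None, None
for video, *values in rows:
    if current_video is None:
        current_video, current_data = video, [[] for _ in columns]
    elif video != current_video:
        # Flush the finished video's buffered proposals to one CSV file.
        df = pd.DataFrame(np.stack(current_data, axis=1), columns=columns)
        df.to_csv(current_video + ".csv", index=False)
        current_video, current_data = video, [[] for _ in columns]
    for col_idx, value in enumerate(values):
        current_data[col_idx].append(value)

if current_data is not None and current_data[0]:
    df = pd.DataFrame(np.stack(current_data, axis=1), columns=columns)
    df.to_csv(current_video + ".csv", index=False)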