Example #1
def main():
    args = parser.parse_args()

    data_const = HicoConstants(exp_ver=args.exp_ver)
    print('Creating output dir ...')
    io.mkdir_if_not_exists(data_const.result_dir + '/map', recursive=True)

    # Load hoi_list
    hoi_list_json = os.path.join(data_const.proc_dir, 'hoi_list.json')
    hoi_list = io.load_json_object(hoi_list_json)

    # Load subset ids to eval on
    split_ids_json = os.path.join(data_const.proc_dir, 'split_ids.json')
    split_ids = io.load_json_object(split_ids_json)
    global_ids = split_ids[args.subset]
    global_ids_set = set(global_ids)

    # Create gt_dets
    print('Creating GT dets ...')
    gt_dets = load_gt_dets(data_const.proc_dir, global_ids_set)

    eval_inputs = []
    for hoi in hoi_list:
        eval_inputs.append((hoi['id'], global_ids, gt_dets,
                            data_const.result_dir + '/pred_hoi_dets.hdf5',
                            data_const.result_dir + '/map'))

    # import ipdb; ipdb.set_trace()
    # eval_hoi(*eval_inputs[0])

    print(f'Starting a pool of {args.num_processes} workers ...')
    p = Pool(args.num_processes)

    print(f'Begin mAP computation ...')
    output = p.starmap(eval_hoi, eval_inputs)
    #output = eval_hoi('003',global_ids,gt_dets,args.pred_hoi_dets_hdf5,args.out_dir)

    p.close()
    p.join()

    mAP = {
        'AP': {},
        'mAP': 0,
        'invalid': 0,
    }
    map_ = 0
    count = 0
    for ap, hoi_id in output:
        mAP['AP'][hoi_id] = ap
        if not np.isnan(ap):
            count += 1
            map_ += ap

    mAP['mAP'] = map_ / count
    mAP['invalid'] = len(output) - count

    mAP_json = os.path.join(data_const.result_dir + '/map', 'mAP.json')
    io.dump_json_object(mAP, mAP_json)

    print(f'APs have been saved to {data_const.result_dir}/map')
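A small runnable illustration of the aggregation step above: per-HOI APs are averaged while NaN entries (HOI classes absent from the evaluated subset) are skipped and counted as invalid. The AP values here are made up.

import numpy as np

aps = {'001': 0.42, '002': float('nan'), '003': 0.58}
valid = [ap for ap in aps.values() if not np.isnan(ap)]
mAP = {'AP': aps, 'mAP': sum(valid) / len(valid), 'invalid': len(aps) - len(valid)}
print(mAP['mAP'], mAP['invalid'])  # 0.5 1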
Example #2
def main():
    args = parser.parse_args()
    data_const = HicoConstants(exp_ver=args.exp_ver)
    out_dir = data_const.result_dir+'/map'

    bin_to_hoi_ids = io.load_json_object(data_const.bin_to_hoi_ids_json)
    
    mAP_json = os.path.join(out_dir,'mAP.json')
    APs = io.load_json_object(mAP_json)['AP']
    bin_map = {}
    bin_count = {}
    for bin_id,hoi_ids in bin_to_hoi_ids.items():
        bin_map[bin_id] = compute_mAP(APs,hoi_ids)

    non_rare_hoi_ids = []
    for ul in bin_to_hoi_ids.keys():
        if ul=='10':
            continue
        non_rare_hoi_ids += bin_to_hoi_ids[ul]

    sample_complexity_analysis = {
        'bin': bin_map,
        'full': compute_mAP(APs,APs.keys()),
        'rare': bin_map['10'],
        'non_rare': compute_mAP(APs,non_rare_hoi_ids)
    }

    sample_complexity_analysis_json = os.path.join(
        out_dir,
        f'sample_complexity_analysis.json')
    io.dump_json_object(
        sample_complexity_analysis,
        sample_complexity_analysis_json)


    bin_names = sorted([int(ul) for ul in bin_map.keys()])
    bin_names = [str(ul) for ul in bin_names]
    bin_headers = ['0'] + bin_names
    bin_headers = [bin_headers[i]+'-'+str(int(ul)-1) for i,ul in enumerate(bin_headers[1:])]
    headers = ['Full','Rare','Non-Rare'] + bin_headers

    sca = sample_complexity_analysis
    values = [sca['full'],sca['rare'],sca['non_rare']] + \
        [bin_map[name] for name in bin_names]
    values = [str(round(v*100,2)) for v in values]

    print('Space delimited values that can be copied to spreadsheet and split by space')
    print(' '.join(headers))
    print(' '.join(values))
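For reference, a self-contained run of the bin-header construction above, assuming the bin keys are the upper limits from Example #5 (the AP values are placeholders):

bin_map = {str(ul): 0.0 for ul in [10, 50, 100, 500, 1000, 10000]}  # placeholder APs
bin_names = sorted([int(ul) for ul in bin_map.keys()])
bin_names = [str(ul) for ul in bin_names]
bin_headers = ['0'] + bin_names
bin_headers = [bin_headers[i] + '-' + str(int(ul) - 1) for i, ul in enumerate(bin_headers[1:])]
print(bin_headers)  # ['0-9', '10-49', '50-99', '100-499', '500-999', '1000-9999']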
Example #3
    def __init__(self,
                 data_const=HicoConstants(),
                 subset='train',
                 data_aug=False,
                 sampler=None,
                 test=False):
        super(HicoDataset, self).__init__()

        self.data_aug = data_aug
        self.data_const = data_const
        self.test = test
        self.subset_ids = self._load_subset_ids(subset, sampler)
        self.sub_app_data = self._load_subset_app_data(subset)
        self.sub_spatial_data = self._load_subset_spatial_data(subset)
        self.word2vec = h5py.File(self.data_const.word2vec, 'r')
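A typical construction of this dataset, mirroring the test-time setup in Example #8; DataLoader comes from torch, while collate_fn and HicoConstants come from the project:

from torch.utils.data import DataLoader

test_dataset = HicoDataset(data_const=HicoConstants(), subset='test', test=True)
test_dataloader = DataLoader(dataset=test_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn)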
Example #4
def main():
    data_const = HicoConstants()

    hico_list = io.load_json_object(data_const.anno_list_json)
    global_ids = [anno['global_id'] for anno in hico_list]
    
    # Create and save splits
    split_ids = split(global_ids,0.2)

    split_ids_json = os.path.join(
        data_const.proc_dir,
        'split_ids.json')
    io.dump_json_object(split_ids,split_ids_json)

    # Create and save split stats
    split_stats = {}
    for subset_name,subset_ids in split_ids.items():
        split_stats[subset_name] = len(subset_ids)
        print(f'{subset_name}: {len(subset_ids)}')

    split_stats_json = os.path.join(
        data_const.proc_dir,
        'split_ids_stats.json')
    io.dump_json_object(split_stats,split_stats_json)
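The split helper is not shown; here is a minimal sketch under the assumption that 0.2 is a held-out fraction and the result is a dict mapping subset name to global ids (the subset names are illustrative, not the project's):

import random

def split(global_ids, val_frac, seed=0):
    # Shuffle deterministically, then carve off val_frac of the ids as 'val'.
    ids = list(global_ids)
    random.Random(seed).shuffle(ids)
    n_val = int(len(ids) * val_frac)
    return {'val': ids[:n_val], 'train': ids[n_val:]}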
Example #5
def main():
    data_const = HicoConstants()
    anno_list = io.load_json_object(data_const.anno_list_json)
    hoi_cls_count = {}
    for anno in tqdm(anno_list):
        if 'test' in anno['global_id']:
            continue

        for hoi in anno['hois']:
            hoi_id = hoi['id']
            if hoi_id not in hoi_cls_count:
                hoi_cls_count[hoi_id] = 0
            hoi_cls_count[hoi_id] += len(hoi['connections'])

    upper_limits = [10, 50, 100, 500, 1000, 10000]
    bin_to_hoi_ids = bin_hoi_ids(hoi_cls_count, upper_limits)

    hoi_cls_count_json = os.path.join(data_const.proc_dir,
                                      'hoi_cls_count.json')
    io.dump_json_object(hoi_cls_count, hoi_cls_count_json)

    bin_to_hoi_ids_json = os.path.join(data_const.proc_dir,
                                       'bin_to_hoi_ids.json')
    io.dump_json_object(bin_to_hoi_ids, bin_to_hoi_ids_json)
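bin_hoi_ids is not shown; a sketch under the assumption that each HOI id falls into the first bin whose upper limit exceeds its training-instance count, with bins keyed by that limit as a string (matching the '10' rare bin used in Example #2):

def bin_hoi_ids(hoi_cls_count, upper_limits):
    bins = {str(ul): [] for ul in upper_limits}
    for hoi_id, count in hoi_cls_count.items():
        for ul in upper_limits:
            if count < ul:
                bins[str(ul)].append(hoi_id)
                break
        else:
            # counts at or above the largest limit go to the last bin (assumption)
            bins[str(upper_limits[-1])].append(hoi_id)
    return bins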
Example #6
        return anno_list

    def convert(self):
        print('Creating anno list ...')
        anno_list = self.create_anno_list()
        io.dump_json_object(anno_list, self.const.anno_list_json)

        print('Creating hoi list ...')
        hoi_list = self.create_hoi_list()
        io.dump_json_object(hoi_list, self.const.hoi_list_json)

        print('Creating object list ...')
        object_list = sorted(list(set([hoi['object'] for hoi in hoi_list])))
        for i, obj in enumerate(object_list):
            object_list[i] = {'id': str(i + 1).zfill(3), 'name': obj}

        io.dump_json_object(object_list, self.const.object_list_json)

        print('Creating verb list ...')
        verb_list = sorted(list(set([hoi['verb'] for hoi in hoi_list])))
        for i, verb in enumerate(verb_list):
            verb_list[i] = {'id': str(i + 1).zfill(3), 'name': verb}

        io.dump_json_object(verb_list, self.const.verb_list_json)


if __name__ == '__main__':
    # import ipdb; ipdb.set_trace()
    hico_const = HicoConstants()
    converter = ConvertMat2Json(hico_const)
    converter.convert()
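A tiny runnable illustration of the id scheme used for the object and verb lists above (sorted names, 1-based zero-padded ids):

names = sorted({'ride', 'hold', 'carry'})
id_list = [{'id': str(i + 1).zfill(3), 'name': name} for i, name in enumerate(names)]
print(id_list)  # [{'id': '001', 'name': 'carry'}, {'id': '002', 'name': 'hold'}, {'id': '003', 'name': 'ride'}]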
Example #7
                single_feat = []
                box1_wrt_img = box_with_respect_to_img(det_boxes[i], im_wh)
                box2_wrt_img = box_with_respect_to_img(det_boxes[j], im_wh)
                box1_wrt_box2 = box1_with_respect_to_box2(
                    det_boxes[i], det_boxes[j])
                offset = center_offset(det_boxes[i], det_boxes[j], im_wh)
                single_feat = single_feat + box1_wrt_img + box2_wrt_img + box1_wrt_box2 + offset.tolist()
                # ipdb.set_trace()
                spatial_feats.append(single_feat)
    spatial_feats = np.array(spatial_feats)
    return spatial_feats


if __name__ == "__main__":
    data_const = HicoConstants()

    boxes_scores_rpn_ids_labels = h5py.File(
        data_const.boxes_scores_rpn_ids_labels, 'r')
    print('Loaded selected boxes data file successfully...')
    split_id = io.load_json_object(data_const.split_ids_json)
    anno_list = io.load_json_object(data_const.anno_list_json)
    anno_dict = {item['global_id']: item for item in anno_list}
    print('Loaded original data successfully!')

    for subset in ['train_val', 'test']:
        # create saving file
        if subset == 'train_val':
            print('Creating trainval_spatial_feat.hdf5 file....')
            save_data = h5py.File(data_const.trainval_spatial_feat, 'w')
        else:
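The helpers box_with_respect_to_img, box1_with_respect_to_box2 and center_offset are not shown; below is a hypothetical sketch of the first one, normalizing a box by the image size (the actual feature layout in the project may differ):

def box_with_respect_to_img(box, im_wh):
    # Normalized corners plus the box's area fraction of the image (assumed layout).
    x1, y1, x2, y2 = box[:4]
    w, h = float(im_wh[0]), float(im_wh[1])
    return [x1 / w, y1 / h, x2 / w, y2 / h, ((x2 - x1) * (y2 - y1)) / (w * h)]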
Example #8
def main(args):
    # use GPU if available else revert to CPU
    device = torch.device(
        'cuda' if torch.cuda.is_available() and args.gpu else 'cpu')
    print("Testing on", device)

    # Load checkpoint and set up model
    try:
        # load checkpoint
        checkpoint = torch.load(args.main_pretrained, map_location=device)
        print('vsgats Checkpoint loaded!')
        pg_checkpoint = torch.load(args.pretrained, map_location=device)

        # set up model and initialize it with uploaded checkpoint
        # ipdb.set_trace()
        if not args.exp_ver:
            args.exp_ver = args.pretrained.split(
                "/")[-2] + "_" + args.pretrained.split("/")[-1].split("_")[-2]
            # import ipdb; ipdb.set_trace()
        data_const = HicoConstants(feat_type=checkpoint['feat_type'],
                                   exp_ver=args.exp_ver)
        vs_gats = AGRNN(feat_type=checkpoint['feat_type'],
                        bias=checkpoint['bias'],
                        bn=checkpoint['bn'],
                        dropout=checkpoint['dropout'],
                        multi_attn=checkpoint['multi_head'],
                        layer=checkpoint['layers'],
                        diff_edge=checkpoint['diff_edge'])  #2 )
        vs_gats.load_state_dict(checkpoint['state_dict'])
        vs_gats.to(device)
        vs_gats.eval()

        print(pg_checkpoint['o_c_l'], pg_checkpoint['b_l'],
              pg_checkpoint['attn'], pg_checkpoint['lr'],
              pg_checkpoint['dropout'])
        pgception = PGception(action_num=pg_checkpoint['a_n'],
                              layers=1,
                              classifier_mod=pg_checkpoint['classifier_mod'],
                              o_c_l=pg_checkpoint['o_c_l'],
                              last_h_c=pg_checkpoint['last_h_c'],
                              bias=pg_checkpoint['bias'],
                              drop=pg_checkpoint['dropout'],
                              bn=pg_checkpoint['bn'],
                              agg_first=pg_checkpoint['agg_first'],
                              attn=pg_checkpoint['attn'],
                              b_l=pg_checkpoint['b_l'])

        pgception.load_state_dict(pg_checkpoint['state_dict'])
        pgception.to(device)
        pgception.eval()
        print('Constructed model successfully!')
    except Exception as e:
        print('Failed to load checkpoint or construct model!', e)
        sys.exit(1)

    print('Creating hdf5 file for predicting hoi dets ...')
    if not os.path.exists(data_const.result_dir):
        os.mkdir(data_const.result_dir)
    pred_hoi_dets_hdf5 = os.path.join(data_const.result_dir,
                                      'pred_hoi_dets.hdf5')
    pred_hois = h5py.File(pred_hoi_dets_hdf5, 'w')

    test_dataset = HicoDataset(data_const=data_const, subset='test', test=True)
    test_dataloader = DataLoader(dataset=test_dataset,
                                 batch_size=1,
                                 shuffle=False,
                                 collate_fn=collate_fn)
    # for global_id in tqdm(test_list):
    for data in tqdm(test_dataloader):
        global_id = data['global_id'][0]
        det_boxes = data['det_boxes'][0]
        roi_scores = data['roi_scores'][0]
        roi_labels = data['roi_labels'][0]
        node_num = data['node_num']
        features = data['features']
        spatial_feat = data['spatial_feat']
        word2vec = data['word2vec']
        pose_normalized = data["pose_to_human"]
        pose_to_obj_offset = data["pose_to_obj_offset"]

        # inference
        pose_to_obj_offset = pose_to_obj_offset.to(device)
        pose_normalized = pose_normalized.to(device)
        features = features.to(device)
        spatial_feat = spatial_feat.to(device)
        word2vec = word2vec.to(device)
        outputs, attn, attn_lang = vs_gats(
            node_num, features, spatial_feat, word2vec,
            [roi_labels])  # !NOTE: it is important to set [roi_labels]
        pg_outputs = pgception(pose_normalized, pose_to_obj_offset)
        action_scores = nn.Sigmoid()(outputs + pg_outputs)
        action_scores = action_scores.cpu().detach().numpy()
        # save detection result
        pred_hois.create_group(global_id)
        det_data_dict = {}
        h_idxs = np.where(roi_labels == 1)[0]
        for h_idx in h_idxs:
            for i_idx in range(len(roi_labels)):
                if i_idx == h_idx:
                    continue
                if h_idx > i_idx:
                    edge_idx = h_idx * (node_num[0] - 1) + i_idx
                else:
                    edge_idx = h_idx * (node_num[0] - 1) + i_idx - 1

                score = roi_scores[h_idx] * roi_scores[i_idx] * action_scores[
                    edge_idx]
                try:
                    hoi_ids = metadata.obj_hoi_index[roi_labels[i_idx]]
                except Exception as e:
                    ipdb.set_trace()
                for hoi_idx in range(hoi_ids[0] - 1, hoi_ids[1]):
                    hoi_pair_score = np.concatenate(
                        (det_boxes[h_idx], det_boxes[i_idx],
                         np.expand_dims(score[metadata.hoi_to_action[hoi_idx]],
                                        0)),
                        axis=0)
                    hoi_key = str(hoi_idx + 1).zfill(3)
                    if hoi_key not in det_data_dict:
                        det_data_dict[hoi_key] = hoi_pair_score[None, :]
                    else:
                        det_data_dict[hoi_key] = np.vstack(
                            (det_data_dict[hoi_key], hoi_pair_score[None, :]))
        for k, v in det_data_dict.items():
            pred_hois[global_id].create_dataset(k, data=v)

    pred_hois.close()
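A small runnable check of the edge indexing used in the detection loop above: enumerating all ordered (src, dst) pairs with src != dst, destination-innermost, the pair (h_idx, i_idx) lands at h_idx * (node_num - 1) + i_idx, minus one when i_idx > h_idx.

node_num = 4
edges = [(src, dst) for src in range(node_num) for dst in range(node_num) if src != dst]
for h_idx in range(node_num):
    for i_idx in range(node_num):
        if i_idx == h_idx:
            continue
        edge_idx = h_idx * (node_num - 1) + i_idx - (1 if i_idx > h_idx else 0)
        assert edges[edge_idx] == (h_idx, i_idx)
print('edge indexing consistent for', node_num, 'nodes')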
Example #9
def main(args):
    # Load checkpoint and set up model
    try:
        # use GPU if available else revert to CPU
        device = torch.device(
            'cuda:0' if torch.cuda.is_available() and args.gpu else 'cpu')
        print("Testing on", device)

        # set up model and initialize it with uploaded checkpoint
        if args.dataset == 'hico':
            # load checkpoint
            checkpoint = torch.load(args.main_pretrained_hico,
                                    map_location=device)
            print('vsgats Checkpoint loaded!')
            pg_checkpoint = torch.load(args.pretrained_hico,
                                       map_location=device)
            data_const = HicoConstants(feat_type=checkpoint['feat_type'])
            vs_gats = vsgat_hico(feat_type=checkpoint['feat_type'],
                                 bias=checkpoint['bias'],
                                 bn=checkpoint['bn'],
                                 dropout=checkpoint['dropout'],
                                 multi_attn=checkpoint['multi_head'],
                                 layer=checkpoint['layers'],
                                 diff_edge=checkpoint['diff_edge'])  #2 )
        if args.dataset == 'vcoco':
            # load checkpoint
            checkpoint = torch.load(args.main_pretrained_vcoco,
                                    map_location=device)
            print('vsgats Checkpoint loaded!')
            pg_checkpoint = torch.load(args.pretrained_vcoco,
                                       map_location=device)
            data_const = VcocoConstants()
            vs_gats = vsgat_vcoco(feat_type=checkpoint['feat_type'],
                                  bias=checkpoint['bias'],
                                  bn=checkpoint['bn'],
                                  dropout=checkpoint['dropout'],
                                  multi_attn=checkpoint['multi_head'],
                                  layer=checkpoint['layers'],
                                  diff_edge=checkpoint['diff_edge'])  #2 )
        vs_gats.load_state_dict(checkpoint['state_dict'])
        vs_gats.to(device)
        vs_gats.eval()

        print(pg_checkpoint['o_c_l'], pg_checkpoint['b_l'],
              pg_checkpoint['attn'], pg_checkpoint['lr'],
              pg_checkpoint['dropout'])
        # pgception = PGception(action_num=24, classifier_mod='cat', o_c_l=[64,64,128,128], last_h_c=256, bias=pg_checkpoint['bias'], drop=pg_checkpoint['dropout'], bn=pg_checkpoint['bn'])
        pgception = PGception(action_num=pg_checkpoint['a_n'],
                              layers=1,
                              classifier_mod=pg_checkpoint['classifier_mod'],
                              o_c_l=pg_checkpoint['o_c_l'],
                              last_h_c=pg_checkpoint['last_h_c'],
                              bias=pg_checkpoint['bias'],
                              drop=pg_checkpoint['dropout'],
                              bn=pg_checkpoint['bn'],
                              agg_first=pg_checkpoint['agg_first'],
                              attn=pg_checkpoint['attn'],
                              b_l=pg_checkpoint['b_l'])
        # pgception = PGception(action_num=pg_checkpoint['a_n'], drop=pg_checkpoint['dropout'])
        pgception.load_state_dict(pg_checkpoint['state_dict'])
        pgception.to(device)
        pgception.eval()
        print('Constructed model successfully!')
    except Exception as e:
        print('Failed to load checkpoint or construct model!', e)
        sys.exit(1)

    # prepare for data
    if args.dataset == 'hico':
        original_imgs_dir = os.path.join(data_const.infer_dir,
                                         'original_imgs/hico')
        # original_imgs_dir = './datasets/hico/images/test2015'
        save_path = os.path.join(data_const.infer_dir, 'processed_imgs/hico')
        test_dataset = HicoDataset(data_const=data_const, subset='test')
        dataloader = sorted(os.listdir(original_imgs_dir))
        # dataloader = ['HICO_test2015_00000128.jpg']
    else:
        original_imgs_dir = os.path.join(data_const.infer_dir,
                                         'original_imgs/vcoco')
        # original_imgs_dir = './datasets/vcoco/coco/images/val2014'
        save_path = os.path.join(data_const.infer_dir, 'processed_imgs/vcoco')
        test_dataset = VcocoDataset(data_const=data_const,
                                    subset='vcoco_test',
                                    pg_only=False)
        # dataloader = DataLoader(dataset=test_dataset, batch_size=1, shuffle=False, collate_fn=vcoco_collate_fn)
        dataloader = sorted(os.listdir(original_imgs_dir))
        dataloader = ['COCO_val2014_000000150361.jpg']

    if not os.path.exists(original_imgs_dir):
        os.makedirs(original_imgs_dir)
    if not os.path.exists(save_path):
        os.mkdir(save_path)
        print('Result images will be kept here: {}'.format(save_path))

    # ipdb.set_trace()
    for data in tqdm(dataloader):
        # load corresponding data
        # print("Testing on image named {}".format(img))
        if args.dataset == 'hico':
            img = data
            global_id = data.split('.')[0]
            test_data = test_dataset.sample_date(global_id)
            test_data = collate_fn([test_data])
            det_boxes = test_data['det_boxes'][0]
            roi_scores = test_data['roi_scores'][0]
            roi_labels = test_data['roi_labels'][0]
            keypoints = test_data['keypoints'][0]
            edge_labels = test_data['edge_labels']
            node_num = test_data['node_num']
            features = test_data['features']
            spatial_feat = test_data['spatial_feat']
            word2vec = test_data['word2vec']
            pose_normalized = test_data["pose_to_human"]
            pose_to_obj_offset = test_data["pose_to_obj_offset"]
        else:
            # global_id = data['global_id'][0]
            img = data
            global_id = str(int((data.split('.')[0].split('_')[-1])))
            test_data = test_dataset.sample_date(global_id)
            test_data = vcoco_collate_fn([test_data])
            # img = data['img_name'][0][:].astype(np.uint8).tostring().decode('ascii').split("/")[-1]
            # test_data = data
            det_boxes = test_data['det_boxes'][0]
            roi_scores = test_data['roi_scores'][0]
            roi_labels = test_data['roi_labels'][0]
            edge_labels = test_data['edge_labels']
            node_num = test_data['node_num']
            features = test_data['features']
            spatial_feat = test_data['spatial_feat']
            word2vec = test_data['word2vec']
            pose_normalized = test_data["pose_to_human"]
            pose_to_obj_offset = test_data["pose_to_obj_offset"]

        # inference
        pose_to_obj_offset = pose_to_obj_offset.to(device)
        pose_normalized = pose_normalized.to(device)
        features = features.to(device)
        spatial_feat = spatial_feat.to(device)
        word2vec = word2vec.to(device)
        outputs, attn, attn_lang = vs_gats(
            node_num, features, spatial_feat, word2vec,
            [roi_labels])  # !NOTE: it is important to set [roi_labels]
        pg_outputs = pgception(pose_normalized, pose_to_obj_offset)
        # action_score = nn.Sigmoid()(outputs+pg_outputs)
        # action_score = action_score.cpu().detach().numpy()
        det_outputs = nn.Sigmoid()(outputs + pg_outputs)
        det_outputs = det_outputs.cpu().detach().numpy()

        # show result
        # import ipdb; ipdb.set_trace()
        if args.dataset == 'hico':
            image = Image.open(
                os.path.join('datasets/hico/images/test2015',
                             img)).convert('RGB')
            image_temp = image.copy()
            gt_img = vis_img(image,
                             det_boxes,
                             roi_labels,
                             roi_scores,
                             edge_labels.cpu().numpy(),
                             score_thresh=0.5)
            det_img = vis_img(image_temp,
                              det_boxes,
                              roi_labels,
                              roi_scores,
                              det_outputs,
                              score_thresh=0.5)
        if args.dataset == 'vcoco':
            image = Image.open(
                os.path.join(data_const.original_image_dir, 'val2014',
                             img)).convert('RGB')
            image_temp = image.copy()
            gt_img = vis_img_vcoco(image,
                                   det_boxes,
                                   roi_labels,
                                   roi_scores,
                                   edge_labels.cpu().numpy(),
                                   score_thresh=0.1)
            det_img = vis_img_vcoco(image_temp,
                                    det_boxes,
                                    roi_labels,
                                    roi_scores,
                                    det_outputs,
                                    score_thresh=0.5)

        # det_img.save('/home/birl/ml_dl_projects/bigjun/hoi/VS_GATs/inference_imgs/original_imgs'+'/'+img)
        det_img.save(save_path + '/' + img.split("/")[-1])
Example #10
def main():
    # exp_name = 'factors_rcnn_det_prob_appearance_boxes_and_object_label_human_pose'
    # exp_dir = os.path.join(
    #     os.getcwd(),
    #     f'data_symlinks/hico_exp/hoi_classifier/{exp_name}')

    # map_json = os.path.join(
    #     exp_dir,
    #     'mAP_eval/test_30000/mAP.json')

    # map_json = '/home/birl/ml_dl_projects/bigjun/hoi/agrnn/result/hico/final_ver/map/mAP.json'
    map_json = '/home/birl/ml_dl_projects/bigjun/hoi/VS_GATs/result/hico/vsgat_trainval2_260/map/mAP.json'
    hoi_aps = io.load_json_object(map_json)['AP']

    data_const = HicoConstants()
    hoi_list = io.load_json_object(data_const.hoi_list_json)

    verb_to_hoi_id = {}
    for hoi in hoi_list:
        hoi_id = hoi['id']
        verb = hoi['verb']
        if verb not in verb_to_hoi_id:
            verb_to_hoi_id[verb] = []
        verb_to_hoi_id[verb].append(hoi_id)

    per_verb_hoi_aps = []
    for verb, hoi_ids in verb_to_hoi_id.items():
        verb_obj_aps = []
        for hoi_id in hoi_ids:
            verb_obj_aps.append(hoi_aps[hoi_id] * 100)

        per_verb_hoi_aps.append((verb, verb_obj_aps))
    # import ipdb; ipdb.set_trace()
    per_verb_hoi_aps = sorted(per_verb_hoi_aps, key=lambda x: np.median(x[1]))

    N = len(per_verb_hoi_aps)
    c = ['hsl(' + str(h) + ',50%' + ',50%)' for h in np.linspace(0, 360, N)]
    data = []
    for i, (verb, aps) in enumerate(per_verb_hoi_aps):
        trace = go.Box(
            y=aps,
            name=" ".join(verb.split("_")),
            boxpoints=False,  #"outliers"
            marker={'color': c[i]},
            line={'width': 1})
        data.append(trace)

    layout = go.Layout(
        plot_bgcolor='#FFFFFF',
        # paper_bgcolor='#DBDBDB',
        yaxis=dict(
            title='AP of HOI Categories',
            range=[0, 80],
            titlefont=dict(size=15),
            showgrid=True,
            gridcolor='#DBDBDB',
            # showline=True,
            # linecolor='#666666'
        ),
        xaxis=dict(title='Interactions',
                   titlefont=dict(size=15),
                   tickangle=45,
                   tickfont=dict(size=8, ),
                   showline=True,
                   linecolor='#666666'),
        height=500,
        margin=go.Margin(
            l=100,
            r=100,
            b=150,
            t=50,
        ),
    )

    filename = os.path.join('./inference_imgs', 'obj_aps_per_interaction.html')
    plotly.offline.plot({
        'data': data,
        'layout': layout
    },
                        filename=filename,
                        auto_open=False)
Example #11
def main(args):
    # use GPU if available else revert to CPU
    device = torch.device('cuda' if torch.cuda.is_available() and args.gpu else 'cpu')
    print("Testing on", device)

    # Load checkpoint and set up model
    try:
        # load checkpoint
        checkpoint = torch.load(args.pretrained, map_location=device)
        print('Checkpoint loaded!')

        # set up model and initialize it with uploaded checkpoint
        # ipdb.set_trace()
        if not args.exp_ver:
            args.exp_ver = args.pretrained.split("/")[-3]+"_"+args.pretrained.split("/")[-1].split("_")[-2]
        data_const = HicoConstants(feat_type=checkpoint['feat_type'], exp_ver=args.exp_ver)
        model = AGRNN(feat_type=checkpoint['feat_type'], bias=checkpoint['bias'], bn=checkpoint['bn'], dropout=checkpoint['dropout'], multi_attn=checkpoint['multi_head'], layer=checkpoint['layers'], diff_edge=checkpoint['diff_edge']) #2 )
        # ipdb.set_trace()
        model.load_state_dict(checkpoint['state_dict'])
        model.to(device)
        model.eval()
        print('Constructed model successfully!')
    except Exception as e:
        print('Failed to load checkpoint or construct model!', e)
        sys.exit(1)
    
    print('Creating hdf5 file for predicting hoi dets ...')
    if not os.path.exists(data_const.result_dir):
        os.mkdir(data_const.result_dir)
    pred_hoi_dets_hdf5 = os.path.join(data_const.result_dir, 'pred_hoi_dets.hdf5')
    pred_hois = h5py.File(pred_hoi_dets_hdf5,'w')

    test_dataset = HicoDataset(data_const=data_const, subset='test', test=True)
    test_dataloader = DataLoader(dataset=test_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn)
    # for global_id in tqdm(test_list): 
    for data in tqdm(test_dataloader):
        train_data = data
        global_id = train_data['global_id'][0]
        # img_name = train_data['img_name'][0]
        det_boxes = train_data['det_boxes'][0]
        roi_scores = train_data['roi_scores'][0]
        roi_labels = train_data['roi_labels'][0]
        node_num = train_data['node_num']
        features = train_data['features'] 
        spatial_feat = train_data['spatial_feat']
        word2vec = train_data['word2vec']

        # inference
        features, spatial_feat, word2vec = features.to(device), spatial_feat.to(device), word2vec.to(device)
        outputs, attn, attn_lang = model(node_num, features, spatial_feat, word2vec, [roi_labels])    # !NOTE: it is important to set [roi_labels] 
        
        action_score = nn.Sigmoid()(outputs)
        action_score = action_score.cpu().detach().numpy()
        attn = attn.cpu().detach().numpy()
        attn_lang = attn_lang.cpu().detach().numpy()
        # save detection result
        pred_hois.create_group(global_id)
        det_data_dict = {}
        h_idxs = np.where(roi_labels == 1)[0]
        labeled_edge_list = np.cumsum(node_num - np.arange(len(h_idxs)) - 1)
        labeled_edge_list[-1] = 0
        for h_idx in h_idxs:
            for i_idx in range(len(roi_labels)):
                if i_idx <= h_idx:
                    continue
                # import ipdb; ipdb.set_trace()
                edge_idx = labeled_edge_list[h_idx-1] + (i_idx-h_idx-1)
                # score = roi_scores[h_idx] * roi_scores[i_idx] * action_score[edge_idx] * (attn[h_idx][i_idx-1]+attn_lang[h_idx][i_idx-1])
                score = roi_scores[h_idx] * roi_scores[i_idx] * action_score[edge_idx]
                try:
                    hoi_ids = metadata.obj_hoi_index[roi_labels[i_idx]]
                except Exception as e:
                    ipdb.set_trace()
                for hoi_idx in range(hoi_ids[0]-1, hoi_ids[1]):
                    hoi_pair_score = np.concatenate((det_boxes[h_idx], det_boxes[i_idx], np.expand_dims(score[metadata.hoi_to_action[hoi_idx]], 0)), axis=0)
                    if str(hoi_idx+1).zfill(3) not in det_data_dict.keys():
                        det_data_dict[str(hoi_idx+1).zfill(3)] = hoi_pair_score[None,:]
                    else:
                        det_data_dict[str(hoi_idx+1).zfill(3)] = np.vstack((det_data_dict[str(hoi_idx+1).zfill(3)], hoi_pair_score[None,:]))
        for k, v in det_data_dict.items():
            pred_hois[global_id].create_dataset(k, data=v)

    pred_hois.close()
Example #12
    '--diff_edge',
    type=str2bool,
    default='false',
    required=True,
    help='whether h_h, h_o and o_o edges are treated as distinct edge types')

parser.add_argument(
    '--sampler',
    type=float,
    default=0,
    help='h_h edge, h_o edge, o_o edge are different with each other')

args = parser.parse_args()

if __name__ == "__main__":
    data_const = HicoConstants(feat_type=args.feat_type)
    run_model(args, data_const)

    # # import ipdb; ipdb.set_trace()
    # if args.data_aug:
    #     # filter ROIs
    #     keep_inds = list(set(np.where(node_labels.cpu().numpy() == 1)[0]))
    #     original_inds = np.arange(node_num[0])
    #     remain_inds = np.delete(original_inds, keep_inds, axis=0)
    #     random_select_inds = np.array(random.sample(remain_inds.tolist(), int(remain_inds.shape[0]/2)))
    #     choose_inds = sorted(np.hstack((keep_inds,random_select_inds)))
    #     # remove_inds = [x for x in original_inds if x not in choose_inds]
    #     if len(keep_inds)==0 or len(choose_inds)==1:
    #         continue

    #     # re-construct the data
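str2bool is referenced by the --diff_edge argument above but not shown; a common implementation it is likely equivalent to (an assumption, not the project's own code):

import argparse

def str2bool(v):
    # Accept the usual truthy/falsy spellings for boolean command-line flags.
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')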