Example #1

import pickle

import numpy as np
import torch

#Shared imports for the examples below. The project-level helpers used in
#these fragments (score_sceneGraph_to_viewObjects_nnRels, evaluate_topK,
#evaluation.utils.reduceIndices_sceneVoting) come from the surrounding
#repository; their exact import paths are not shown here.

def scenegraph_to_viewObjects(dataset_train,
                              dataset_test,
                              top_k=(1, 3, 5, 10),
                              unused_factor=None,
                              check_count='all',
                              use_nn_score=True):
    print(
        f'#Check: {check_count}, # training: {len(dataset_train)}, # test: {len(dataset_test)}'
    )
    print('unused-factor:', unused_factor)
    print('use nn score:', use_nn_score)

    retrieval_dict = {}

    image_positions_train, image_orientations_train = dataset_train.image_positions, dataset_train.image_orientations
    image_positions_test, image_orientations_test = dataset_test.image_positions, dataset_test.image_orientations
    scene_names_train = dataset_train.image_scene_names
    scene_names_test = dataset_test.image_scene_names

    pos_results = {k: [] for k in top_k}
    ori_results = {k: [] for k in top_k}
    scene_results = {k: [] for k in top_k}

    if check_count == 'all':
        print('evaluating all indices...')
        check_indices = np.arange(len(dataset_test))
    else:
        print('evaluating random indices...')
        check_indices = np.random.randint(len(dataset_test), size=check_count)

    for idx in check_indices:
        scene_name_gt = scene_names_test[idx]

        #Score query SG vs. database scenes
        scene_graph = dataset_test.view_scenegraphs[idx]
        scores = np.zeros(len(dataset_train))
        for i in range(len(dataset_train)):
            score, _ = score_sceneGraph_to_viewObjects_nnRels(
                scene_graph,
                dataset_train.view_objects[i],
                unused_factor=unused_factor,
                use_nn_score=use_nn_score)
            scores[i] = score

        sorted_indices = np.argsort(-1.0 *
                                    scores)  #Sort highest -> lowest scores
        pos_dists = np.linalg.norm(image_positions_train[:] -
                                   image_positions_test[idx],
                                   axis=1)  #CARE: also adds z-distance
        ori_dists = np.abs(image_orientations_train[:] -
                           image_orientations_test[idx])
        ori_dists = np.minimum(ori_dists, 2 * np.pi - ori_dists)

        retrieval_dict[idx] = sorted_indices[0:np.max(top_k)]

        for k in top_k:
            scene_correct = np.array([
                scene_name_gt == scene_names_train[retrieved_index]
                for retrieved_index in sorted_indices[0:k]
            ])
            topk_pos_dists = pos_dists[sorted_indices[0:k]]
            topk_ori_dists = ori_dists[sorted_indices[0:k]]

            #Append the average pos&ori. errors *for the cases that the scene was hit*
            pos_results[k].append(
                np.mean(topk_pos_dists[scene_correct == True]
                        ) if np.sum(scene_correct) > 0 else None)
            ori_results[k].append(
                np.mean(topk_ori_dists[scene_correct == True]
                        ) if np.sum(scene_correct) > 0 else None)
            scene_results[k].append(
                np.mean(scene_correct))  #Always append the scene-scores

    for k in top_k:
        assert len(pos_results[k]) == len(ori_results[k]) == len(
            scene_results[k]) == len(check_indices)

    print('Saving retrieval results...')
    with open('retrievals_PureSG.pkl', 'wb') as f:
        pickle.dump(retrieval_dict, f)

    return evaluate_topK(pos_results, ori_results, scene_results)
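
#The orientation error above is a circular distance: a plain absolute
#difference would report ~2*pi for two nearly identical headings on opposite
#sides of the wrap-around. A minimal, self-contained illustration (not part
#of the original pipeline):
import numpy as np

def angular_distance(a, b):
    """Smallest absolute difference between two angles in radians."""
    d = np.abs(a - b)
    return np.minimum(d, 2 * np.pi - d)

assert np.isclose(angular_distance(0.1, 2 * np.pi - 0.1), 0.2)  #Wraps correctly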
Example #2
def vge_graph2image(data_loader_train,
                    data_loader_test,
                    model,
                    top_k=(1, 3, 5, 10),
                    check_count='all'):
    retrieval_dict = {}

    dataset_train = data_loader_train.dataset
    dataset_test = data_loader_test.dataset

    #Turn on graph data
    dataset_train.return_graph_data = True
    dataset_test.return_graph_data = True

    image_positions_train, image_orientations_train = dataset_train.image_positions, dataset_train.image_orientations
    image_positions_test, image_orientations_test = dataset_test.image_positions, dataset_test.image_orientations
    scene_names_train = dataset_train.image_scene_names
    scene_names_test = dataset_test.image_scene_names

    pos_results = {k: [] for k in top_k}
    ori_results = {k: [] for k in top_k}
    scene_results = {k: [] for k in top_k}

    if check_count == 'all':
        print('evaluating all indices...')
        check_indices = np.arange(len(dataset_test))
    else:
        print('evaluating random indices...')
        check_indices = np.random.randint(len(dataset_test), size=check_count)

    print('Building embeddings...')
    x_train, v_train = torch.tensor([]).cuda(), torch.tensor(
        []).cuda()  #Visual and Semantic embeddings for train dataset
    x_test, v_test = torch.tensor([]).cuda(), torch.tensor(
        []).cuda()  #Visual and Semantic embeddings for test dataset
    with torch.no_grad():
        for batch in data_loader_train:
            x, v = model(batch['images'].cuda(), batch['graphs'].to('cuda'))
            x_train, v_train = torch.cat((x_train, x)), torch.cat((v_train, v))
        for batch in data_loader_test:
            x, v = model(batch['images'].cuda(), batch['graphs'].to('cuda'))
            x_test, v_test = torch.cat((x_test, x)), torch.cat((v_test, v))

    x_train, v_train = x_train.cpu().detach().numpy(), v_train.cpu().detach(
    ).numpy()
    x_test, v_test = x_test.cpu().detach().numpy(), v_test.cpu().detach(
    ).numpy()

    for idx in check_indices:
        scene_name_gt = scene_names_test[idx]

        scores = x_train @ v_test[idx]
        assert len(scores) == len(dataset_train)
        sorted_indices = np.argsort(-1.0 * scores)  #Sort high->low

        #embedding_diffs=x_train-v_test[idx]
        #embedding_diffs=np.linalg.norm(embedding_diffs,axis=1) #CARE: Embedding-diffs can be big compared to NetVLAD-diffs
        #sorted_indices=np.argsort(embedding_diffs) #Sort low->high

        pos_dists = np.linalg.norm(image_positions_train[:] -
                                   image_positions_test[idx],
                                   axis=1)  #CARE: also adds z-distance
        ori_dists = np.abs(image_orientations_train[:] -
                           image_orientations_test[idx])
        ori_dists = np.minimum(ori_dists, 2 * np.pi - ori_dists)

        retrieval_dict[idx] = sorted_indices[0:np.max(top_k)]

        for k in top_k:
            scene_correct = np.array([
                scene_name_gt == scene_names_train[retrieved_index]
                for retrieved_index in sorted_indices[0:k]
            ])
            topk_pos_dists = pos_dists[sorted_indices[0:k]]
            topk_ori_dists = ori_dists[sorted_indices[0:k]]

            #Append the average pos&ori. errors *for the cases that the scene was hit*
            pos_results[k].append(
                np.mean(topk_pos_dists[scene_correct == True]
                        ) if np.sum(scene_correct) > 0 else None)
            ori_results[k].append(
                np.mean(topk_ori_dists[scene_correct == True]
                        ) if np.sum(scene_correct) > 0 else None)
            scene_results[k].append(
                np.mean(scene_correct))  #Always append the scene-scores

    for k in top_k:
        assert len(pos_results[k]) == len(ori_results[k]) == len(
            scene_results[k]) == len(check_indices)

    print('Saving retrieval results...')
    with open('retrievals_VGE.pkl', 'wb') as f:
        pickle.dump(retrieval_dict, f)

    #Turn off graph data
    dataset_train.return_graph_data = False
    dataset_test.return_graph_data = False

    return evaluate_topK(pos_results, ori_results, scene_results)
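
#The loop above scores one query at a time via x_train @ v_test[idx]; the
#same cross-modal similarities can be computed for all queries in a single
#matrix product. A sketch under the shapes built above (x_train: (N_train, D)
#visual embeddings, v_test: (N_test, D) semantic embeddings); illustrative
#only, not used by vge_graph2image:
import numpy as np

def rank_by_dot_product(v_queries, x_database):
    """Per-query ranking of database entries by dot-product score, highest first."""
    scores = v_queries @ x_database.T  #(N_queries, N_database)
    return np.argsort(-scores, axis=1)  #Each row sorts high->low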
Example #3
def netvlad_scenegraphs2viewobjects(data_loader_train,
                                    data_loader_test,
                                    model,
                                    top_k=(1, 3, 5, 10),
                                    combine='sum',
                                    check_count='all'):
    print(
        f'#Check: {check_count}, # training: {len(data_loader_train.dataset)}, # test: {len(data_loader_test.dataset)}'
    )

    assert combine in ('sum', 'multiply')  #'mean' is not implemented below
    print('COMBINING STRATEGY:', combine)

    retrieval_dict = {}

    dataset_train = data_loader_train.dataset
    dataset_test = data_loader_test.dataset

    image_positions_train, image_orientations_train = dataset_train.image_positions, dataset_train.image_orientations
    image_positions_test, image_orientations_test = dataset_test.image_positions, dataset_test.image_orientations
    scene_names_train = dataset_train.image_scene_names
    scene_names_test = dataset_test.image_scene_names

    pos_results = {k: [] for k in top_k}
    ori_results = {k: [] for k in top_k}
    scene_results = {k: [] for k in top_k}

    if check_count == 'all':
        print('evaluating all indices...')
        check_indices = np.arange(len(dataset_test))
    else:
        print('evaluating random indices...')
        check_indices = np.random.randint(len(dataset_test), size=check_count)

    print('Building NetVLAD vectors...')
    netvlad_vectors_train, netvlad_vectors_test = torch.tensor(
        []).cuda(), torch.tensor([]).cuda()
    with torch.no_grad():
        for batch in data_loader_test:
            out = model(batch.cuda())
            netvlad_vectors_test = torch.cat((netvlad_vectors_test, out))
        for batch in data_loader_train:
            out = model(batch.cuda())
            netvlad_vectors_train = torch.cat((netvlad_vectors_train, out))

    netvlad_vectors_train = netvlad_vectors_train.cpu().detach().numpy()
    netvlad_vectors_test = netvlad_vectors_test.cpu().detach().numpy()

    for idx in check_indices:
        scene_name_gt = scene_names_test[idx]

        netvlad_diffs = netvlad_vectors_train - netvlad_vectors_test[idx]
        netvlad_diffs = np.linalg.norm(netvlad_diffs, axis=1)

        #Score query SG vs. database scenes
        scene_graph = dataset_test.view_scenegraphs[idx]
        scores = np.zeros(len(dataset_train))
        for i in range(len(dataset_train)):
            score, _ = score_sceneGraph_to_viewObjects_nnRels(
                scene_graph, dataset_train.view_objects[i])
            scores[i] = score

        assert len(scores) == len(netvlad_diffs)

        if combine == 'sum':
            combined_scores = scores - netvlad_diffs
            sorted_indices = np.argsort(-1.0 * combined_scores)  #High->Low
        elif combine == 'multiply':
            combined_scores = (1.0 - scores) * netvlad_diffs
            sorted_indices = np.argsort(combined_scores)  #Low->High

        pos_dists = np.linalg.norm(image_positions_train[:] -
                                   image_positions_test[idx],
                                   axis=1)  #CARE: also adds z-distance
        ori_dists = np.abs(image_orientations_train[:] -
                           image_orientations_test[idx])
        ori_dists = np.minimum(ori_dists, 2 * np.pi - ori_dists)

        retrieval_dict[idx] = sorted_indices[0:np.max(top_k)]

        for k in top_k:
            scene_correct = np.array([
                scene_name_gt == scene_names_train[retrieved_index]
                for retrieved_index in sorted_indices[0:k]
            ])
            topk_pos_dists = pos_dists[sorted_indices[0:k]]
            topk_ori_dists = ori_dists[sorted_indices[0:k]]

            #Append the average pos&ori. errors *for the cases that the scene was hit*
            pos_results[k].append(
                np.mean(topk_pos_dists[scene_correct == True]
                        ) if np.sum(scene_correct) > 0 else None)
            ori_results[k].append(
                np.mean(topk_ori_dists[scene_correct == True]
                        ) if np.sum(scene_correct) > 0 else None)
            scene_results[k].append(
                np.mean(scene_correct))  #Always append the scene-scores

    for k in top_k:
        assert len(pos_results[k]) == len(ori_results[k]) == len(
            scene_results[k]) == len(check_indices)

    print('Saving retrieval results...')
    with open('retrievals_NV_SG-Match.pkl', 'wb') as f:
        pickle.dump(retrieval_dict, f)

    return evaluate_topK(pos_results, ori_results, scene_results)
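
#A toy illustration of the two fusion rules above (made-up values, not part
#of the pipeline): 'sum' adds the SG score to the negated NetVLAD distance
#and ranks high->low, while 'multiply' turns both into costs and ranks
#low->high.
import numpy as np

sg_scores = np.array([0.9, 0.2, 0.6])  #Higher = better scene-graph match
nv_diffs = np.array([0.5, 0.1, 0.4])  #Lower = better NetVLAD match

ranking_sum = np.argsort(-(sg_scores - nv_diffs))  #High->Low
ranking_multiply = np.argsort((1.0 - sg_scores) * nv_diffs)  #Low->High
print(ranking_sum, ranking_multiply)  #[0 2 1] [0 1 2]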
Example #4
def netvlad_retrieval(data_loader_train,
                      data_loader_test,
                      model,
                      top_k=(1, 3, 5, 10),
                      random_features=False,
                      check_count='all'):
    print(
        f'#Check: {check_count}, # training: {len(data_loader_train.dataset)}, # test: {len(data_loader_test.dataset)}'
    )

    retrieval_dict = {}

    if random_features:
        print('Using random vectors (sanity check)')
        netvlad_vectors_train = np.random.rand(len(data_loader_train.dataset),
                                               2)
        netvlad_vectors_test = np.random.rand(len(data_loader_test.dataset), 2)
    else:
        print('Building NetVLAD vectors...')
        netvlad_vectors_train, netvlad_vectors_test = torch.tensor(
            []).cuda(), torch.tensor([]).cuda()

        with torch.no_grad():
            for batch in data_loader_test:
                out = model(batch.cuda())
                netvlad_vectors_test = torch.cat((netvlad_vectors_test, out))
            for batch in data_loader_train:
                out = model(batch.cuda())
                netvlad_vectors_train = torch.cat(
                    (netvlad_vectors_train, out))

        netvlad_vectors_train = netvlad_vectors_train.cpu().detach().numpy()
        netvlad_vectors_test = netvlad_vectors_test.cpu().detach().numpy()

    image_positions_train, image_orientations_train = data_loader_train.dataset.image_positions, data_loader_train.dataset.image_orientations
    image_positions_test, image_orientations_test = data_loader_test.dataset.image_positions, data_loader_test.dataset.image_orientations
    scene_names_train = data_loader_train.dataset.image_scene_names
    scene_names_test = data_loader_test.dataset.image_scene_names

    #Sanity check
    #netvlad_vectors_train, netvlad_vectors_test, image_positions_train, image_positions_test, image_orientations_train, image_orientations_test, scene_names_train, scene_names_test=generate_sanity_check_dataset()

    pos_results = {k: [] for k in top_k}
    ori_results = {k: [] for k in top_k}
    scene_results = {k: [] for k in top_k}

    if check_count == 'all':
        print('evaluating all indices...')
        check_indices = np.arange(len(data_loader_test.dataset))
    else:
        print('evaluating random indices...')
        check_indices = np.random.randint(len(data_loader_test.dataset),
                                          size=check_count)

    for idx in check_indices:
        scene_name_gt = scene_names_test[idx]

        netvlad_diffs = netvlad_vectors_train - netvlad_vectors_test[idx]
        netvlad_diffs = np.linalg.norm(netvlad_diffs, axis=1)

        sorted_indices = np.argsort(netvlad_diffs)
        pos_dists = np.linalg.norm(image_positions_train[:] -
                                   image_positions_test[idx],
                                   axis=1)  #CARE: also adds z-distance
        ori_dists = np.abs(image_orientations_train[:] -
                           image_orientations_test[idx])
        ori_dists = np.minimum(ori_dists, 2 * np.pi - ori_dists)

        retrieval_dict[idx] = sorted_indices[0:np.max(top_k)]

        for k in top_k:
            scene_correct = np.array([
                scene_name_gt == scene_names_train[retrieved_index]
                for retrieved_index in sorted_indices[0:k]
            ])
            topk_pos_dists = pos_dists[sorted_indices[0:k]]
            topk_ori_dists = ori_dists[sorted_indices[0:k]]

            #Append the average pos&ori. errors *for the cases that the scene was hit*
            pos_results[k].append(
                np.mean(topk_pos_dists[scene_correct == True]
                        ) if np.sum(scene_correct) > 0 else None)
            ori_results[k].append(
                np.mean(topk_ori_dists[scene_correct == True]
                        ) if np.sum(scene_correct) > 0 else None)
            scene_results[k].append(
                np.mean(scene_correct))  #Always append the scene-scores

    for k in top_k:
        assert len(pos_results[k]) == len(ori_results[k]) == len(
            scene_results[k]) == len(check_indices)

    print('Saving retrieval results...')
    with open('retrievals_NetVLAD.pkl', 'wb') as f:
        pickle.dump(retrieval_dict, f)

    return evaluate_topK(pos_results, ori_results, scene_results)
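
#Why the random_features sanity check is informative: with random descriptors
#the expected top-k scene accuracy equals the relative frequency of the
#query's scene among the training images, so a trained model must clearly
#beat that number. A toy check with a hypothetical scene distribution:
import numpy as np

scene_names = np.array(['kitchen'] * 30 + ['office'] * 70)
hits = [np.mean(np.random.choice(scene_names, size=5) == 'kitchen')
        for _ in range(10000)]
print(np.mean(hits))  #~0.3, the relative frequency of 'kitchen'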
Example #5
            scene_correct = np.array([
                scene_name_gt == scene_names_train[retrieved_index]
                for retrieved_index in sorted_indices[0:k]
            ])
            topk_pos_dists = pos_dists[sorted_indices[0:k]]
            topk_ori_dists = ori_dists[sorted_indices[0:k]]

            #Append the average pos&ori. errors *for the cases that the scene was hit*
            pos_results[k].append(
                np.mean(topk_pos_dists[scene_correct == True]
                        ) if np.sum(scene_correct) > 0 else None)
            ori_results[k].append(
                np.mean(topk_ori_dists[scene_correct == True]
                        ) if np.sum(scene_correct) > 0 else None)
            scene_results[k].append(
                np.mean(scene_correct))  #Always append the scene-scores

    for k in top_k:
        assert len(pos_results[k]) == len(ori_results[k]) == len(
            scene_results[k]) == len(check_indices)

    print('Saving retrieval results...')
    with open('retrievals_NV-Pitts_sem3d.pkl', 'wb') as f:
        pickle.dump(retrieval_dict, f)

    results = evaluate_topK(pos_results, ori_results, scene_results)
    print(results)

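#Reading the retrievals saved above back in (a sketch; the dict maps each
#checked test index to its ranked training indices, as built in the loop):
import pickle

with open('retrievals_NV-Pitts_sem3d.pkl', 'rb') as f:
    retrievals = pickle.load(f)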
Example #6
def eval_graph2graph(dataset_train,
                     dataset_test,
                     embedding_train,
                     embedding_test,
                     top_k=(1, 3, 5, 10),
                     similarity='l2'):
    assert similarity in ('l2', )
    print('\neval_graph2graph():', similarity)

    image_positions_train, image_orientations_train = dataset_train.image_positions, dataset_train.image_orientations
    image_positions_test, image_orientations_test = dataset_test.image_positions, dataset_test.image_orientations
    scene_names_train = dataset_train.image_scene_names
    scene_names_test = dataset_test.image_scene_names

    retrieval_dict = {}

    pos_results = {k: [] for k in top_k}
    ori_results = {k: [] for k in top_k}
    scene_results = {k: [] for k in top_k}

    check_indices = np.arange(len(dataset_test))
    for idx in check_indices:
        scene_name_gt = scene_names_test[idx]

        #CARE: Make sure to use the correct similarity
        if similarity == 'l2':  #L2-difference, e.g. from TripletMarginLoss
            vector_diffs = embedding_train - embedding_test[idx]
            vector_diffs = np.linalg.norm(vector_diffs, axis=1)
            sorted_indices = np.argsort(vector_diffs)  #Low->High differences

        assert len(sorted_indices) == len(dataset_train)

        pos_dists = np.linalg.norm(image_positions_train[:] -
                                   image_positions_test[idx],
                                   axis=1)  #CARE: also adds z-distance
        ori_dists = np.abs(image_orientations_train[:] -
                           image_orientations_test[idx])
        ori_dists = np.minimum(ori_dists, 2 * np.pi - ori_dists)

        retrieval_dict[idx] = sorted_indices[0:np.max(top_k)]

        for k in top_k:
            scene_correct = np.array([
                scene_name_gt == scene_names_train[retrieved_index]
                for retrieved_index in sorted_indices[0:k]
            ])
            topk_pos_dists = pos_dists[sorted_indices[0:k]]
            topk_ori_dists = ori_dists[sorted_indices[0:k]]

            #Append the average pos&ori. errors *for the cases that the scene was hit*
            pos_results[k].append(
                np.mean(topk_pos_dists[scene_correct == True]
                        ) if np.sum(scene_correct) > 0 else None)
            ori_results[k].append(
                np.mean(topk_ori_dists[scene_correct == True]
                        ) if np.sum(scene_correct) > 0 else None)
            scene_results[k].append(
                np.mean(scene_correct))  #Always append the scene-scores

    for k in top_k:
        assert len(pos_results[k]) == len(ori_results[k]) == len(
            scene_results[k]) == len(check_indices)

    print('Saving retrieval results...')
    with open(f'retrievals_PureGE_{similarity}.pkl', 'wb') as f:
        pickle.dump(retrieval_dict, f)

    return evaluate_topK(pos_results, ori_results, scene_results)
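
#The per-query L2 differences above can be computed for all queries at once;
#a sketch assuming SciPy is available (it is not imported by these snippets):
import numpy as np
from scipy.spatial.distance import cdist

def l2_rank_all(embedding_test, embedding_train):
    """Row i ranks the training embeddings for test item i, closest first."""
    dists = cdist(embedding_test, embedding_train)  #(N_test, N_train) L2 distances
    return np.argsort(dists, axis=1)  #Low->High, matching the loop above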
Example #7
def eval_netvlad_retrieval_old(dataset_train,
                               dataset_test,
                               netvlad_vectors_train,
                               netvlad_vectors_test,
                               top_k=(1, 3, 5, 10),
                               reduce_indices=None):
    assert reduce_indices in (None, 'scene-voting', 'scene-voting-double-k')
    print(
        f'eval_netvlad_retrieval(): # training: {len(dataset_train)}, # test: {len(dataset_test)}'
    )
    print('Reduce indices:', reduce_indices)

    retrieval_dict = {}

    image_positions_train, image_orientations_train = dataset_train.image_positions, dataset_train.image_orientations
    image_positions_test, image_orientations_test = dataset_test.image_positions, dataset_test.image_orientations
    scene_names_train = dataset_train.image_scene_names
    scene_names_test = dataset_test.image_scene_names

    #Sanity check
    #netvlad_vectors_train, netvlad_vectors_test, image_positions_train, image_positions_test, image_orientations_train, image_orientations_test, scene_names_train, scene_names_test=generate_sanity_check_dataset()

    pos_results = {k: [] for k in top_k}
    ori_results = {k: [] for k in top_k}
    scene_results = {k: [] for k in top_k}

    test_indices = np.arange(len(dataset_test))
    for test_index in test_indices:
        scene_name_gt = scene_names_test[test_index]

        netvlad_diffs = netvlad_vectors_train - netvlad_vectors_test[test_index]
        netvlad_diffs = np.linalg.norm(netvlad_diffs, axis=1)

        pos_dists = np.linalg.norm(image_positions_train[:] -
                                   image_positions_test[test_index],
                                   axis=1)  #CARE: also adds z-distance
        ori_dists = np.abs(image_orientations_train[:] -
                           image_orientations_test[test_index])
        ori_dists = np.minimum(ori_dists, 2 * np.pi - ori_dists)

        for k in top_k:
            if reduce_indices is None:
                sorted_indices = np.argsort(netvlad_diffs)[
                    0:k]  #Sanity still same result ✓
            elif reduce_indices == 'scene-voting':
                sorted_indices = np.argsort(netvlad_diffs)[0:k]
                sorted_indices = evaluation.utils.reduceIndices_sceneVoting(
                    scene_names_train, sorted_indices)
            elif reduce_indices == 'scene-voting-double-k':
                #Take another k retrievals just for scene-voting, to compare to combined models
                sorted_indices = np.argsort(netvlad_diffs)[0:k]
                sorted_indices_voting = np.argsort(netvlad_diffs)[k:2 * k]
                sorted_indices_topK, sorted_indices_doubleK = evaluation.utils.reduceIndices_sceneVoting(
                    scene_names_train, sorted_indices, sorted_indices_voting)
                #Same logic as in visual_geometric: trust the next k indices if they are "united" enough to over-rule the top-k
                sorted_indices = sorted_indices_topK if len(
                    sorted_indices_topK) > 0 else sorted_indices_doubleK

            if k == np.max(top_k): retrieval_dict[test_index] = sorted_indices

            scene_correct = np.array([
                scene_name_gt == scene_names_train[retrieved_index]
                for retrieved_index in sorted_indices
            ])
            topk_pos_dists = pos_dists[sorted_indices]
            topk_ori_dists = ori_dists[sorted_indices]

            #Append the average pos&ori. errors *for the cases that the scene was hit*
            pos_results[k].append(
                np.mean(topk_pos_dists[scene_correct == True]
                        ) if np.sum(scene_correct) > 0 else None)
            ori_results[k].append(
                np.mean(topk_ori_dists[scene_correct == True]
                        ) if np.sum(scene_correct) > 0 else None)
            scene_results[k].append(
                np.mean(scene_correct))  #Always append the scene-scores

    for k in top_k:
        assert len(pos_results[k]) == len(ori_results[k]) == len(
            scene_results[k]) == len(test_indices)

    print('Saving retrieval results...')
    with open('retrievals_NV-S3D.pkl', 'wb') as f:
        pickle.dump(retrieval_dict, f)

    return evaluate_topK(pos_results, ori_results, scene_results)
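
#A simplified stand-in for the scene-voting idea above (NOT the project's
#evaluation.utils.reduceIndices_sceneVoting, whose exact behavior is not
#shown here): keep only the retrievals whose scene wins the majority vote.
from collections import Counter

import numpy as np

def scene_vote(scene_names, indices):
    """Reduce retrieved indices to those from the most frequent scene."""
    votes = Counter(scene_names[i] for i in indices)
    winning_scene = votes.most_common(1)[0][0]
    return np.array([i for i in indices if scene_names[i] == winning_scene])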