Example #1
def test(model, model_fn, data_name, epoch):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    # create the output directories; exist_ok replaces the try/except-pass
    # blocks around os.mkdir and stops real errors being silently swallowed
    os.makedirs('result/pred', exist_ok=True)

    from data.scannetv2_inst import Dataset
    dataset = Dataset(test=True)
    dataset.testLoader()

    dataloader = dataset.test_data_loader

    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()
    with torch.no_grad():
        model = model.eval()
        start = time.time()

        matches = {}
        for i, batch in enumerate(dataloader):
            N = batch['feats'].shape[0]
            test_scene_name = dataset.test_file_names[int(
                batch['id'][0])].split('/')[-1][:12]

            start1 = time.time()
            preds = model_fn(batch, model, epoch)
            end1 = time.time() - start1

            semantic_pred = preds['semantic_crf']
            feat = preds['feat'].detach().cpu().numpy()

            semantic_pred = semantic_pred.detach().cpu().numpy()

            labels = batch['labels'].detach().cpu().numpy()

            with open('result/pred/' + test_scene_name + '.txt', 'w') as f1:
                for j in range(labels.shape[0]):
                    f1.write(str(int(semantic_pred[j])) + '\n')
            # intersection, union, target = intersectionAndUnion(semantic_pred, labels, 20)
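
Two helpers used above come from elsewhere in the codebase: AverageMeter is instantiated but never defined in the snippet, and the commented-out evaluation line references intersectionAndUnion. Minimal sketches of the conventional implementations, written here as assumptions rather than the repository's own code:

import numpy as np

class AverageMeter(object):
    """Running average helper (the usual PyTorch-example definition)."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val, self.sum, self.count, self.avg = 0, 0, 0, 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def intersectionAndUnion(output, target, K, ignore_index=-100):
    # output, target: 1-D arrays of class ids in [0, K); points whose target
    # is ignore_index are masked out before the per-class histograms
    output = output.copy().reshape(-1)
    target = target.reshape(-1)
    output[target == ignore_index] = ignore_index
    intersection = output[output == target]
    area_intersection, _ = np.histogram(intersection, bins=np.arange(K + 1))
    area_output, _ = np.histogram(output, bins=np.arange(K + 1))
    area_target, _ = np.histogram(target, bins=np.arange(K + 1))
    area_union = area_output + area_target - area_intersection
    return area_intersection, area_union, area_target
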
Example #2
def test(model, model_fn, data_name, epoch):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')

    # each dataset ships its own Dataset class; pick it once, then build
    # the loader a single time instead of repeating it in every branch
    if cfg.dataset == 'scannetv2':
        from data.scannetv2_inst import Dataset
    elif cfg.dataset == 'DALES':
        from data.DALES_inst import Dataset
    elif cfg.dataset == 'DALESmini':
        from data.DALESmini_inst import Dataset
    elif cfg.dataset == 'DALEStext':
        from data.DALEStext_inst import Dataset
    else:
        print("Error: no data loader for {}".format(cfg.dataset))
        exit(1)
    dataset = Dataset(test=True)
    dataset.testLoader()

    dataloader = dataset.test_data_loader

    with torch.no_grad():
        model = model.eval()
        start = time.time()

        matches = {}
        for i, batch in enumerate(dataloader):
            N = batch['feats'].shape[0]
            test_scene_name = dataset.test_file_names[int(
                batch['id'][0])].split('/')[-1][:21]

            start1 = time.time()
            try:
                preds = model_fn(batch, model, epoch)
            except Exception:
                print("Error making preds on {}".format(test_scene_name))
                import ipdb
                ipdb.set_trace(context=10)
            end1 = time.time() - start1

            ##### get predictions (#1 semantic_pred, pt_offsets; #2 scores, proposals_pred)
            semantic_scores = preds['semantic']  # (N, nClass=20) float32, cuda
            semantic_pred = semantic_scores.max(1)[1]  # (N) long, cuda

            pt_offsets = preds['pt_offsets']  # (N, 3), float32, cuda

            if (epoch > cfg.prepare_epochs):
                scores = preds['score']  # (nProposal, 1) float, cuda
                scores_pred = torch.sigmoid(scores.view(-1))

                proposals_idx, proposals_offset = preds['proposals']
                # proposals_idx: (sumNPoint, 2), int, cpu, dim 0 for cluster_id, dim 1 for corresponding point idxs in N
                # proposals_offset: (nProposal + 1), int, cpu
                proposals_pred = torch.zeros(
                    (proposals_offset.shape[0] - 1, N),
                    dtype=torch.int,
                    device=scores_pred.device)  # (nProposal, N), int, cuda
                proposals_pred[proposals_idx[:, 0].long(),
                               proposals_idx[:, 1].long()] = 1

                # semantic label of each proposal, read off its first point
                proposal_start = proposals_idx[:, 1][
                    proposals_offset[:-1].long()].long()
                semantic_id = torch.tensor(
                    semantic_label_idx, device=scores_pred.device)[
                        semantic_pred[proposal_start]]  # (nProposal), long

                ##### score threshold
                score_mask = (scores_pred > cfg.TEST_SCORE_THRESH)
                scores_pred = scores_pred[score_mask]
                proposals_pred = proposals_pred[score_mask]
                semantic_id = semantic_id[score_mask]

                ##### npoint threshold
                proposals_pointnum = proposals_pred.sum(1)
                npoint_mask = (proposals_pointnum > cfg.TEST_NPOINT_THRESH)
                scores_pred = scores_pred[npoint_mask]
                proposals_pred = proposals_pred[npoint_mask]
                semantic_id = semantic_id[npoint_mask]

                ##### nms
                if semantic_id.shape[0] == 0:
                    pick_idxs = np.empty(0)
                else:
                    # pairwise IoU between proposal masks
                    proposals_pred_f = proposals_pred.float()  # (nProposal, N), float, cuda
                    intersection = torch.mm(
                        proposals_pred_f,
                        proposals_pred_f.t())  # (nProposal, nProposal), float, cuda
                    proposals_pointnum = proposals_pred_f.sum(1)  # (nProposal), float, cuda
                    proposals_pn_h = proposals_pointnum.unsqueeze(-1).repeat(
                        1, proposals_pointnum.shape[0])
                    proposals_pn_v = proposals_pointnum.unsqueeze(0).repeat(
                        proposals_pointnum.shape[0], 1)
                    cross_ious = intersection / (proposals_pn_h +
                                                 proposals_pn_v - intersection)
                    pick_idxs = non_max_suppression(
                        cross_ious.cpu().numpy(),
                        scores_pred.cpu().numpy(),
                        cfg.TEST_NMS_THRESH)  # kept proposal indices, int, (nCluster,)
                clusters = proposals_pred[pick_idxs]
                cluster_scores = scores_pred[pick_idxs]
                cluster_semantic_id = semantic_id[pick_idxs]

                # create a cluster for all points that have semantic label ground and no cluster
                ground = np.ones(np.array(clusters.cpu()).shape[1])
                for row in np.array(clusters.cpu(), dtype=np.uint32):
                    ground = np.where(row != 0, 0, ground)
                ground = np.where(
                    np.array(semantic_pred.cpu(), dtype=np.uint32) == 0,
                    ground, 0)

                # add the new ground cluster, its score (0.0 because it wasn't predicted), and its semantic id
                clusters = torch.cat(
                    (clusters, torch.from_numpy(ground).unsqueeze(0).type(
                        torch.IntTensor).cuda()))
                cluster_scores = torch.cat(
                    (cluster_scores, torch.tensor([0.0]).cuda()))
                cluster_semantic_id = torch.cat(
                    (cluster_semantic_id, torch.tensor([1]).cuda()))

                nclusters = clusters.shape[0]

                ##### prepare for evaluation
                if cfg.eval:
                    pred_info = {}
                    pred_info['conf'] = cluster_scores.cpu().numpy()
                    pred_info['label_id'] = cluster_semantic_id.cpu().numpy()
                    pred_info['mask'] = clusters.cpu().numpy()
                    gt_file = os.path.join(cfg.data_root, cfg.dataset,
                                           cfg.split + '_gt',
                                           test_scene_name + '.txt')
                    gt2pred, pred2gt = eval.assign_instances_for_scan(
                        test_scene_name, pred_info, gt_file)
                    matches[test_scene_name] = {}
                    matches[test_scene_name]['gt'] = gt2pred
                    matches[test_scene_name]['pred'] = pred2gt

            ##### save files
            start3 = time.time()
            if cfg.save_semantic:
                os.makedirs(os.path.join(result_dir, 'semantic'),
                            exist_ok=True)
                semantic_np = semantic_pred.cpu().numpy()
                np.save(
                    os.path.join(result_dir, 'semantic',
                                 test_scene_name + '.npy'), semantic_np)

            if cfg.save_pt_offsets:
                os.makedirs(os.path.join(result_dir, 'coords_offsets'),
                            exist_ok=True)
                pt_offsets_np = pt_offsets.cpu().numpy()
                coords_np = batch['locs_float'].numpy()
                coords_offsets = np.concatenate((coords_np, pt_offsets_np),
                                                1)  # (N, 6)
                np.save(
                    os.path.join(result_dir, 'coords_offsets',
                                 test_scene_name + '.npy'), coords_offsets)

            if epoch > cfg.prepare_epochs and cfg.save_instance:
                try:
                    with open(
                            os.path.join(result_dir,
                                         test_scene_name + '.txt'), 'w') as f:
                        for proposal_id in range(nclusters):
                            clusters_i = clusters[proposal_id].cpu().numpy()  # (N)
                            # majority semantic class over the cluster's points
                            semantic_label = np.argmax(
                                np.bincount(semantic_pred[np.where(
                                    clusters_i == 1)[0]].cpu()))
                            score = cluster_scores[proposal_id]
                            f.write(
                                'predicted_masks/{}_{:03d}.txt {} {:.4f}'.format(
                                    test_scene_name, proposal_id,
                                    semantic_label_idx[semantic_label], score))
                            if proposal_id < nclusters - 1:
                                f.write('\n')
                            np.savetxt(os.path.join(
                                result_dir, 'predicted_masks',
                                test_scene_name + '_%03d.txt' % proposal_id),
                                       clusters_i,
                                       fmt='%d')
                except Exception as e:
                    print("Error with {}: {}".format(test_scene_name, e))

            end3 = time.time() - start3
            end = time.time() - start
            start = time.time()

            ##### print
            logger.info(
                "instance iter: {}/{} point_num: {} ncluster: {} time: total {:.2f}s inference {:.2f}s save {:.2f}s"
                .format(batch['id'][0] + 1, len(dataset.test_files), N,
                        nclusters, end, end1, end3))

        ##### evaluation
        if cfg.eval:
            ap_scores = eval.evaluate_matches(matches)
            avgs = eval.compute_averages(ap_scores)
            eval.print_results(avgs)
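
The non_max_suppression helper called in the NMS block above is not defined in the snippet. A minimal sketch of the greedy suppression the call site implies, assuming a precomputed (nProposal, nProposal) IoU matrix, the proposal scores, and an IoU threshold; it returns the indices of the kept proposals:

import numpy as np

def non_max_suppression(ious, scores, threshold):
    # greedily keep the highest-scoring proposal, drop everything that
    # overlaps it above the IoU threshold, and repeat on the remainder
    ixs = scores.argsort()[::-1]
    pick = []
    while len(ixs) > 0:
        i = ixs[0]
        pick.append(i)
        iou = ious[i, ixs[1:]]
        remove_ixs = np.where(iou > threshold)[0] + 1  # +1: offsets into ixs[1:]
        ixs = np.delete(ixs, remove_ixs)
        ixs = np.delete(ixs, 0)
    return np.array(pick, dtype=np.int32)
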
Example #3
def test(model, model_fn, data_name, epoch):

    # create output directories; exist_ok replaces the try/except-pass blocks
    os.makedirs(cfg.data_root + '/rel_pred', exist_ok=True)
    os.makedirs(cfg.data_root + '/rel_feat', exist_ok=True)
    os.makedirs('result/pred_train', exist_ok=True)

    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    from data.scannetv2_inst import Dataset
    dataset = Dataset(test=True)
    dataset.testLoader()

    dataloader = dataset.test_data_loader
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()
    with torch.no_grad():
        model = model.eval()
        start = time.time()

        matches = {}
        for i, batch in enumerate(dataloader):
            N = batch['feats'].shape[0]
            print(len(dataset.test_file_names), int(batch['id'][0]))
            test_scene_name = dataset.test_file_names[int(
                batch['id'][0] / 3)].split('/')[-1][:12]
            print(test_scene_name)

            start1 = time.time()
            preds = model_fn(batch, model, epoch)
            end1 = time.time() - start1

            semantic_scores = preds['products']

            # average logits and features over three augmented passes of the
            # same scene (the test loader yields each scene three times)
            if i % 3 == 0:
                features_acc = torch.zeros_like(preds['feat'])
                semantic_acc = torch.zeros_like(semantic_scores)

            semantic_acc += semantic_scores
            features_acc += preds['feat']

            if i % 3 == 2:
                semantic_feat = features_acc / 3.0
                semantic_pred = semantic_acc.max(1)[1]  # (N) long, cuda
                # move to cpu numpy so the np.save calls below work on GPU tensors
                feat = semantic_feat.detach().cpu().numpy()
                prod = semantic_acc.detach().cpu().numpy()

                np.save(
                    cfg.data_root + '/rel_pred/' + test_scene_name + '.npy',
                    prod)
                np.save(
                    cfg.data_root + '/rel_feat/' + test_scene_name + '.npy',
                    feat)

                semantic_pred = semantic_pred.detach().cpu().numpy()
                labels = batch['labels'].detach().cpu().numpy()
                with open('result/pred_train/' + test_scene_name + '.txt',
                          'w') as f1:
                    for j in range(labels.shape[0]):
                        f1.write(str(semantic_pred[j]) + '\n')
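
The i % 3 bookkeeping above relies on the test loader yielding each scene three times in a row, as three augmented views whose logits and features are averaged. The same accumulation written as a standalone helper, to make the pattern explicit (a sketch; the function name and signature are illustrative, not from the repo):

import torch

def average_over_views(view_scores, n_views=3):
    # view_scores: iterable of (N, nClass) tensors, one per augmented view
    acc = None
    for scores in view_scores:
        acc = scores.clone() if acc is None else acc + scores
    return acc / n_views
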
Example #4
def test(model, model_fn, data_name, epoch):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')

    from data.scannetv2_inst import Dataset
    dataset = Dataset(test=True)
    dataset.testLoader()

    dataloader = dataset.test_data_loader

    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()

    # output directory for the thresholded pseudo-labels
    os.makedirs(cfg.data_root + '/train' + str(conf_thres), exist_ok=True)

    with torch.no_grad():
        model = model.eval()
        start = time.time()

        matches = {}
        for i, batch in enumerate(dataloader):
            N = batch['feats'].shape[0]
            test_scene_name = dataset.test_file_names[int(
                batch['id'][0])].split('/')[-1][:12]

            start1 = time.time()
            preds = model_fn(batch, model, epoch)
            unary = np.amax(preds['unary'].cpu().numpy(), 1)

            end1 = time.time() - start1
            Q = torch.argmax(preds['Q'], 1).cpu().numpy()

            crf = preds['semantic_crf'].cpu().numpy()

            confs = preds['conf'].cpu().numpy()

            group_all = preds['group_full']
            group_to_point = preds['group2point_full']

            # per-point semantic labels, initialised to the ignore value
            sem_labels = np.full((unary.shape[0],), -100.)

            # one bucket of segment ids per semantic class
            groups = [[] for _ in range(20)]

            cnt_group = 0
            cnt = len(group_all)

            # keep only confidently classified segments as pseudo-labels
            for k in range(cnt):
                conf = confs[k]
                c = Q[k]
                if conf < conf_thres:
                    continue
                cnt_group += 1
                groups[c].append(group_all[k])
                idxs = group_to_point[group_all[k]]
                sem_labels[idxs] = c

            data = torch.load(cfg.data_root + '/train_weakly/' +
                              test_scene_name +
                              '_vh_clean_2.ply_inst_nostuff.pth')

            coords = data[0]
            colors = data[1]
            prev_sem = data[2]
            prev_groups = data[3]
            full_seg = data[4]

            # merge in the groups already present in the weak annotations,
            # without duplicating segments
            prev_group_cnt = 0
            for g in range(len(prev_groups)):
                g2 = prev_groups[g]
                prev_group_cnt += len(g2)
                for seg in g2:
                    if (seg not in groups[g]) and (seg in group_to_point):
                        groups[g].append(seg)
                        idxs = group_to_point[seg]
                        sem_labels[idxs] = g

            # previously annotated points keep their original labels
            mask = prev_sem != -100
            sem_labels[mask] = prev_sem[mask]

            torch.save((coords, colors, sem_labels, groups, full_seg),
                       cfg.data_root + '/train' + str(conf_thres) + '/' +
                       test_scene_name + '_inst_nostuff.pth')
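
A quick way to check the pseudo-label file written above is to load it back and count the labelled points (a usage sketch; cfg.data_root, conf_thres, and test_scene_name are the values assembled in the loop):

import torch

coords, colors, sem_labels, groups, full_seg = torch.load(
    cfg.data_root + '/train' + str(conf_thres) + '/' +
    test_scene_name + '_inst_nostuff.pth')
print('pseudo-labelled points:', int((sem_labels != -100).sum()),
      'of', sem_labels.shape[0])
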
Example #5
def test(model, model_fn, data_name, epoch):
    #logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    # create output directories; exist_ok replaces the try/except-pass blocks
    os.makedirs(cfg.data_root + '/unary_pred', exist_ok=True)
    os.makedirs(cfg.data_root + '/unary_feat', exist_ok=True)
    os.makedirs('result/pred', exist_ok=True)

    from data.scannetv2_inst import Dataset
    dataset = Dataset(test=True)
    dataset.testLoader()

    dataloader = dataset.test_data_loader
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()
    with torch.no_grad():
        model = model.eval()
        start = time.time()

        matches = {}
        for i, batch in enumerate(dataloader):
            N = batch['feats'].shape[0]
            test_scene_name = dataset.test_file_names[int(
                batch['id'][0] / 3)].split('/')[-1][:12]
            print(test_scene_name)

            start1 = time.time()
            preds = model_fn(batch, model, epoch)
            end1 = time.time() - start1

            semantic_scores = preds['semantic']

            # average logits over three augmented passes of the same scene
            if i % 3 == 0:
                semantic_acc = torch.zeros_like(semantic_scores)

            semantic_acc += semantic_scores

            if i % 3 == 2:
                semantic_pred = semantic_acc.max(1)[1]  # (N) long, cuda
                semantic_pred = semantic_pred.detach().cpu().numpy()
                labels = batch['labels'].detach().cpu().numpy()

                np.save(
                    cfg.data_root + '/unary_pred/' + test_scene_name + '.npy',
                    semantic_scores.detach().cpu().numpy())
                np.save(
                    cfg.data_root + '/unary_feat/' + test_scene_name + '.npy',
                    preds['feats'].detach().cpu().numpy())

                with open('result/pred/' + test_scene_name + '.txt',
                          'w') as f1:
                    for j in range(labels.shape[0]):
                        f1.write(str(semantic_pred[j]) + '\n')
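
The per-point class ids dumped to result/pred/<scene>.txt can be scored offline. A sketch that reuses the intersectionAndUnion helper sketched after Example #1, assuming ground-truth labels are available as a numpy array (score_scene is an illustrative name, not from the repo):

import numpy as np

def score_scene(pred_txt, gt_labels, n_classes=20):
    # pred_txt: one predicted class id per line, as written above;
    # gt_labels: (N,) int array with -100 marking unannotated points
    pred = np.loadtxt(pred_txt, dtype=np.int64)
    inter, union, _ = intersectionAndUnion(pred, gt_labels, n_classes)
    # classes absent from the scene contribute IoU 0 to this per-scene mean
    return (inter / np.maximum(union, 1)).mean()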