Code example #1
0
def evaluate(model_test):
    """Evaluate point-wise part-segmentation accuracy of ``model_test``.

    Runs one pass over the evaluation split named by ``args.data_eval``,
    prints the overall accuracy, appends it to the log file ``logname``,
    and restores the model to train mode before returning.

    Relies on module-level globals: ``args``, ``is_GPU``, ``logname``,
    ``shapenet_dataset`` and ``pts_collate_seg``.
    """
    model_test.eval()
    total_correct = 0

    data_eval = shapenet_dataset(datalist_path=args.data_eval)
    eval_loader = torch.utils.data.DataLoader(data_eval,
                                              num_workers=4,
                                              batch_size=4,
                                              shuffle=True,
                                              collate_fn=pts_collate_seg)
    print("dataset size:", len(eval_loader.dataset))

    for batch_idx, (pts, label, seg) in enumerate(eval_loader):
        ## pts [N,P,3] label [N,] seg [N,P]
        if is_GPU:
            pts = Variable(pts.cuda())
            label = Variable(label.cuda())
            seg_label = Variable(seg.cuda())
        else:
            pts = Variable(pts)
            label = Variable(label)
            seg_label = Variable(seg)

        ## pred [N,50,P]  trans [N,64,64]
        # FIX: forward through the model under test, not the module-global
        # `net` — the original set `model_test` to eval mode but then
        # evaluated `net`.
        pred, trans = model_test(pts)

        _, pred_index = torch.max(pred, dim=1)  ##[N,P]
        num_correct = (pred_index.eq(seg_label)).data.cpu().sum()
        total_correct += num_correct.item()

    # NOTE(review): the denominator assumes 2048 points per cloud, matching
    # the per-batch accuracy math used by the other evaluate() variants in
    # this file — confirm against the collate function if that ever changes.
    accuracy = total_correct * 1.0 / (len(eval_loader.dataset) * 2048)
    print('the average correct rate:{}'.format(accuracy))

    model_test.train()
    with open(logname, 'a') as f:
        f.write('\nthe evaluate average accuracy:{}'.format(accuracy))
Code example #2
0
def evaluate(model_test):
    """Evaluate classification accuracy of ``model_test`` on ``args.data_eval``.

    Runs one pass over the evaluation split, prints the overall accuracy,
    appends it to the log file ``logname``, and restores the model to train
    mode before returning.

    Relies on module-level globals: ``args``, ``is_GPU``, ``logname``,
    ``shapenet_dataset`` and ``pts_collate``.
    """
    model_test.eval()
    total_correct = 0

    data_eval = shapenet_dataset(root=args.data_eval,
                                 classification=True,
                                 train=False)
    eval_loader = torch.utils.data.DataLoader(data_eval,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=4,
                                              collate_fn=pts_collate)
    print("dataset size:", len(eval_loader.dataset))

    for batch_idx, (split_dims, pts, label) in enumerate(eval_loader):
        # (removed an unused `t1 = time.time()` local left over from the
        # training loop's timing code)
        print('batch {}'.format(batch_idx))
        if is_GPU:
            pts = Variable(pts.cuda())
            label = Variable(label.cuda())
        else:
            pts = Variable(pts)
            label = Variable(label)
        # FIX: forward through the model under test, not the module-global
        # `net` — the original set `model_test` to eval mode but then
        # evaluated `net`.
        pred = model_test(pts, split_dims)

        _, pred_index = torch.max(pred, dim=1)
        num_correct = (pred_index.eq(label)).data.cpu().sum().item()
        total_correct += num_correct

    accuracy = total_correct * 1.0 / len(eval_loader.dataset)
    print('the average correct rate:{}'.format(accuracy))

    model_test.train()
    with open(logname, 'a') as f:
        f.write('\nthe evaluate average accuracy:{}\n'.format(accuracy))
Code example #3
0
def evaluate(model_test):
    """Evaluate part segmentation: point accuracy plus per-class and overall mIOU.

    Prints per-batch accuracy during the pass, then the dataset-wide
    accuracy, the class-frequency-weighted mean IOU, and each class's mIOU.

    Relies on module-level globals: ``NUM_CLASSES``, ``part_label``,
    ``weight_cls``, ``args``, ``is_GPU``, ``shapenet_dataset`` and
    ``pts_collate_seg``.
    """
    model_test.eval()
    total_correct = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_mIOU_class = [0 for _ in range(NUM_CLASSES)]

    data_eval = shapenet_dataset(datalist_path=args.data_eval)
    eval_loader = torch.utils.data.DataLoader(data_eval,
                                              num_workers=4,
                                              batch_size=4,
                                              shuffle=True,
                                              collate_fn=pts_collate_seg)
    print("dataset size:", len(eval_loader.dataset))

    for batch_idx, (pts, label, seg) in enumerate(eval_loader):
        ## pts [N,P,3] label [N,] seg [N,P]
        if is_GPU:
            pts = Variable(pts.cuda())
            label = Variable(label.cuda())
            seg_label = Variable(seg.cuda())
        else:
            pts = Variable(pts)
            label = Variable(label)
            seg_label = Variable(seg)

        ## pred [N,50,P]  trans [N,64,64]
        # FIX: forward through the model under test, not the module-global
        # `net` — the original set `model_test` to eval mode but then
        # evaluated `net`.
        pred, trans = model_test(pts)

        _, pred_index = torch.max(pred, dim=1)  ##[N,P]
        num_correct = (pred_index.eq(seg_label)).data.cpu().sum()
        # per-batch accuracy; the 4 * 2048 denominator assumes batch_size=4
        # and 2048 points per cloud (the final, possibly smaller batch is
        # slightly under-reported here, as in the original)
        print('in batch{} acc={}'.format(batch_idx,
                                         num_correct.item() * 1.0 / (4 * 2048)))
        total_correct += num_correct.item()

        ################
        ## compute each shape's mIOU and accumulate it into its object class
        for i in range(pred.size()[0]):  ## B
            iou_pc = []  # IOU of every part id belonging to this shape's class
            for part in part_label[label[i]]:
                gt = (seg[i] == part)  ## gt mask of this part id
                predict = (pred_index[i] == part).cpu()

                intersection = (gt + predict) == 2
                union = (gt + predict) >= 1

                if union.sum() == 0:
                    # part absent from both gt and prediction: perfect score
                    iou_part = 1.0
                else:
                    iou_part = intersection.int().sum().item() / (
                        union.int().sum().item() + 0.0001)

                iou_pc.append(iou_part)

            # np.asarray(iou_pc).mean() is this shape's mIOU
            total_mIOU_class[label[i]] += np.asarray(iou_pc).mean()
            total_seen_class[label[i]] += 1

    ## mIOU of each class
    # FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin float is the documented replacement.
    # NOTE(review): a class never seen in the eval split produces a 0/0 ->
    # nan entry here, matching the original behavior.
    mIOU_class = np.array(total_mIOU_class) / np.array(total_seen_class,
                                                       dtype=float)

    print('##############################################################')
    print('the average correct rate:{}'.format(
        total_correct * 1.0 / (len(eval_loader.dataset) * 2048)))
    print('the mean IOU overall :{}'.format((mIOU_class * weight_cls).sum()))
    print('##############################################################')
    print('mIOU of classes')
    for i, cls in enumerate(data_eval.classname):
        print('{}: {}'.format(cls, mIOU_class[i]))
Code example #4
0
                    metavar='LR',
                    help='initial learning rate')

parser.add_argument('--resume',
                    default='checkpoint.pth',
                    type=str,
                    metavar='PATH',
                    help='path to latest checkpoint ')

args = parser.parse_args()
logname = args.log  # text log file that training/evaluation results are appended to

# Pin all CUDA work to the device selected on the command line.
if is_GPU:
    torch.cuda.set_device(args.gpu)

# Training split, loaded for the classification task.
my_dataset = shapenet_dataset(root=args.data, classification=True)
data_loader = torch.utils.data.DataLoader(my_dataset,
                                          batch_size=args.batch_size,
                                          shuffle=True,
                                          num_workers=4,
                                          collate_fn=pts_collate)

# KD-tree-based classification network (project-local KDNet).
net = KDNet()
if is_GPU:
    net = net.cuda()
optimizer = torch.optim.Adam(net.parameters(), lr=args.lr, betas=(0.5, 0.999))
# NLL loss over log-probabilities. The name is a typo of "criterion" but is
# kept unchanged because other parts of the file reference it.
critenrion = nn.NLLLoss()


def save_checkpoint(epoch, model, num_iter):
    torch.save(
Code example #5
0
        x = self.conv5(x)  ##[N,1024,P]

        global_feature = torch.squeeze(self.max_pool(x))  ##[N,1024]
        global_feature = global_feature.view(-1, 1024, 1).repeat(1, 1, self.num_pts)

        pts_seg_feature = torch.cat([global_feature, pts_feature_align], dim=1) ##[N,1088,P]
        scores = self.classifer(pts_seg_feature)  ##[N,50,P]
        pred = F.log_softmax(scores, dim=1)

        return pred,trans2


if __name__ == '__main__':
    # Smoke test: push one batch through an untrained PointNet_seg and print
    # the output shapes, then exit.
    from data_utils import shapenet_dataset, pts_collate_seg

    # NOTE(review): hard-coded dataset path from the original author's
    # machine — replace before running elsewhere.
    my_dataset = shapenet_dataset('/home/gaoyuzhe/Downloads/3d_data/hdf5_data/test_hdf5_file_list.txt')
    data_loader = torch.utils.data.DataLoader(my_dataset,
                                              batch_size=2, shuffle=True, collate_fn=pts_collate_seg)

    net = PointNet_seg()
    for batch_idx, (pts, label, seg) in enumerate(data_loader):
        if False:  # GPU branch deliberately disabled for this CPU smoke test
            pts = Variable(pts.cuda())
            label = Variable(label.cuda())
        else:
            pts = Variable(pts)
            label = Variable(label)
        pred,trans = net(pts)
        print (pred.size())   # per-point class scores
        print (trans.size())  # feature-transform matrix
        exit()  # one batch is enough for the shape check
Code example #6
0
def evaluate(model_test):
    """Evaluate part segmentation and write accuracy and weighted mIOU to the log.

    Self-contained variant: the per-class part-id table and the class
    frequency weights are defined locally rather than at module level.
    Results are appended to the log file ``logname``.

    Relies on module-level globals: ``args``, ``is_GPU``, ``logname``,
    ``shapenet_dataset`` and ``pts_collate_seg``.
    NOTE(review): unlike the other evaluate() variants in this file, this
    one does not restore the model to train mode before returning.
    """
    NUM_CLASSES = 16

    # Part ids (out of 50) belonging to each of the 16 ShapeNet classes.
    part_label = [[0, 1, 2, 3], [4, 5], [6, 7], [8, 9, 10,
                                                 11], [12, 13, 14, 15],
                  [16, 17, 18], [19, 20, 21], [22, 23], [24, 25, 26, 27],
                  [28, 29], [30, 31, 32, 33, 34, 35], [36, 37], [38, 39, 40],
                  [41, 42, 43], [44, 45, 46], [47, 48, 49]]

    # Shapes per class; normalized below to weight per-class mIOU by frequency.
    num_dataset_cls = np.array(
        [341, 14, 11, 158, 704, 14, 159, 80, 286, 83, 51, 38, 44, 12, 31, 848])
    weight_cls = num_dataset_cls.astype(
        np.float32) * 1.0 / num_dataset_cls.sum().astype(np.float32)

    model_test.eval()
    total_correct = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_mIOU_class = [0 for _ in range(NUM_CLASSES)]

    data_eval = shapenet_dataset(datalist_path=args.data_eval)
    eval_loader = torch.utils.data.DataLoader(data_eval,
                                              num_workers=4,
                                              batch_size=4,
                                              shuffle=True,
                                              collate_fn=pts_collate_seg)
    print("dataset size:", len(eval_loader.dataset))

    for batch_idx, (pts, label, seg) in enumerate(eval_loader):
        ## pts [N,P,3] label [N,] seg [N,P]
        if is_GPU:
            pts = Variable(pts.cuda())
            label = Variable(label.cuda())
            seg_label = Variable(seg.cuda())
        else:
            pts = Variable(pts)
            label = Variable(label)
            seg_label = Variable(seg)

        ## pred [N,50,P]  (this network variant returns no transform matrix)
        # FIX: forward through the model under test, not the module-global
        # `net` — the original set `model_test` to eval mode but then
        # evaluated `net`.
        pred = model_test(pts)

        _, pred_index = torch.max(pred, dim=1)  ##[N,P]
        num_correct = (pred_index.eq(seg_label)).data.cpu().sum()
        # per-batch accuracy; assumes batch_size=4 and 2048 points per cloud
        print('in batch{} acc={}'.format(batch_idx,
                                         num_correct.item() * 1.0 /
                                         (4 * 2048)))
        total_correct += num_correct.item()

        ################
        ## compute each shape's mIOU and accumulate it into its object class
        for i in range(pred.size()[0]):  ## B
            iou_pc = []  # IOU of every part id belonging to this shape's class
            for part in part_label[label[i]]:
                gt = (seg[i] == part)  ## gt mask of this part id
                predict = (pred_index[i] == part).cpu()

                intersection = (gt + predict) == 2
                union = (gt + predict) >= 1

                if union.sum() == 0:
                    # part absent from both gt and prediction: perfect score
                    iou_part = 1.0
                else:
                    iou_part = intersection.int().sum().item() / (
                        union.int().sum().item() + 0.0001)

                iou_pc.append(iou_part)

            # np.asarray(iou_pc).mean() is this shape's mIOU
            total_mIOU_class[label[i]] += np.asarray(iou_pc).mean()
            total_seen_class[label[i]] += 1

    ## mIOU of each class
    # FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin float is the documented replacement.
    mIOU_class = np.array(total_mIOU_class) / np.array(total_seen_class,
                                                       dtype=float)

    with open(logname, 'a') as f:
        f.write(
            '##############################################################\n')
        f.write('the average correct rate:{}\n'.format(
            total_correct * 1.0 / (len(eval_loader.dataset) * 2048)))
        f.write('the mean IOU overall :{}\n'.format(
            (mIOU_class * weight_cls).sum()))
        f.write(
            '##############################################################\n')
Code example #7
0
    resume = os.path.join(LOG_DIR, "checkpoint.pth")
else:
    resume = args.resume

logname = os.path.join(LOG_DIR, 'log.txt')  # running training/evaluation log
optfile = os.path.join(LOG_DIR, 'opt.txt')  # snapshot of all CLI options
with open(optfile, 'wt') as opt_f:
    opt_f.write('------------ Options -------------\n')
    for k, v in sorted(vars(args).items()):
        opt_f.write('%s: %s\n' % (str(k), str(v)))
    opt_f.write('-------------- End ----------------\n')

# Pin all CUDA work to the device selected on the command line.
if is_GPU:
    torch.cuda.set_device(args.gpu)

# Training split for the segmentation task.
my_dataset = shapenet_dataset(args.data)
data_loader = torch.utils.data.DataLoader(my_dataset,
                                          batch_size=args.batch_size,
                                          shuffle=True,
                                          num_workers=4,
                                          collate_fn=pts_collate_seg)

# Segmentation network on raw xyz input (presumably a PointNet++-style
# model — project-local class, not visible here).
net = pointnet2_seg(input_dim=3)
if is_GPU:
    net = net.cuda()
optimizer = torch.optim.Adam(net.parameters(), lr=args.lr, betas=(0.9, 0.999))
# NLL loss over log-probabilities. The name is a typo of "criterion" but is
# kept unchanged because other parts of the file reference it.
critenrion = nn.NLLLoss()


def save_checkpoint(epoch, model, num_iter):
    torch.save(