Example #1
def create_datasets_and_dataloaders(num_points=1024):
    assert os.path.isdir(args.in_data_dir)
    #train_data = Dataset(path=args.in_data_dir, task='train',\
    #    num_samples=num_points, generalize=args.generalize, robustness=args.robustness)
    #test_data = Dataset(path=args.in_data_dir, task='test',\
    #    num_samples=num_points, generalize=args.generalize, robustness=args.robustness)
    train_data = ModelNet40(path=args.in_data_dir, task='train',\
        num_samples=num_points, generalize=args.generalize, robustness=args.robustness)
    test_data = ModelNet40(path=args.in_data_dir, task='test',\
        num_samples=num_points, generalize=args.generalize, robustness=args.robustness)

    print('# points: {:d}'.format(num_points))

    train_dataloader = torch.utils.data.DataLoader(train_data,
                                                   batch_size=args.batch_size,
                                                   shuffle=args.train,
                                                   num_workers=int(
                                                       args.n_workers))

    test_dataloader = torch.utils.data.DataLoader(test_data,
                                                  batch_size=args.batch_size,
                                                  shuffle=args.train,
                                                  num_workers=int(
                                                      args.n_workers))

    return train_data, train_dataloader, test_data, test_dataloader
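
This helper reads a module-level `args` rather than taking parameters. A minimal sketch of the namespace it assumes (attribute names are taken from the code above; the defaults and flag types are illustrative, not from the source project):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--in_data_dir', type=str, default='data/modelnet40')  # must be an existing directory
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--n_workers', type=int, default=4)
parser.add_argument('--train', action='store_true')       # also used as the DataLoader shuffle flag above
parser.add_argument('--generalize', action='store_true')  # forwarded to ModelNet40
parser.add_argument('--robustness', action='store_true')  # forwarded to ModelNet40
args = parser.parse_args()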
Example #2
def main(args):
    torch.backends.cudnn.deterministic = True
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)

    boardio = SummaryWriter(log_dir='checkpoints/' + args.exp_name)
    _init_(args)

    textio = IOStream('checkpoints/' + args.exp_name + '/run.log')
    textio.cprint(str(args))

    if args.dataset == 'modelnet40':
        train_loader = DataLoader(ModelNet40(
            num_points=args.num_points,
            partition='train',
            gaussian_noise=args.gaussian_noise,
            unseen=args.unseen,
            factor=args.factor),
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  drop_last=True)
        test_loader = DataLoader(ModelNet40(num_points=args.num_points,
                                            partition='test',
                                            gaussian_noise=args.gaussian_noise,
                                            unseen=args.unseen,
                                            factor=args.factor),
                                 batch_size=args.test_batch_size,
                                 shuffle=False,
                                 drop_last=False)
    else:
        raise Exception("not implemented")

    if args.model == 'dcp':
        net = DCP(args).cuda()
        if args.eval:
            if args.model_path == '':
                model_path = 'checkpoints' + '/' + args.exp_name + '/models/model.best.t7'
            else:
                model_path = args.model_path
                print(model_path)
            if not os.path.exists(model_path):
                print("can't find pretrained model")
                return
            net.load_state_dict(torch.load(model_path), strict=False)
        if torch.cuda.device_count() > 1:
            net = nn.DataParallel(net)
            print("Let's use", torch.cuda.device_count(), "GPUs!")
    else:
        raise Exception('Not implemented')
    if args.eval:
        test(args, net, test_loader, boardio, textio)
    else:
        train(args, net, train_loader, test_loader, boardio, textio)

    print('FINISH')
    boardio.close()
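
Note the ordering here: the checkpoint is loaded into the bare module before the optional nn.DataParallel wrap, so the saved keys carry no 'module.' prefix. Example #5 below shows the opposite case, where a checkpoint saved from a wrapped model has its 'module.' prefixes stripped at load time.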
Example #3
def main():
    parser = hmnet_arguments()
    args = parser.parse_args()
    # ensure reproducibility of experiments
    torch.backends.cudnn.deterministic = True
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)
    _init_(args)
    if args.dataset == 'ours':
        train_loader = DataLoader(ModelNet40(
            num_points=args.n_points,
            num_subsampled_points=args.n_subsampled_points,
            partition='train',
            gaussian_noise=args.gaussian_noise,
            rot_factor=args.rot_factor,
            overlap=args.overlap),
                                  batch_size=args.train_batch_size,
                                  shuffle=True,
                                  drop_last=True,
                                  num_workers=args.num_workers)
        test_loader = DataLoader(ModelNet40(
            num_points=args.n_points,
            num_subsampled_points=args.n_subsampled_points,
            partition='test',
            gaussian_noise=args.gaussian_noise,
            rot_factor=args.rot_factor,
            overlap=args.overlap),
                                 batch_size=args.test_batch_size,
                                 shuffle=False,
                                 drop_last=False,
                                 num_workers=args.num_workers)
    else:
        raise Exception("not implemented")
    if args.model == 'hmnet':
        if args.eval:
            if args.model_path == '':
                model_path = 'checkpoints' + '/' + args.exp_name + '/models/model.best.t7'
            else:
                model_path = args.model_path
            if not os.path.exists(model_path):
                print("can't find pretrained model")
                return
            model_2 = MatchNet_2(args)
            net = HMNet(args, model_2)
            eval_model(net, test_loader)
        else:
            train(args, train_loader, test_loader)
    else:
        raise Exception('Not implemented')

    print('FINISH')
Example #4
def test(args, io):
    test_loader = DataLoader(ModelNet40(partition='test', num_points=args.num_points),
                            batch_size=args.test_batch_size, shuffle=True, drop_last=False)

    device = torch.device("cuda" if args.cuda else "cpu")

    model = Pct(args).to(device)
    model = nn.DataParallel(model) 
    
    model.load_state_dict(torch.load(args.model_path))
    model = model.eval()
    test_true = []
    test_pred = []

    for data, label in test_loader:
        data, label = data.to(device), label.to(device).squeeze()
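        # (batch, points, 3) -> (batch, 3, points): the model expects channels-first point clouds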
        data = data.permute(0, 2, 1)
        logits = model(data)
        preds = logits.max(dim=1)[1] 
        test_true.append(label.cpu().numpy())
        test_pred.append(preds.detach().cpu().numpy())

    test_true = np.concatenate(test_true)
    test_pred = np.concatenate(test_pred)
    test_acc = metrics.accuracy_score(test_true, test_pred)
    avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
    outstr = 'Test :: test acc: %.6f, test avg acc: %.6f'%(test_acc, avg_per_class_acc)
    io.cprint(outstr)
Example #5
def main():
    args = parse_args()
    # print(f"args: {args}")
    os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"

    print("===> building and loading models ...")
    net = plot21H()
    # print(net)
    checkpoint = torch.load(args.checkpoint, map_location=torch.device('cpu'))
    new_check_point = OrderedDict()
    for k, v in checkpoint['net'].items():
        name = k[7:] # remove `module.`
        new_check_point[name] = v

    net.load_state_dict(new_check_point)
    net.eval()

    print('==> Preparing data ...')
    # train_set =ModelNet40(partition='train', num_points=args.num_points)
    test_set = ModelNet40(partition='test', num_points=args.num_points)

    data, label = test_set[args.id]
    plot_xyz(data, args, None, name=f"attention/Image-{args.id}.pdf")
    data = torch.tensor(data).unsqueeze(dim=0)
    data = data.permute(0, 2, 1)
    with torch.no_grad():
        logits, structure_list = net(data)
    preds = logits.max(dim=1)[1]
    print(f"predict: {preds} | label: {label}")

    struct = structure_list[args.stage]
    plot_struct(struct, args)
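
The prefix strip above assumes every key starts with 'module.' (a checkpoint saved from an nn.DataParallel model). A slightly more defensive sketch, under the same checkpoint layout, removes the prefix only when it is present:

for k, v in checkpoint['net'].items():
    name = k[len('module.'):] if k.startswith('module.') else k
    new_check_point[name] = v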
Example #6
def test(args, io):
    if args.dataset == 'modelnet40':
        test_loader = DataLoader(ModelNet40(partition='test', num_points=args.num_points), num_workers=8,
                                 batch_size=args.test_batch_size, shuffle=True, drop_last=False)
    elif args.dataset == 'ScanObjectNN':
        test_loader = DataLoader(ScanObjectNN(partition='test', num_points=args.num_points), num_workers=8,
                                 batch_size=args.test_batch_size, shuffle=True, drop_last=False)
    else:
        raise Exception("Dataset Not supported")

    device = torch.device("cuda" if args.cuda else "cpu")

    #Try to load models
    if args.model == 'pointnet':
        if args.dataset == 'modelnet40':
            model = PointNet(args, output_channels=40).to(device)
        elif args.dataset == 'ScanObjectNN':
            model = PointNet(args, output_channels=15).to(device)
        else:
            raise Exception("Dataset Not supported")
    elif args.model == 'dgcnn':
        if args.dataset == 'modelnet40':
            model = DGCNN(args, output_channels=40).to(device)
        elif args.dataset == 'ScanObjectNN':
            model = DGCNN(args, output_channels=15).to(device)
        else:
            raise Exception("Dataset Not supported")
    elif args.model == 'gbnet':
        if args.dataset == 'modelnet40':
            model = GBNet(args, output_channels=40).to(device)
        elif args.dataset == 'ScanObjectNN':
            model = GBNet(args, output_channels=15).to(device)
        else:
            raise Exception("Dataset Not supported")
    else:
        raise Exception("Not implemented")
    print(str(model))
    model = nn.DataParallel(model)
    model.load_state_dict(torch.load(args.model_path))
    model = model.eval()
    test_acc = 0.0
    count = 0.0
    test_true = []
    test_pred = []
    for data, label in test_loader:

        data, label = data.to(device), label.to(device).squeeze()
        data = data.permute(0, 2, 1)
        batch_size = data.size()[0]
        logits = model(data)
        preds = logits.max(dim=1)[1]
        test_true.append(label.cpu().numpy())
        test_pred.append(preds.detach().cpu().numpy())
    test_true = np.concatenate(test_true)
    test_pred = np.concatenate(test_pred)
    test_acc = metrics.accuracy_score(test_true, test_pred)
    avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
    outstr = 'Test :: test acc: %.6f, test avg acc: %.6f'%(test_acc, avg_per_class_acc)
    io.cprint(outstr)
Example #7
def test(args, io):
    test_loader = DataLoader(ModelNet40(partition='test',
                                        num_points=args.num_points),
                             batch_size=args.test_batch_size,
                             shuffle=True,
                             drop_last=False)

    device = torch.device("cuda" if args.cuda else "cpu")

    #Try to load models
    if args.model == 'pointnet':
        model = PointNet(args).to(device)
    elif args.model == 'dgcnn':
        model = DGCNN(args).to(device)
    else:
        model = PaiNet(args).to(device)
        #raise Exception("Not implemented")
    print(str(model))
    # model = DGCNN(args).to(device)
    model = nn.DataParallel(model)
    if os.path.exists('checkpoints/%s/models/model_%s.t7' %
                      (args.exp_name, args.model)):
        checkpoint_dict = torch.load('./checkpoints/%s/models/model_%s.t7' %
                                     (args.exp_name, args.model),
                                     map_location=device)
        # pretrained_dict = {}
        # for k, v in checkpoint_dict.items():
        #     if 'transform' in k:
        #         k = k.replace('transform', 'paiIdxMatrix')
        #     pretrained_dict[k]=v
        # # pretrained_dict = {k: v for k, v in pretrained_dict.items() if 'transform' in k}
        model.load_state_dict(checkpoint_dict, strict=True)
        # torch.save(model.state_dict(), 'checkpoints/%s/models/model_%s_2048.t7' % (args.exp_name, args.model))
        print("Load model from './checkpoints/%s/models/model_%s_2048.t7 !'" %
              (args.exp_name, args.model))
    model = model.eval()
    test_acc = 0.0
    count = 0.0
    test_true = []
    test_pred = []
    for data, label in test_loader:

        data, label = data.to(device), label.to(device).squeeze()
        data = data.permute(0, 2, 1)
        batch_size = data.size()[0]
        logits = model(data)
        preds = logits.max(dim=1)[1]
        test_true.append(label.cpu().numpy())
        test_pred.append(preds.detach().cpu().numpy())
    test_true = np.concatenate(test_true)
    test_pred = np.concatenate(test_pred)
    test_acc = metrics.accuracy_score(test_true, test_pred)
    avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
    outstr = 'Test :: test acc: %.6f, test avg acc: %.6f' % (test_acc,
                                                             avg_per_class_acc)
    io.cprint(outstr)
Example #8
File: main.py Project: WDot/HDGN
def test(args, io):
    test_loader = DataLoader(ModelNet40(partition='test',
                                        num_points=args.num_points),
                             batch_size=args.test_batch_size,
                             shuffle=False,
                             drop_last=False)

    device = torch.device("cuda" if args.cuda else "cpu")

    #Try to load models
    model = HDGN(args).to(device)
    model.load_state_dict(torch.load(args.model_path))
    model = model.eval()
    test_acc = 0.0
    count = 0.0
    test_true = []
    test_pred = []
    ind_acc = []
    ind_per_class_acc = []
    for i in range(20):
        test_pred.append([])
        test_true.append([])
        for data, label in test_loader:
            data, label = data.to(device), label.to(device).squeeze()
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            logits = model(data)
            test_pred[i].append(logits.detach().cpu().numpy())
            test_true[i].append(label.cpu().numpy())
        test_pred[i] = np.concatenate(test_pred[i])
        ind_acc.append(
            metrics.accuracy_score(np.concatenate(test_true[0]),
                                   np.argmax(test_pred[i], 1)))
        ind_per_class_acc.append(
            metrics.balanced_accuracy_score(np.concatenate(test_true[0]),
                                            np.argmax(test_pred[i], 1)))
    test_true = np.concatenate(test_true[0])
    test_pred = np.stack(test_pred)
    test_pred_mean = np.mean(test_pred, axis=0)
    test_pred_mean = np.argmax(test_pred_mean, axis=1)
    test_acc = metrics.accuracy_score(test_true, test_pred_mean)
    avg_per_class_acc = metrics.balanced_accuracy_score(
        test_true, test_pred_mean)
    #if dist.get_rank() == 0:
    outstr = 'Test :: test acc: %.6f, test avg acc: %.6f' % (test_acc,
                                                             avg_per_class_acc)
    io.cprint(outstr)
    avg_ind_acc = np.mean(ind_acc)
    avg_ind_per_class_acc = np.mean(ind_per_class_acc)
    std_ind_acc = np.std(ind_acc)
    std_ind_per_class_acc = np.std(ind_per_class_acc)
    outstr = 'Test :: ind test acc: %.6f std %.6f, ind test avg acc: %.6f std %.6f' % (
        avg_ind_acc, std_ind_acc, avg_ind_per_class_acc, std_ind_per_class_acc)
    io.cprint(outstr)
Example #9
def test(args, io):
    test_loader = DataLoader(ModelNet40(partition='test',
                                        num_points=args.num_points),
                             batch_size=args.test_batch_size,
                             shuffle=True,
                             drop_last=False)
    device = torch.device("cuda" if args.cuda else "cpu")

    #Try to load models
    if args.model == 'pointnet':
        model = PointNet(args).to(device)
    elif args.model == 'dgcnn':
        model = DGCNN(args).to(device)
    else:
        raise Exception("Not implemented")
    print(str(model))

    model = nn.DataParallel(model)
    checkpoint = torch.load(args.resume)
    model.load_state_dict(checkpoint['state_dict'])
    model = model.eval()
    test_acc = 0.0
    count = 0.0
    test_true = []
    test_pred = []
    SHAPE_NAMES = [line.rstrip() for line in \
                   open('data/modelnet40_ply_hdf5_2048/shape_names.txt')]
    NUM_CLASSES = 40
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    for data, label in test_loader:
        data, label = data.to(device), label.to(device).squeeze()
        data = data.permute(0, 2, 1)
        batch_size = data.size()[0]
        logits = model(data)
        preds = logits.max(dim=1)[1]
        test_true.append(label.cpu().numpy())
        test_pred.append(preds.detach().cpu().numpy())
    test_true = np.concatenate(test_true)
    test_pred = np.concatenate(test_pred)
    test_acc = metrics.accuracy_score(test_true, test_pred)
    avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
    outstr = 'Test :: test acc: %.6f, test avg acc: %.6f' % (test_acc,
                                                             avg_per_class_acc)
    io.cprint(outstr)
    for i in range(test_true.shape[0]):
        l = test_true[i]
        total_seen_class[l] += 1
        total_correct_class[l] += (test_pred[i] == l)
    class_accuracies = np.array(total_correct_class) / np.array(
        total_seen_class, dtype=np.float64)  # np.float was removed from NumPy
    for i, name in enumerate(SHAPE_NAMES):
        io.cprint('%10s:\t%0.3f' % (name, class_accuracies[i]))
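
The manual per-class tally above can also be written with a confusion matrix; a short sketch of the equivalent computation, using the same sklearn `metrics` module as the example:

cm = metrics.confusion_matrix(test_true, test_pred)
class_accuracies = cm.diagonal() / cm.sum(axis=1)  # per-class recall, one entry per class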
Example #10
def test(args, io):
    test_loader = DataLoader(ModelNet40(partition='test',
                                        num_points=args.num_points),
                             num_workers=5,
                             batch_size=args.test_batch_size,
                             shuffle=False,
                             drop_last=False)

    device = torch.device("cuda" if args.cuda else "cpu")
    NUM_REPEAT = 300
    NUM_VOTE = 10
    # Try to load models
    model = GDANET().to(device)
    model = nn.DataParallel(model)
    model.load_state_dict(torch.load(args.model_path))
    model = model.eval()
    best_acc = 0

    pointscale = PointcloudScale(scale_low=2. / 3.,
                                 scale_high=3. / 2.,
                                 trans_low=-0.2,
                                 trans_high=0.2,
                                 trans_open=True)
    for i in range(NUM_REPEAT):
        test_true = []
        test_pred = []

        for data, label in test_loader:
            data, label = data.to(device), label.to(device).squeeze()
            pred = 0
            for v in range(NUM_VOTE):
                new_data = data
                batch_size = data.size()[0]
                if v > 0:
                    new_data.data = pointscale(new_data.data)
                with torch.no_grad():
                    pred += F.softmax(model(new_data.permute(0, 2, 1)), dim=1)
            pred /= NUM_VOTE
            label = label.view(-1)
            pred_choice = pred.max(dim=1)[1]
            test_true.append(label.cpu().numpy())
            test_pred.append(pred_choice.detach().cpu().numpy())
        test_true = np.concatenate(test_true)
        test_pred = np.concatenate(test_pred)
        test_acc = metrics.accuracy_score(test_true, test_pred)
        if test_acc > best_acc:
            best_acc = test_acc
        outstr = 'Voting %d, test acc: %.6f,' % (i, test_acc * 100)
        io.cprint(outstr)

    final_outstr = 'Final voting result test acc: %.6f,' % (best_acc * 100)
    io.cprint(final_outstr)
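
Design note: this is test-time-augmentation voting. Each batch is scored NUM_VOTE times, every pass after the first under a random scale/translation from PointcloudScale, and the softmax outputs are averaged; the whole evaluation is then repeated NUM_REPEAT times and the best run is kept, so the reported number is a best-of-300 figure rather than a single deterministic pass.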
Example #11
def main():
    args = parse_args()
    # print(f"args: {args}")
    os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"

    print('==> Preparing data ...')
    # train_set =ModelNet40(partition='train', num_points=args.num_points)
    test_set = ModelNet40(partition='test', num_points=args.num_points)

    data, label = test_set[args.id]
    plot_xyz(data,
             args,
             name=f"forstructure/Image-{args.id}-{args.num_points}.pdf")
Example #12
File: main.py Project: sngver/dgcnn
def test(args, io):
    test_loader = DataLoader(ModelNet40(partition='test',
                                        num_points=args.num_points),
                             batch_size=args.test_batch_size,
                             shuffle=True,
                             drop_last=False)

    device = torch.device("cuda" if args.cuda else "cpu")

    #Try to load models
    model = DGCNN(args).to(device)
    model = nn.DataParallel(model)
    model.load_state_dict(torch.load(args.model_path))
    model = model.eval()
    test_acc = 0.0
    count = 0.0
    test_true = []
    test_pred = []

    #
    DUMP_DIR = 'checkpoints/' + args.exp_name + '/' + 'dump'
    os.makedirs(DUMP_DIR, exist_ok=True)  # make sure the dump directory exists before writing

    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')
    #

    for data, label in test_loader:

        data, label = data.to(device), label.to(device).squeeze()
        data = data.permute(0, 2, 1)
        batch_size = data.size()[0]
        logits = model(data)
        preds = logits.max(dim=1)[1]
        test_true.append(label.cpu().numpy())
        test_pred.append(preds.detach().cpu().numpy())
    test_true = np.concatenate(test_true)
    test_pred = np.concatenate(test_pred)

    #
    for i in range(test_true.shape[0]):
        fout.write('%d, %d\n' % (test_pred[i], test_true[i]))
    fout.close()
    #

    test_acc = metrics.accuracy_score(test_true, test_pred)
    avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
    outstr = 'Test :: test acc: %.6f, test avg acc: %.6f' % (test_acc,
                                                             avg_per_class_acc)
    io.cprint(outstr)
Example #13
def test(args, io):
    test_loader = DataLoader(ModelNet40(partition='test',
                                        num_points=args.num_points),
                             batch_size=args.test_batch_size,
                             shuffle=True,
                             drop_last=False)

    device = torch.device("cuda" if args.cuda else "cpu")

    #Try to load models
    if args.model == 'pointnet':
        model = PointNet(args).to(device)
    elif args.model == 'dgcnn':
        model = DGCNN_cls(args).to(device)
    else:
        raise Exception("Not implemented")

    model = nn.DataParallel(model)
    model.load_state_dict(torch.load(args.model_path))
    model = model.eval()
    test_acc = 0.0
    count = 0.0
    test_true = []
    test_pred = []
    for data, label in test_loader:
        data, label = data.to(device), label.to(device).squeeze()
        data = data.permute(0, 2, 1)
        batch_size = data.size()[0]
        logits = model(data)
        preds = logits.max(dim=1)[1]
        test_true.append(label.cpu().numpy())
        test_pred.append(preds.detach().cpu().numpy())
        # visualize - added by jaeha
        if args.visualize:
            xyz = data[0].cpu()
            ax = plt.axes(projection='3d')
            ax.scatter(xyz[0, :], xyz[1, :], xyz[2, :], s=1, color='blue')
            plt.title('True: ' + class_lists[label[0]] + ' ,  Pred: ' +
                      class_lists[preds[0]])
            plt.show()
    test_true = np.concatenate(test_true)
    test_pred = np.concatenate(test_pred)
    test_acc = metrics.accuracy_score(test_true, test_pred)
    avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
    outstr = 'Test :: test acc: %.6f, test avg acc: %.6f' % (test_acc,
                                                             avg_per_class_acc)
    io.cprint(outstr)
Example #14
def main():
    args = parse_args()
    print(f"args: {args}")
    os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        device = 'cuda'
        torch.cuda.manual_seed(args.seed)
    else:
        device = 'cpu'
    print(f"==> Using device: {device}")
    if args.msg is None:
        message = str(datetime.datetime.now().strftime('-%Y%m%d%H%M%S'))
    else:
        message = "-" + args.msg
    args.checkpoint = 'checkpoints/' + args.model + message

    print('==> Preparing data..')
    test_loader = DataLoader(ModelNet40(partition='test',
                                        num_points=args.num_points),
                             num_workers=8,
                             batch_size=args.batch_size,
                             shuffle=True,
                             drop_last=False)
    # Model
    print('==> Building model..')
    net = models.__dict__[args.model]()
    criterion = cal_loss
    net = net.to(device)
    checkpoint_path = os.path.join(args.checkpoint, 'best_checkpoint.pth')
    checkpoint = torch.load(checkpoint_path)
    # criterion = criterion.to(device)
    if device == 'cuda':
        net = torch.nn.DataParallel(net)
        cudnn.benchmark = True
    net.load_state_dict(checkpoint['net'])
    test_out = validate(net, test_loader, criterion, device)
    print(f"Vanilla out: {test_out}")

    print(f"===> start voting evaluation...")
    voting(net, test_loader, device, args)
Example #15
def test(args, io):
    test_loader = DataLoader(ModelNet40(partition='test', num_points=args.num_points),
                             batch_size=args.test_batch_size, shuffle=True, drop_last=False)

    device = torch.device("cuda" if args.cuda else "cpu")

    #Try to load models
    if args.model == 'pointnet':
        model = PointNet(args).to(device)
    elif args.model == 'dgcnn':
        model = DGCNN_cls(args).to(device)
    else:
        raise Exception("Not implemented")

    model = nn.DataParallel(model)
    model.load_state_dict(torch.load(args.model_path))
    # model.load_state_dict(torch.load("/home/mask/xas_ws/dgcnn_pytorch/checkpoints/cls_1024/models/model.t7"))
    model = model.eval()
    test_acc = 0.0
    count = 0.0
    test_true = []
    test_pred = []
    for data, label in test_loader:

        data, label = data.to(device), label.to(device).squeeze()
        data = data.permute(0, 2, 1)
        batch_size = data.size()[0]
        logits = model(data)
        preds = logits.max(dim=1)[1]
        test_true.append(label.cpu().numpy())
        test_pred.append(preds.detach().cpu().numpy())
    test_true = np.concatenate(test_true)
    test_pred = np.concatenate(test_pred)
    test_acc = metrics.accuracy_score(test_true, test_pred)
    avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
    outstr = 'Test :: test acc: %.6f, test avg acc: %.6f'%(test_acc, avg_per_class_acc)
    io.cprint(outstr)
Example #16
def main():
    parser = argparse.ArgumentParser(description='Point Cloud Registration')
    parser.add_argument('--exp_name',
                        type=str,
                        default='exp',
                        metavar='N',
                        help='Name of the experiment')
    parser.add_argument('--model',
                        type=str,
                        default='prnet',
                        metavar='N',
                        choices=['prnet'],
                        help='Model to use, [prnet]')
    parser.add_argument('--emb_nn',
                        type=str,
                        default='dgcnn',
                        metavar='N',
                        choices=['pointnet', 'dgcnn'],
                        help='Embedding to use, [pointnet, dgcnn]')
    parser.add_argument('--attention',
                        type=str,
                        default='transformer',
                        metavar='N',
                        choices=['identity', 'transformer'],
                        help='Head to use, [identity, transformer]')
    parser.add_argument('--head',
                        type=str,
                        default='svd',
                        metavar='N',
                        choices=['mlp', 'svd'],
                        help='Head to use, [mlp, svd]')
    parser.add_argument('--n_emb_dims',
                        type=int,
                        default=512,
                        metavar='N',
                        help='Dimension of embeddings')
    parser.add_argument('--n_blocks',
                        type=int,
                        default=1,
                        metavar='N',
                        help='Num of blocks of encoder&decoder')
    parser.add_argument('--n_heads',
                        type=int,
                        default=4,
                        metavar='N',
                        help='Num of heads in multiheadedattention')
    parser.add_argument('--n_iters',
                        type=int,
                        default=3,
                        metavar='N',
                        help='Num of iters to run inference')
    parser.add_argument('--discount_factor',
                        type=float,
                        default=0.9,
                        metavar='N',
                        help='Discount factor to compute the loss')
    parser.add_argument('--n_ff_dims',
                        type=int,
                        default=1024,
                        metavar='N',
                        help='Num of dimensions of fc in transformer')
    parser.add_argument('--n_keypoints',
                        type=int,
                        default=512,
                        metavar='N',
                        help='Num of keypoints to use')
    parser.add_argument('--temp_factor',
                        type=float,
                        default=100,
                        metavar='N',
                        help='Factor to control the softmax precision')
    parser.add_argument(
        '--cat_sampler',
        type=str,
        default='gumbel_softmax',
        choices=['softmax', 'gumbel_softmax'],
        metavar='N',
        help='use gumbel_softmax to get the categorical sample')
    parser.add_argument('--dropout',
                        type=float,
                        default=0.0,
                        metavar='N',
                        help='Dropout ratio in transformer')
    parser.add_argument('--batch_size',
                        type=int,
                        default=3,
                        metavar='batch_size',
                        help='Size of batch')
    parser.add_argument('--test_batch_size',
                        type=int,
                        default=12,
                        metavar='batch_size',
                        help='Size of batch')
    parser.add_argument('--epochs',
                        type=int,
                        default=100,
                        metavar='N',
                        help='number of epochs to train')
    parser.add_argument('--use_sgd', type=bool, default=False, help='Use SGD')
    parser.add_argument(
        '--lr',
        type=float,
        default=0.001,
        metavar='LR',
        help='learning rate (default: 0.001, 0.1 if using sgd)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.9,
                        metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--no_cuda',
                        action='store_true',
                        default=False,
                        help='enables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=1234,
                        metavar='S',
                        help='random seed (default: 1234)')
    parser.add_argument('--eval',
                        action='store_true',
                        default=False,
                        help='evaluate the model')
    parser.add_argument('--cycle_consistency_loss',
                        type=float,
                        default=0.1,
                        metavar='N',
                        help='cycle consistency loss')
    parser.add_argument('--feature_alignment_loss',
                        type=float,
                        default=0.1,
                        metavar='N',
                        help='feature alignment loss')
    parser.add_argument('--gaussian_noise',
                        type=bool,
                        default=False,
                        metavar='N',
                        help='Whether to add gaussian noise')
    parser.add_argument('--unseen',
                        type=bool,
                        default=False,
                        metavar='N',
                        help='Whether to test on unseen category')
    parser.add_argument('--n_points',
                        type=int,
                        default=1024,
                        metavar='N',
                        help='Num of points to use')
    parser.add_argument('--n_subsampled_points',
                        type=int,
                        default=768,
                        metavar='N',
                        help='Num of subsampled points to use')
    parser.add_argument('--dataset',
                        type=str,
                        default='modelnet40',
                        choices=['modelnet40'],
                        metavar='N',
                        help='dataset to use')
    parser.add_argument('--rot_factor',
                        type=float,
                        default=4,
                        metavar='N',
                        help='Divided factor of rotation')
    parser.add_argument('--model_path',
                        type=str,
                        default='',
                        metavar='N',
                        help='Pretrained model path')

    args = parser.parse_args()
    torch.backends.cudnn.deterministic = True
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)

    _init_(args)

    if args.dataset == 'modelnet40':
        train_loader = DataLoader(ModelNet40(
            num_points=args.n_points,
            num_subsampled_points=args.n_subsampled_points,
            partition='train',
            gaussian_noise=args.gaussian_noise,
            unseen=args.unseen,
            rot_factor=args.rot_factor),
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  drop_last=True,
                                  num_workers=6)
        test_loader = DataLoader(ModelNet40(
            num_points=args.n_points,
            num_subsampled_points=args.n_subsampled_points,
            partition='test',
            gaussian_noise=args.gaussian_noise,
            unseen=args.unseen,
            rot_factor=args.rot_factor),
                                 batch_size=args.test_batch_size,
                                 shuffle=False,
                                 drop_last=False,
                                 num_workers=6)
    else:
        raise Exception("not implemented")

    if args.model == 'prnet':
        net = PRNet(args).cuda()
        if args.eval:
            if args.model_path == '':
                model_path = 'checkpoints' + '/' + args.exp_name + '/models/model.best.t7'
            else:
                model_path = args.model_path
            if not os.path.exists(model_path):
                print("can't find pretrained model")
                return
    else:
        raise Exception('Not implemented')
    if not args.eval:
        train(args, net, train_loader, test_loader)

    print('FINISH')
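
All of the flags above are defined in this parser. Assuming the script is the project's main.py (an assumption; the file name is not shown here), evaluating a pretrained model might look like:

python main.py --eval --model_path checkpoints/exp/models/model.best.t7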
Example #17
def main():
    args = config_params()
    print(args)

    setup_seed(args.seed)
    if not os.path.exists(args.saved_path):
        os.makedirs(args.saved_path)
    summary_path = os.path.join(args.saved_path, 'summary')
    if not os.path.exists(summary_path):
        os.makedirs(summary_path)
    checkpoints_path = os.path.join(args.saved_path, 'checkpoints')
    if not os.path.exists(checkpoints_path):
        os.makedirs(checkpoints_path)

    train_set = ModelNet40(root=args.root,
                           npts=args.train_npts,
                           train=True,
                           normal=args.normal,
                           mode=args.mode)
    test_set = ModelNet40(root=args.root,
                          npts=args.train_npts,
                          train=False,
                          normal=args.normal,
                          mode=args.mode)
    train_loader = DataLoader(train_set,
                              batch_size=args.batchsize,
                              shuffle=True,
                              num_workers=args.num_workers)
    test_loader = DataLoader(test_set,
                             batch_size=args.batchsize,
                             shuffle=False,
                             num_workers=args.num_workers)

    in_dim = 6 if args.normal else 3
    model = IterativeBenchmark(in_dim=in_dim, niters=args.niters, gn=args.gn)
    model = model.cuda()
    loss_fn = EMDLosspy().cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=args.milestones, gamma=args.gamma, last_epoch=-1)

    writer = SummaryWriter(summary_path)

    test_min_loss, test_min_r_mse_error, test_min_rot_error = \
        float('inf'), float('inf'), float('inf')
    for epoch in range(args.epoches):
        print('=' * 20, epoch + 1, '=' * 20)
        train_results = train_one_epoch(train_loader, model, loss_fn,
                                        optimizer)
        print_train_info(train_results)
        test_results = test_one_epoch(test_loader, model, loss_fn)
        print_train_info(test_results)

        if epoch % args.saved_frequency == 0:
            writer.add_scalar('Loss/train', train_results['loss'], epoch + 1)
            writer.add_scalar('Loss/test', test_results['loss'], epoch + 1)
            writer.add_scalar('RError/train', train_results['r_mse'],
                              epoch + 1)
            writer.add_scalar('RError/test', test_results['r_mse'], epoch + 1)
            writer.add_scalar('rotError/train', train_results['r_isotropic'],
                              epoch + 1)
            writer.add_scalar('rotError/test', test_results['r_isotropic'],
                              epoch + 1)
            writer.add_scalar('Lr', optimizer.param_groups[0]['lr'], epoch + 1)
        test_loss, test_r_error, test_rot_error = \
            test_results['loss'], test_results['r_mse'], test_results['r_isotropic']
        if test_loss < test_min_loss:
            saved_path = os.path.join(checkpoints_path, "test_min_loss.pth")
            torch.save(model.state_dict(), saved_path)
            test_min_loss = test_loss
        if test_r_error < test_min_r_mse_error:
            saved_path = os.path.join(checkpoints_path,
                                      "test_min_rmse_error.pth")
            torch.save(model.state_dict(), saved_path)
            test_min_r_mse_error = test_r_error
        if test_rot_error < test_min_rot_error:
            saved_path = os.path.join(checkpoints_path,
                                      "test_min_rot_error.pth")
            torch.save(model.state_dict(), saved_path)
            test_min_rot_error = test_rot_error
        scheduler.step()
Example #18
def train(args, io):
    device = torch.device("cuda" if args.cuda else "cpu")

    if args.pre_train:
        train_loader = DataLoader(ShapeNetPerm(partition='train',
                                               num_points=args.num_points),
                                  num_workers=1,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  drop_last=True)
        test_loader = DataLoader(ShapeNetPerm(partition='test',
                                              num_points=args.num_points),
                                 num_workers=1,
                                 batch_size=args.test_batch_size,
                                 shuffle=True,
                                 drop_last=False)
        #        model = pct_semantic(args).to(device)
        model = Pct_semantic(args).to(device)
    else:
        train_loader = DataLoader(ModelNet40(partition='train',
                                             num_points=args.num_points),
                                  num_workers=8,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  drop_last=True)
        test_loader = DataLoader(ModelNet40(partition='test',
                                            num_points=args.num_points),
                                 num_workers=8,
                                 batch_size=args.test_batch_size,
                                 shuffle=True,
                                 drop_last=False)
        #        model = Pct(args).to(device)
        model = pct_simple(args).to(device)

    print(str(model))
    model = nn.DataParallel(model)

    if args.cont:
        model.load_state_dict(torch.load(args.model_path))

    if args.load_pretrain:
        print('loading pretrained model')
        pretrain_model = pct_semantic(args).to(device)
        pretrain_model = nn.DataParallel(pretrain_model)
        pretrain_model.load_state_dict(torch.load(args.model_path))
        pretrained_dict = pretrain_model.state_dict()
        model_dict = model.state_dict()
        #filter out unnecessary keys
        pretrained_dict = {
            k: v
            for k, v in pretrained_dict.items() if k in model_dict
        }
        #overwrite entries in the existing state dict
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict, strict=True)
        print('pretrained model loaded')

    if args.use_sgd:
        print("Use SGD")
        opt = optim.SGD(model.parameters(),
                        lr=args.lr * 100,
                        momentum=args.momentum,
                        weight_decay=5e-4)
    else:
        print("Use Adam")
        opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)

    scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=args.lr)
    if args.pre_train:
        criterion = nn.CrossEntropyLoss()
    else:
        criterion = cal_loss
    best_test_acc = 0
    best_test_loss = 9999.

    for epoch in range(args.epochs):
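        # note: since PyTorch 1.1 the recommended order is optimizer.step() first, then scheduler.step() at the end of the epoch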
        scheduler.step()
        train_loss = 0.0
        count = 0.0
        model.train()
        train_pred = []
        train_true = []
        idx = 0
        total_time = 0.0
        for data, label in (train_loader):
            data, label = data.to(device), label.to(device).squeeze()
            #            print(data.size())
            #            print(label.size())
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            opt.zero_grad()

            start_time = time.time()
            logits = model(data)
            #            print(logits.size())
            #            print(label.size())
            if args.pre_train:
                loss = criterion(logits, label)
            else:
                loss = criterion(logits,
                                 label,
                                 smoothing=(args.pre_train),
                                 pre_train=args.pre_train)
            loss.backward()
            opt.step()
            end_time = time.time()
            total_time += (end_time - start_time)

            preds = logits.max(dim=1)[1]
            count += batch_size
            train_loss += loss.item() * batch_size
            train_true.append(label.cpu().numpy())
            train_pred.append(preds.detach().cpu().numpy())
            idx += 1

        print('train total time is', total_time)
        train_true = np.concatenate(train_true)
        train_pred = np.concatenate(train_pred)
        if args.pre_train:
            train_diff = train_true - train_pred
            correct = np.where(train_diff == 0)[0]
            #        print('correct shape: {0}'.format(correct.shape))
            #        print('total shape: {0}'.format(train_diff.shape))
            total_samples = train_diff.shape[0] * train_diff.shape[1]
            accuracy = correct.shape[0] / total_samples

            outstr = 'Train %d, loss: %.6f, train acc: %.6f, train avg acc: %.6f' % (
                epoch, train_loss * 1.0 / count, accuracy, 0.)
#                                                                                metrics.accuracy_score(
#                                                                                train_true, train_pred),
#                                                                                metrics.balanced_accuracy_score(
#                                                                                train_true, train_pred))
        else:
            outstr = 'Train %d, loss: %.6f, train acc: %.6f, train avg acc: %.6f' % (
                epoch,
                train_loss * 1.0 / count,
                #                                                                                accuracy, 0.)
                metrics.accuracy_score(train_true, train_pred),
                metrics.balanced_accuracy_score(train_true, train_pred))
        io.cprint(outstr)

        ####################
        # Test
        ####################
        test_loss = 0.0
        count = 0.0
        model.eval()
        test_pred = []
        test_true = []
        total_time = 0.0
        for data, label in test_loader:
            data, label = data.to(device), label.to(device).squeeze()
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            start_time = time.time()
            logits = model(data)
            end_time = time.time()
            total_time += (end_time - start_time)
            loss = criterion(logits, label)
            preds = logits.max(dim=1)[1]
            count += batch_size
            test_loss += loss.item() * batch_size
            test_true.append(label.cpu().numpy())
            test_pred.append(preds.detach().cpu().numpy())
        print('test total time is', total_time)
        test_true = np.concatenate(test_true)
        test_pred = np.concatenate(test_pred)

        #        test_diff = test_true - test_pred
        #        correct_test = np.where(test_diff == 0)[0]
        #        print('correct shape: {0}'.format(correct.shape))
        #        print('total shape: {0}'.format(train_diff.shape))
        #        total_samples_test = test_diff.shape[0]*test_diff.shape[1]
        #        accuracy_test = correct_test.shape[0]/total_samples_test

        #        test_acc = 0.
        #        test_acc = accuracy_test

        #        avg_per_class_acc = 0.
        if args.pre_train:
            # per-point accuracy over the test set (labels and predictions are 2-D here);
            # the original block re-printed the training metrics by copy-paste mistake
            test_diff = test_true - test_pred
            correct = np.where(test_diff == 0)[0]
            total_samples = test_diff.shape[0] * test_diff.shape[1]
            test_acc = correct.shape[0] / total_samples

            outstr = 'Test %d, loss: %.6f, test acc: %.6f, test avg acc: %.6f' % (
                epoch, test_loss * 1.0 / count, test_acc, 0.)
        else:

            test_acc = metrics.accuracy_score(test_true, test_pred)
            avg_per_class_acc = metrics.balanced_accuracy_score(
                test_true, test_pred)
            outstr = 'Test %d, loss: %.6f, test acc: %.6f, test avg acc: %.6f' % (
                epoch, test_loss * 1.0 / count, test_acc, avg_per_class_acc)
        io.cprint(outstr)
        if args.pre_train:
            if test_acc >= best_test_acc:
                best_test_acc = test_acc
                torch.save(model.state_dict(),
                           'checkpoints/%s/models/model.t7' % args.exp_name)
        else:
            if test_loss <= best_test_loss:
                best_test_loss = test_loss
                print('Saving Checkpoint...')
                if args.cont:
                    torch.save(
                        model.state_dict(),
                        'checkpoints/%s/models/model1.t7' % args.exp_name)
                else:
                    torch.save(
                        model.state_dict(),
                        'checkpoints/%s/models/model.t7' % args.exp_name)
Example #19
def test(args, io):
    #test_loader = DataLoader(ModelNet40(partition='test', num_points=args.num_points),
    #                         batch_size=args.test_batch_size, shuffle=True, drop_last=False)
    test_loader = DataLoader(ModelNet40(partition='test',
                                        num_points=args.num_points),
                             batch_size=args.test_batch_size,
                             shuffle=False,
                             drop_last=False)

    device = torch.device("cuda" if args.cuda else "cpu")

    #Try to load models
    if args.model == 'pointnet':
        model = PointNet(args).to(device)
    elif args.model == 'dgcnn':
        model = DGCNN(args).to(device)
    elif args.model == 'TransformerBaseline':
        model = DGCNN_Transformer(args).to(device)
    elif args.model == 'TemporalTransformer':
        model = DGCNN_TemporalTransformer(args).to(device)
    elif args.model == 'TemporalTransformer_v2':
        model = DGCNN_TemporalTransformer_v2(args).to(device)
    elif args.model == 'TemporalTransformer_v3':
        model = DGCNN_TemporalTransformer_v3(args).to(device)
    elif args.model == 'pi':
        model = pi_DGCNN(args).to(device)
    elif args.model == 'pi2':
        model = pi_DGCNN_v2(args).to(device)
    elif args.model == 'pipoint':
        model = pipoint_DGCNN(args).to(device)
    else:
        raise Exception("Not implemented")

    print(model)
    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    model = nn.DataParallel(model)

    if args.model_path:
        if os.path.isfile(args.model_path):
            print("=> loading checkpoint '{}'".format(args.model_path))
            checkpoint = torch.load(args.model_path)
            print(checkpoint)

            if 'epoch' in checkpoint:
                args.start_epoch = checkpoint['epoch']
            #best_prec1 = checkpoint['best_prec1']
            #best_prec5 = checkpoint['best_prec5']

            if 'state_dict' in checkpoint:
                model.load_state_dict(checkpoint['state_dict'], strict=False)
            else:
                model.load_state_dict(checkpoint, strict=False)

            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.model_path, args.start_epoch))
        else:
            print("=> no checkpoint found at '{}'".format(args.model_path))
    #model.load_state_dict(torch.load(args.model_path))

    model = model.eval()
    test_acc = 0.0
    count = 0.0
    test_true = []
    test_pred = []

    batch_time = AverageMeter()

    end = time.time()
    for i, (data, label) in enumerate(test_loader):

        data, label = data.to(device), label.to(device).squeeze()
        data = data.permute(0, 2, 1)
        batch_size = data.size()[0]

        if args.model in ["pi", "pipoint", "pi2"]:
            logits, atts = model(data)
        elif args.model in [
                "TransformerBaseline", "TemporalTransformer",
                "TemporalTransformer_v2"
        ]:
            logits = model(data)
        else:
            logits, degree = model(data)

        preds = logits.max(dim=1)[1]
        test_true.append(label.cpu().numpy())
        test_pred.append(preds.detach().cpu().numpy())
        batch_time.update(time.time() - end)
        end = time.time()

        if i % 10 == 0:
            print('Test {}, Time {batch_time.val:.3f} ({batch_time.avg:.3f})'.
                  format(i, batch_time=batch_time))

    test_true = np.concatenate(test_true)
    test_pred = np.concatenate(test_pred)
    test_acc = metrics.accuracy_score(test_true, test_pred)
    avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)

    per_class_acc = metrics.precision_score(test_true, test_pred, average=None)

    outstr = 'Test :: test acc: %.6f, test avg acc: %.6f' % (test_acc,
                                                             avg_per_class_acc)
    outstr_2 = 'Test per class acc: {}'.format(per_class_acc)
    io.cprint(outstr)
    io.cprint(outstr_2)

    if args.model in ["pi", "pipoint", "pi2"]:
        for j in range(4):
            io.cprint('Att {} : {}'.format(j, atts[j].mean().item()))
Example #20
File: main.py Project: WDot/HDGN
def train(args, io):
    train_dataset = ModelNet40(partition='train', num_points=args.num_points)
    train_loader = DataLoader(train_dataset,
                              num_workers=0,
                              batch_size=args.batch_size,
                              shuffle=True,
                              drop_last=True)
    test_dataset = ModelNet40(partition='test', num_points=args.num_points)
    test_loader = DataLoader(test_dataset,
                             num_workers=0,
                             batch_size=args.test_batch_size,
                             shuffle=False,
                             drop_last=False)

    device = 0

    #Try to load models
    if args.model == 'hdgn':
        model = HDGN(args).to(device)
    else:
        raise Exception("Not implemented")
    print(str(model))
    sys.stdout.flush()
    print("Let's use", torch.cuda.device_count(), "GPUs!")

    if args.use_sgd:
        print("Use SGD")
        opt = optim.SGD(model.parameters(),
                        lr=args.lr * 100,
                        momentum=args.momentum,
                        weight_decay=1e-4)
    else:
        print("Use Adam")
        opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)

    scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=args.lr)

    criterion = cal_loss

    best_test_acc = 0
    for epoch in range(args.epochs):
        scheduler.step()
        ####################
        # Train
        ####################
        train_loss = 0.0
        count = 0.0
        model.train()
        train_pred = []
        train_true = []
        for data, label in train_loader:
            data, label = data.to(device), label.to(device).squeeze(dim=-1)
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            opt.zero_grad()
            logits = model(data)
            loss = criterion(logits, label)
            loss.backward()
            opt.step()
            preds = logits.max(dim=1)[1]
            count += batch_size
            train_loss += loss.item() * batch_size
            train_true.append(label.cpu().numpy())
            train_pred.append(preds.detach().cpu().numpy())
        train_true = np.concatenate(train_true)
        train_pred = np.concatenate(train_pred)
        outstr = 'Train %d, loss: %.6f, train acc: %.6f, train avg acc: %.6f' % (
            epoch, train_loss * 1.0 / count,
            metrics.accuracy_score(train_true, train_pred),
            metrics.balanced_accuracy_score(train_true, train_pred))
        io.cprint(outstr)
        sys.stdout.flush()

        ####################
        # Test
        ####################

        test_loss = 0.0
        count = 0.0
        model.eval()
        test_pred = []
        test_true = []
        for data, label in test_loader:
            data, label = data.to(device), label.to(device).squeeze(dim=-1)
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            logits, wsl = model(data)
            loss = criterion(logits, label) + wsl
            preds = logits.max(dim=1)[1]
            count += batch_size
            test_loss += loss.item() * batch_size
            test_true.append(label.cpu().numpy())
            test_pred.append(preds.detach().cpu().numpy())
        test_true = np.concatenate(test_true)
        test_pred = np.concatenate(test_pred)
        test_acc = metrics.accuracy_score(test_true, test_pred)
        avg_per_class_acc = metrics.balanced_accuracy_score(
            test_true, test_pred)
        outstr = 'Test %d, loss: %.6f, test acc: %.6f, test avg acc: %.6f' % (
            epoch, test_loss * 1.0 / count, mean_test_acc, avg_per_class_acc)
        io.cprint(outstr)
        sys.stdout.flush()
        if mean_test_acc >= best_test_acc:
            best_test_acc = mean_test_acc
            torch.save(model.state_dict(),
                       'checkpoints/%s/models/model.t7' % args.exp_name)
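A note on the snippet above: the training loop treats model(data) as returning bare logits while the test loop unpacks (logits, wsl), so if HDGN really returns an auxiliary weight-structure loss, the training step should consume it the same way. A minimal sketch of that two-return pattern, with a hypothetical ModelWithAuxLoss standing in for HDGN (whose actual interface this page does not show):

import torch
import torch.nn as nn
import torch.nn.functional as F

class ModelWithAuxLoss(nn.Module):
    """Toy stand-in for a model that returns (logits, aux_loss), like the wsl term above."""

    def __init__(self, in_dim=3, num_classes=40):
        super().__init__()
        self.backbone = nn.Sequential(nn.Conv1d(in_dim, 64, 1), nn.ReLU(),
                                      nn.AdaptiveMaxPool1d(1))
        self.head = nn.Linear(64, num_classes)

    def forward(self, x):                       # x: (B, 3, N)
        feat = self.backbone(x).squeeze(-1)     # (B, 64)
        logits = self.head(feat)
        aux_loss = 1e-4 * feat.pow(2).mean()    # hypothetical regularizer standing in for wsl
        return logits, aux_loss

model = ModelWithAuxLoss()
opt = torch.optim.SGD(model.parameters(), lr=0.1)
data, label = torch.randn(8, 3, 1024), torch.randint(0, 40, (8,))
logits, aux = model(data)                       # unpack both terms in train and test alike
loss = F.cross_entropy(logits, label) + aux
opt.zero_grad()
loss.backward()
opt.step()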
Example No. 21
def train(args, io):
    train_loader = DataLoader(ModelNet40(partition='train',
                                         num_points=args.num_points),
                              num_workers=8,
                              batch_size=args.batch_size,
                              shuffle=True,
                              drop_last=True)
    test_loader = DataLoader(ModelNet40(partition='test',
                                        num_points=args.num_points),
                             num_workers=8,
                             batch_size=args.test_batch_size,
                             shuffle=False,
                             drop_last=False)

    device = torch.device("cuda" if args.cuda else "cpu")

    #Try to load models
    if args.model == 'pointnet':
        model = PointNet(args).to(device)
    elif args.model == 'dgcnn':
        model = DGCNN(args).to(device)
    else:
        model = PaiNet(args).to(device)
        #raise Exception("Not implemented")
    print(str(model))
    params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print("Total number of parameters is: {}".format(params))
    model = nn.DataParallel(model)
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    # if os.path.exists('checkpoints/%s/models/model_%s.t7'% (args.exp_name, args.model)):
    #     checkpoint_dict = torch.load('./checkpoints/%s/models/model_%s.t7'% (args.exp_name, args.model), map_location=device)
    #     model_dict = model.state_dict()
    #     pretrained_dict = checkpoint_dict
    #     pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict and "one_padding" not in k}
    #     model_dict.update(pretrained_dict)
    #     model.load_state_dict(pretrained_dict, strict=False)
    #     #model.load_state_dict(checkpoint_dict, strict=True)
    #     print("Load model from './checkpoints/%s/models/model_%s.t7 !'"% (args.exp_name, args.model))

    if args.use_sgd:
        print("Use SGD")

        trainables_wo_bn = [
            param for name, param in model.named_parameters()
            if param.requires_grad and 'bn' not in name and 'kernals' not in name
        ]
        trainables_wt_bn = [
            param for name, param in model.named_parameters()
            if param.requires_grad and ('bn' in name or 'kernals' in name)
        ]
        opt = optim.SGD([{
            'params': trainables_wo_bn,
            'weight_decay': 5e-5
        }, {
            'params': trainables_wt_bn
        }],
                        lr=args.lr * 100,
                        momentum=args.momentum)
        #opt = optim.SGD(model.parameters(), lr=args.lr*100, momentum=args.momentum, weight_decay=1e-4)
    else:
        print("Use Adam")
        opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)

    scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=args.lr)
    criterion = cal_loss

    best_test_acc = 0  # best previously reached with this setup: 0.931929
    for epoch in range(args.epochs):
        ####################
        # Train
        ####################
        train_loss = 0.0
        count = 0.0
        model.train()
        train_pred = []
        train_true = []
        for b, (data, label) in enumerate(tqdm(train_loader, ncols=0)):
            data, label = data.to(device), label.to(device).squeeze()
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            opt.zero_grad()
            logits = model(data)
            loss = criterion(logits, label)
            loss.backward()
            opt.step()
            preds = logits.max(dim=1)[1]
            count += batch_size
            train_loss += loss.item() * batch_size
            train_true.append(label.cpu().numpy())
            train_pred.append(preds.detach().cpu().numpy())

        scheduler.step()
        # if epoch % 100 == 0:
        #     scheduler = CosineAnnealingLR(opt, 50, eta_min=args.lr)
        train_true = np.concatenate(train_true)
        train_pred = np.concatenate(train_pred)
        outstr = 'Train %d, loss: %.6f, train acc: %.6f, train avg acc: %.6f, lr: %.6f' % (
            epoch, train_loss * 1.0 / count,
            metrics.accuracy_score(train_true, train_pred),
            metrics.balanced_accuracy_score(train_true,
                                            train_pred), scheduler.get_last_lr()[0])
        io.cprint(outstr)

        ####################
        # Test
        ####################
        test_loss = 0.0
        count = 0.0
        model.eval()
        test_pred = []
        test_true = []
        with torch.no_grad():
            for data, label in test_loader:

                data, label = data.to(device), label.to(device).squeeze()
                data = data.permute(0, 2, 1)
                batch_size = data.size()[0]
                logits = model(data)
                loss = criterion(logits, label)
                preds = logits.max(dim=1)[1]
                count += batch_size
                test_loss += loss.item() * batch_size
                test_true.append(label.cpu().numpy())
                test_pred.append(preds.detach().cpu().numpy())
        test_true = np.concatenate(test_true)
        test_pred = np.concatenate(test_pred)
        test_acc = metrics.accuracy_score(test_true, test_pred)
        avg_per_class_acc = metrics.balanced_accuracy_score(
            test_true, test_pred)
        if test_acc >= best_test_acc:
            best_test_acc = test_acc
            torch.save(
                model.state_dict(), 'checkpoints/%s/models/model_%s.t7' %
                (args.exp_name, args.model))
        outstr = 'Test %d, loss: %.6f, test acc: %.6f, test avg acc: %.6f, test best: %.6f' % (
            epoch, test_loss * 1.0 / count, test_acc, avg_per_class_acc,
            best_test_acc)
        io.cprint(outstr)
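The SGD branch above applies weight decay only to parameters outside batch norm (and outside the repo's 'kernals' tensors) by splitting them into two optimizer parameter groups. A stripped-down sketch of the same technique on a toy model; the ndim == 1 test is an assumption that covers batch-norm scales/offsets and biases:

import torch.nn as nn
import torch.optim as optim

model = nn.Sequential(nn.Conv1d(3, 64, 1), nn.BatchNorm1d(64), nn.ReLU(),
                      nn.Conv1d(64, 40, 1))

decay, no_decay = [], []
for name, param in model.named_parameters():
    if not param.requires_grad:
        continue
    # one-dimensional tensors (batch-norm scale/offset, biases) are left undecayed
    (no_decay if param.ndim == 1 or 'bn' in name else decay).append(param)

opt = optim.SGD([{'params': decay, 'weight_decay': 5e-5},
                 {'params': no_decay, 'weight_decay': 0.0}],
                lr=0.1, momentum=0.9)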
Example No. 22
def main():
    arg_bool = lambda x: x.lower() in ['true', 't', '1']
    parser = argparse.ArgumentParser(description='Point Cloud Registration')
    parser.add_argument('--exp_name',
                        type=str,
                        default='exp',
                        metavar='N',
                        help='Name of the experiment')
    parser.add_argument('--num_iter',
                        type=int,
                        default=4,
                        metavar='N',
                        help='Number of iteration inside the network')
    parser.add_argument('--emb_nn',
                        type=str,
                        default='GNN',
                        metavar='N',
                        help='Feature extraction method. [GNN]')
    parser.add_argument('--emb_dims',
                        type=int,
                        default=64,
                        metavar='N',
                        help='Dimension of embeddings')
    parser.add_argument('--batch_size',
                        type=int,
                        default=16,
                        metavar='batch_size',
                        help='Size of batch')
    parser.add_argument('--test_batch_size',
                        type=int,
                        default=16,
                        metavar='batch_size',
                        help='Size of batch')
    parser.add_argument('--epochs',
                        type=int,
                        default=50,
                        metavar='N',
                        help='number of epochs to train')
    parser.add_argument('--unseen',
                        type=arg_bool,
                        default='True',
                        help='Test on unseen categories')
    parser.add_argument('--gaussian_noise',
                        type=arg_bool,
                        default='True',
                        help='Whether to add Gaussian noise')
    parser.add_argument(
        '--alpha',
        type=float,
        default=0.75,
        metavar='N',
        help='Fraction of points when sampling partial point cloud')
    parser.add_argument('--factor',
                        type=float,
                        default=4,
                        metavar='N',
                        help='Divided factor for rotations')  # rotation angle stays within pi/4
    parser.add_argument('--pretrained',
                        type=arg_bool,
                        default='False',
                        help='Load pretrained weight')

    args = parser.parse_args()
    print(args)

    ##### make checkpoint directory and backup #####
    if not os.path.exists('checkpoints'):
        os.makedirs('checkpoints')
    if not os.path.exists('checkpoints/' + args.exp_name):
        os.makedirs('checkpoints/' + args.exp_name)
    if not os.path.exists('checkpoints/' + args.exp_name + '/' + 'models'):
        os.makedirs('checkpoints/' + args.exp_name + '/' + 'models')
    os.system('cp main.py checkpoints' + '/' + args.exp_name + '/' +
              'main.py.backup')
    os.system('cp model.py checkpoints' + '/' + args.exp_name + '/' +
              'model.py.backup')
    os.system('cp data.py checkpoints' + '/' + args.exp_name + '/' +
              'data.py.backup')
    ##### make checkpoint directory and backup #####

    io = IOStream('checkpoints/' + args.exp_name + '/log.txt')
    io.cprint(str(args))

    ##### load data #####
    train_loader = DataLoader(ModelNet40(partition='train',
                                         alpha=args.alpha,
                                         gaussian_noise=args.gaussian_noise,
                                         unseen=args.unseen,
                                         factor=args.factor),
                              batch_size=args.batch_size,
                              shuffle=True,
                              drop_last=True,
                              num_workers=8)
    test_loader = DataLoader(ModelNet40(partition='test',
                                        alpha=args.alpha,
                                        gaussian_noise=args.gaussian_noise,
                                        unseen=args.unseen,
                                        factor=args.factor),
                             batch_size=args.test_batch_size,
                             shuffle=False,
                             drop_last=False,
                             num_workers=8)

    ##### load model #####
    net = MFGNet(GNN(args.emb_dims), args).cuda()
    # net.load_state_dict(torch.load('model_gaussian.t7'))
    # for param in net.parameters():
    #     print(param.name, param.size())

    ##### train #####
    train(args, net, train_loader, test_loader, io)
    io.close()
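The os.system('cp ...') backup above is Unix-only and fails silently elsewhere. A portable sketch of the same make-checkpoint-directory-and-back-up-sources step using os.makedirs and shutil.copy (backup_sources is a hypothetical helper name):

import os
import shutil

def backup_sources(exp_name, files=('main.py', 'model.py', 'data.py')):
    """Create checkpoints/<exp_name>/models and copy the listed sources as .backup files."""
    os.makedirs(os.path.join('checkpoints', exp_name, 'models'), exist_ok=True)
    for fname in files:
        if os.path.exists(fname):  # skip sources that are absent instead of erroring
            shutil.copy(fname, os.path.join('checkpoints', exp_name, fname + '.backup'))

backup_sources('exp')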
Example No. 23
def main():
    parser = argparse.ArgumentParser(description='Point Cloud Registration')
    parser.add_argument('--exp_name',
                        type=str,
                        default='exp',
                        metavar='N',
                        help='Name of the experiment')
    parser.add_argument('--model',
                        type=str,
                        default='dcp',
                        metavar='N',
                        choices=['dcp'],
                        help='Model to use, [dcp]')
    parser.add_argument('--emb_nn',
                        type=str,
                        default='dgcnn',
                        metavar='N',
                        choices=['pointnet', 'dgcnn'],
                        help='Embedding nn to use, [pointnet, dgcnn]')
    parser.add_argument(
        '--pointer',
        type=str,
        default='transformer',
        metavar='N',
        choices=['identity', 'transformer'],
        help='Attention-based pointer generator to use, [identity, transformer]'
    )
    parser.add_argument('--head',
                        type=str,
                        default='svd',
                        metavar='N',
                        choices=[
                            'mlp',
                            'svd',
                        ],
                        help='Head to use, [mlp, svd]')
    parser.add_argument('--emb_dims',
                        type=int,
                        default=512,
                        metavar='N',
                        help='Dimension of embeddings')
    parser.add_argument('--n_blocks',
                        type=int,
                        default=1,
                        metavar='N',
                        help='Num of blocks of encoder&decoder')
    parser.add_argument('--n_heads',
                        type=int,
                        default=16,
                        metavar='N',
                        help='Num of heads in multiheadedattention')
    parser.add_argument('--ff_dims',
                        type=int,
                        default=1024,
                        metavar='N',
                        help='Num of dimensions of fc in transformer')
    parser.add_argument('--dropout',
                        type=float,
                        default=0.0,
                        metavar='N',
                        help='Dropout ratio in transformer')
    parser.add_argument('--batch_size',
                        type=int,
                        default=16,
                        metavar='batch_size',
                        help='Size of batch')
    parser.add_argument('--test_batch_size',
                        type=int,
                        default=10,
                        metavar='batch_size',
                        help='Size of batch')
    parser.add_argument('--epochs',
                        type=int,
                        default=250,
                        metavar='N',
                        help='number of epochs to train')
    parser.add_argument('--use_sgd',
                        action='store_true',
                        default=False,
                        help='Use SGD')
    parser.add_argument(
        '--lr',
        type=float,
        default=0.001,
        metavar='LR',
        help='learning rate (default: 0.001, 0.1 if using sgd)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.9,
                        metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--no_cuda',
                        action='store_true',
                        default=False,
                        help='enables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=1234,
                        metavar='S',
                        help='random seed (default: 1234)')
    parser.add_argument('--eval',
                        action='store_true',
                        default=False,
                        help='evaluate the model')
    parser.add_argument('--cycle',
                        type=bool,
                        default=False,
                        metavar='N',
                        help='Whether to use cycle consistency')
    parser.add_argument('--gaussian_noise',
                        type=bool,
                        default=False,
                        metavar='N',
                        help='Whether to add Gaussian noise')
    parser.add_argument('--unseen',
                        type=bool,
                        default=False,
                        metavar='N',
                        help='Whether to test on unseen categories')
    parser.add_argument('--num_points',
                        type=int,
                        default=1024,
                        metavar='N',
                        help='Num of points to use')
    parser.add_argument('--dataset',
                        type=str,
                        default='modelnet40',
                        choices=['modelnet40', 'threedmatch'],
                        metavar='N',
                        help='dataset to use')
    parser.add_argument('--factor',
                        type=float,
                        default=4,
                        metavar='N',
                        help='Divided factor for rotations')
    parser.add_argument('--model_path',
                        type=str,
                        default='',
                        metavar='N',
                        help='Pretrained model path')
    parser.add_argument('--betas',
                        type=float,
                        default=(0.9, 0.999),
                        metavar='N',
                        nargs='+',
                        help='Betas for adam')
    parser.add_argument('--same_pointclouds',
                        type=bool,
                        default=True,
                        metavar='N',
                        help='R*src + t should exactly match the target')
    parser.add_argument('--debug',
                        type=bool,
                        default=False,
                        metavar='N',
                        help='saves variables in folder variables_storage')
    parser.add_argument('--num_itr_test',
                        type=int,
                        default=1,
                        metavar='N',
                        help='Num of net() during testing')
    parser.add_argument(
        '--loss',
        type=str,
        default='cross_entropy_corr',
        metavar='N',
        choices=['cross_entropy_corr', 'mse_transf'],
        help='loss function: choose one of [mse_transf or cross_entropy_corr]')
    parser.add_argument('--cut_plane',
                        type=bool,
                        default=False,
                        metavar='N',
                        help='generates partial data')
    parser.add_argument('--one_cloud',
                        type=bool,
                        default=False,
                        metavar='N',
                        help='test for one unseen cloud')
    parser.add_argument(
        '--partial',
        type=float,
        default=0.0,
        metavar='N',
        help='partial = 0.1 ==> (num_points*partial) will be removed')
    parser.add_argument('--pretrained',
                        type=bool,
                        default=False,
                        metavar='N',
                        help='load pretrained model')

    args = parser.parse_args()

    # for deterministic training
    torch.backends.cudnn.deterministic = True
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)

    boardio = SummaryWriter(log_dir='checkpoints/' + args.exp_name)
    _init_(args)

    textio = IOStream('checkpoints/' + args.exp_name + '/run.log')
    textio.cprint(str(args))

    # dataloading
    num_workers = 32
    if args.dataset == 'modelnet40':
        train_dataset = ModelNet40(num_points=args.num_points,
                                   partition='train',
                                   gaussian_noise=args.gaussian_noise,
                                   unseen=args.unseen,
                                   factor=args.factor,
                                   same_pointclouds=args.same_pointclouds,
                                   partial=args.partial,
                                   cut_plane=args.cut_plane)
        train_loader = DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  drop_last=True,
                                  num_workers=num_workers)

        test_dataset = ModelNet40(num_points=args.num_points,
                                  partition='test',
                                  gaussian_noise=args.gaussian_noise,
                                  unseen=args.unseen,
                                  factor=args.factor,
                                  same_pointclouds=args.same_pointclouds,
                                  partial=args.partial,
                                  cut_plane=args.cut_plane)
        test_loader = DataLoader(test_dataset,
                                 batch_size=args.test_batch_size,
                                 shuffle=False,
                                 drop_last=False,
                                 num_workers=num_workers)

    else:
        raise Exception("not implemented")

    # model loading
    if args.model == 'dcp':
        net = DCP(args).cuda()
        if args.eval:
            if args.model_path == '':
                model_path = 'checkpoints' + '/' + args.exp_name + '/models/model.best.t7'
            else:
                model_path = args.model_path
                print("Model loaded from ", model_path)
            if not os.path.exists(model_path):
                print("can't find pretrained model")
                return
            net.load_state_dict(torch.load(model_path), strict=False)
        if args.pretrained:
            if args.model_path == '':
                print('Please specify a path to pretrained weights, '
                      'e.g. checkpoints/partial_global_512_identical/models/model.best.t7')
                return
            model_path = args.model_path
            print("Using pretrained weights stored at:\n{}".format(model_path))
            net.load_state_dict(torch.load(model_path), strict=False)

        if torch.cuda.device_count() > 1:
            net = nn.DataParallel(net)
            print("Let's use", torch.cuda.device_count(), "GPUs!")
    else:
        raise Exception('Not implemented')

    # training and evaluation
    if args.eval:
        if args.one_cloud:  # testing on a single point cloud
            print("one_cloud")
            test_bunny(args, net)

        else:
            test(args, net, test_loader, boardio, textio)

    else:
        train(args, net, train_loader, test_loader, boardio, textio)

    print('FINISH')
    boardio.close()
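Many flags in this example use type=bool, which argparse evaluates as bool(string): any non-empty string, including 'False', parses as True, so --unseen False silently enables the option. The arg_bool lambda in Example No. 22 is the right pattern; a reusable version (str2bool is a hypothetical name):

import argparse

def str2bool(value):
    """Parse common textual booleans; reject anything ambiguous."""
    if isinstance(value, bool):
        return value
    if value.lower() in ('true', 't', '1', 'yes', 'y'):
        return True
    if value.lower() in ('false', 'f', '0', 'no', 'n'):
        return False
    raise argparse.ArgumentTypeError('expected a boolean, got %r' % value)

parser = argparse.ArgumentParser()
parser.add_argument('--unseen', type=str2bool, default=False, help='Test on unseen categories')
print(parser.parse_args(['--unseen', 'False']).unseen)  # False, unlike with type=bool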
Example No. 24
def train(args, io):
    train_loader = DataLoader(ModelNet40(partition='train', num_points=args.num_points), num_workers=8,
                              batch_size=args.batch_size, shuffle=True, drop_last=True)
    test_loader = DataLoader(ModelNet40(partition='test', num_points=args.num_points), num_workers=8,
                             batch_size=args.test_batch_size, shuffle=True, drop_last=False)

    device = torch.device("cuda" if args.cuda else "cpu")

    #Try to load models
    if args.model == 'pointnet':
        model = PointNet(args).to(device)
    elif args.model == 'dgcnn':
        model = DGCNN(args).to(device)
    elif args.model == 'semigcn':
        model = SemiGCN(args).to(device)
    else:
        raise Exception("Not implemented")
    print(str(model))

    model = nn.DataParallel(model)
    print("Let's use", torch.cuda.device_count(), "GPUs!")

    if args.use_sgd:
        print("Use SGD")
        opt = optim.SGD(model.parameters(), lr=args.lr*100, momentum=args.momentum, weight_decay=1e-4)
    else:
        print("Use Adam")
        opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)
        
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            opt.load_state_dict(checkpoint['opt'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
            
    # alternative schedulers kept for reference:
    #scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=args.lr, last_epoch=args.start_epoch-1)
    #scheduler = torch.optim.lr_scheduler.ExponentialLR(opt, gamma=0.9825, last_epoch=args.start_epoch-1)
    scheduler = torch.optim.lr_scheduler.StepLR(opt, step_size=20, gamma=0.8)  # gamma was 0.7 previously
    
    criterion = cal_loss

    best_test_acc = 0
    for epoch in range(args.start_epoch, args.epochs):
        #scheduler.step()
        ####################
        # Train
        ####################
        train_loss = 0.0
        count = 0.0
        model.train()
        train_pred = []
        train_true = []
        for data, label in train_loader:
            data, label = data.to(device), label.to(device).squeeze()
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            opt.zero_grad()
            logits = model(data)
            loss = criterion(logits, label)
            loss.backward()
            opt.step()
            preds = logits.max(dim=1)[1]
            count += batch_size
            train_loss += loss.item() * batch_size
            train_true.append(label.cpu().numpy())
            train_pred.append(preds.detach().cpu().numpy())
        scheduler.step()
        train_true = np.concatenate(train_true)
        train_pred = np.concatenate(train_pred)
        outstr = 'Train %d, loss: %.6f, train acc: %.6f, train avg acc: %.6f' % (epoch,
                                                                                 train_loss*1.0/count,
                                                                                 metrics.accuracy_score(
                                                                                     train_true, train_pred),
                                                                                 metrics.balanced_accuracy_score(
                                                                                     train_true, train_pred))
        io.cprint(outstr)
        if epoch%10 == 0:
            # save running checkpoint per 10 epoch
            torch.save({'epoch': epoch + 1,
                        'arch': args.model,
                        'state_dict': model.state_dict(),
                        'opt' : opt.state_dict()},
                        'checkpoints/%s/models/checkpoint_latest.pth.tar' % args.exp_name)
        ####################
        # Test
        ####################
        test_loss = 0.0
        count = 0.0
        model.eval()
        test_pred = []
        test_true = []
        with torch.no_grad():
            for data, label in test_loader:
                data, label = data.to(device), label.to(device).squeeze()
                data = data.permute(0, 2, 1)
                batch_size = data.size()[0]
                logits = model(data)
                loss = criterion(logits, label)
                preds = logits.max(dim=1)[1]
                count += batch_size
                test_loss += loss.item() * batch_size
                test_true.append(label.cpu().numpy())
                test_pred.append(preds.detach().cpu().numpy())
        test_true = np.concatenate(test_true)
        test_pred = np.concatenate(test_pred)
        test_acc = metrics.accuracy_score(test_true, test_pred)
        avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
        outstr = 'Test %d, loss: %.6f, test acc: %.6f, test avg acc: %.6f' % (epoch,
                                                                              test_loss*1.0/count,
                                                                              test_acc,
                                                                              avg_per_class_acc)
        io.cprint(outstr)
        if test_acc >= best_test_acc:
            best_test_acc = test_acc
            torch.save({'epoch': epoch + 1,
                        'arch': args.model,
                        'state_dict': model.state_dict(),
                        'opt' : opt.state_dict()},
                        'checkpoints/%s/models/checkpoint_best.pth.tar' % args.exp_name)
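This example saves resumable checkpoints: one dict holding the epoch, architecture tag, model state, and optimizer state. A self-contained sketch of both halves of that pattern, with hypothetical helper names:

import os
import torch
import torch.nn as nn
import torch.optim as optim

model = nn.Linear(3, 40)
opt = optim.Adam(model.parameters(), lr=1e-3)

def save_checkpoint(path, model, opt, epoch, arch='dgcnn'):
    torch.save({'epoch': epoch + 1, 'arch': arch,
                'state_dict': model.state_dict(), 'opt': opt.state_dict()}, path)

def resume(path, model, opt):
    """Restore model/optimizer state; return the epoch to continue from (0 if no file)."""
    if not os.path.isfile(path):
        return 0
    ckpt = torch.load(path, map_location='cpu')
    model.load_state_dict(ckpt['state_dict'])
    opt.load_state_dict(ckpt['opt'])
    return ckpt['epoch']

save_checkpoint('checkpoint_latest.pth.tar', model, opt, epoch=4)
start_epoch = resume('checkpoint_latest.pth.tar', model, opt)  # continues from epoch 5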
Example No. 25
def train(args, io):
    train_loader = DataLoader(ModelNet40(partition='train', num_points=args.num_points), num_workers=8,
                            batch_size=args.batch_size, shuffle=True, drop_last=True)
    test_loader = DataLoader(ModelNet40(partition='test', num_points=args.num_points), num_workers=8,
                            batch_size=args.test_batch_size, shuffle=True, drop_last=False)

    device = torch.device("cuda" if args.cuda else "cpu")

    model = Pct(args).to(device)
    print(str(model))
    model = nn.DataParallel(model)

    if args.use_sgd:
        print("Use SGD")
        opt = optim.SGD(model.parameters(), lr=args.lr*100, momentum=args.momentum, weight_decay=1e-4)
    else:
        print("Use Adam")
        opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)

    scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=args.lr)
    
    criterion = cal_loss
    best_test_acc = 0

    for epoch in range(args.epochs):
        train_loss = 0.0
        count = 0.0
        model.train()
        train_pred = []
        train_true = []
        idx = 0
        total_time = 0.0
        for data, label in train_loader:
            data, label = data.to(device), label.to(device).squeeze() 
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            opt.zero_grad()

            start_time = time.time()
            logits = model(data)
            loss = criterion(logits, label)
            loss.backward()
            opt.step()
            end_time = time.time()
            total_time += (end_time - start_time)
            
            preds = logits.max(dim=1)[1]
            count += batch_size
            train_loss += loss.item() * batch_size
            train_true.append(label.cpu().numpy())
            train_pred.append(preds.detach().cpu().numpy())
            idx += 1
            
        scheduler.step()  # step once per epoch, after the optimizer updates
        print('train total time is', total_time)
        train_true = np.concatenate(train_true)
        train_pred = np.concatenate(train_pred)
        outstr = 'Train %d, loss: %.6f, train acc: %.6f, train avg acc: %.6f' % (epoch,
                                                                                train_loss*1.0/count,
                                                                                metrics.accuracy_score(
                                                                                train_true, train_pred),
                                                                                metrics.balanced_accuracy_score(
                                                                                train_true, train_pred))
        io.cprint(outstr)

        ####################
        # Test
        ####################
        test_loss = 0.0
        count = 0.0
        model.eval()
        test_pred = []
        test_true = []
        total_time = 0.0
        with torch.no_grad():
            for data, label in test_loader:
                data, label = data.to(device), label.to(device).squeeze()
                data = data.permute(0, 2, 1)
                batch_size = data.size()[0]
                start_time = time.time()
                logits = model(data)
                end_time = time.time()
                total_time += (end_time - start_time)
                loss = criterion(logits, label)
                preds = logits.max(dim=1)[1]
                count += batch_size
                test_loss += loss.item() * batch_size
                test_true.append(label.cpu().numpy())
                test_pred.append(preds.detach().cpu().numpy())
        print('test total time is', total_time)
        test_true = np.concatenate(test_true)
        test_pred = np.concatenate(test_pred)
        test_acc = metrics.accuracy_score(test_true, test_pred)
        avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
        outstr = 'Test %d, loss: %.6f, test acc: %.6f, test avg acc: %.6f' % (epoch,
                                                                            test_loss*1.0/count,
                                                                            test_acc,
                                                                            avg_per_class_acc)
        io.cprint(outstr)
        if test_acc >= best_test_acc:
            best_test_acc = test_acc
            torch.save(model.state_dict(), 'checkpoints/%s/models/model.t7' % args.exp_name)
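One caveat about the time.time() brackets above: CUDA launches are asynchronous, so wall-clock timestamps around a forward or backward pass mostly measure kernel queueing unless something forces a synchronization. torch.cuda.Event timing, which a later example on this page also uses, is the more reliable pattern; a hedged sketch:

import time
import torch

def timed_forward(model, data):
    """Return (output, elapsed milliseconds); uses CUDA events when a GPU is present."""
    if torch.cuda.is_available():
        start = torch.cuda.Event(enable_timing=True)
        end = torch.cuda.Event(enable_timing=True)
        start.record()
        out = model(data)
        end.record()
        torch.cuda.synchronize()  # wait for queued kernels before reading the timer
        return out, start.elapsed_time(end)
    t0 = time.time()
    out = model(data)
    return out, (time.time() - t0) * 1000.0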
Example No. 26
def train(args, io):
    train_loader = DataLoader(ModelNet40(partition='train',
                                         num_points=args.num_points),
                              num_workers=8,
                              batch_size=args.batch_size,
                              shuffle=True,
                              drop_last=False)
    test_loader = DataLoader(ModelNet40(partition='test',
                                        num_points=args.num_points),
                             num_workers=8,
                             batch_size=args.test_batch_size,
                             shuffle=False,
                             drop_last=False)

    device = torch.device("cuda" if args.cuda else "cpu")

    #Try to load models
    if args.model == 'pointnet':
        model = PointNet(args).to(device)
    elif args.model == 'dgcnn':
        model = DGCNN(args).to(device)
    elif args.model == 'TransformerBaseline':
        model = DGCNN_Transformer(args).to(device)
    elif args.model == 'TemporalTransformer':
        model = DGCNN_TemporalTransformer(args).to(device)
    elif args.model == 'TemporalTransformer_v2':
        model = DGCNN_TemporalTransformer_v2(args).to(device)
    elif args.model == 'TemporalTransformer_v3':
        model = DGCNN_TemporalTransformer_v3(args).to(device)
    elif args.model == 'pi':
        model = pi_DGCNN(args).to(device)
    elif args.model == 'pi2':
        model = pi_DGCNN_v2(args).to(device)
    elif args.model == 'pipoint':
        model = pipoint_DGCNN(args).to(device)
    else:
        raise Exception("Not implemented")
    print(str(model))

    #model = nn.DataParallel(model, device_ids=list(range(3)))
    model = nn.DataParallel(model)
    print("Let's use", torch.cuda.device_count(), "GPUs!")

    # get the number of model parameters
    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    if args.use_sgd:
        print("Use SGD")
        optimizer = optim.SGD(model.parameters(),
                              lr=args.lr * 100,
                              momentum=args.momentum,
                              weight_decay=1e-4)
    else:
        print("Use Adam")
        optimizer = optim.Adam(model.parameters(),
                               lr=args.lr,
                               weight_decay=1e-4)

    scheduler = CosineAnnealingLR(optimizer, args.epochs, eta_min=args.lr)

    criterion = cal_loss

    best_test_acc = 0

    if args.model_path:
        if os.path.isfile(args.model_path):
            print("=> loading checkpoint '{}'".format(args.model_path))
            checkpoint = torch.load(args.model_path)
            print(checkpoint)

            if 'epoch' in checkpoint:
                args.start_epoch = checkpoint['epoch']
            #best_prec1 = checkpoint['best_prec1']
            #best_prec5 = checkpoint['best_prec5']

            if 'state_dict' in checkpoint:
                model.load_state_dict(checkpoint['state_dict'])
            else:
                model.load_state_dict(checkpoint, strict=False)

            if 'optimizer' in checkpoint:
                optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.model_path, args.start_epoch))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    end = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        scheduler.step()

        ####################
        # Train
        ####################
        train_loss = 0.0
        count = 0.0
        model.train()
        train_pred = []
        train_true = []

        batch_time = AverageMeter()
        data_time = AverageMeter()

        end = time.time()
        for i, (data, label) in enumerate(train_loader):
            data_time.update(time.time() - end)

            data, label = data.to(device), label.to(device).squeeze()

            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            optimizer.zero_grad()

            if args.model in ["pi", "pipoint", "pi2"]:
                logits, atts = model(data)
            elif args.model in [
                    "TransformerBaseline", "TemporalTransformer",
                    "TemporalTransformer_v2", "TemporalTransformer_v3"
            ]:
                logits = model(data)
            else:
                logits, degree = model(data)
            '''
            if args.visualize == True:

                print(args.visualize)

                import matplotlib.pyplot as plt
                #cmap = plt.cm.get_cmap("hsv", 30)
                cmap = plt.cm.get_cmap("binary", 40)
                cmap = np.array([cmap(i) for i in range(40)])[:,:3]
                obj = degree[7,:,:3].cpu().numpy()
                obj_degree = degree[7,:,3:].squeeze()
                obj_degree = obj_degree.cpu().numpy().astype(int)
              
                obj_max = np.max(obj_degree)
                obj_min = np.min(obj_degree)

                print(obj_max)
                print(obj_min)

                for i in range(obj_min, obj_max):
                    print("{} : {}".format(i, sum(obj_degree == i)))


                gt = cmap[obj_degree-obj_min, :]
                showpoints(obj, gt, gt, ballradius=3)
            '''
            loss = criterion(logits, label, smoothing=False)
            loss.backward()
            optimizer.step()

            preds = logits.max(dim=1)[1]
            count += batch_size
            train_loss += loss.item() * batch_size
            train_true.append(label.cpu().numpy())
            train_pred.append(preds.detach().cpu().numpy())

            batch_time.update(time.time() - end)
            end = time.time()

            if i % 10 == 0:
                print_str = 'Train {}, loss {}, Time {batch_time.val:.3f} ({batch_time.avg:.3f}), Data {data_time.val:.3f} ({data_time.avg:.3f})'.format(
                    epoch,
                    train_loss * 1.0 / count,
                    batch_time=batch_time,
                    data_time=data_time)
                print(print_str)

        train_true = np.concatenate(train_true)
        train_pred = np.concatenate(train_pred)

        outstr = 'Train %d, loss: %.6f, train acc: %.6f, train avg acc: %.6f' % (
            epoch, train_loss * 1.0 / count,
            metrics.accuracy_score(train_true, train_pred),
            metrics.balanced_accuracy_score(train_true, train_pred))

        #outstr = 'Train {}, Time {batch_time.val:.3f} ({batch_time.avg:.3f}), Data {data_time.val:.3f} ({data_time.avg:.3f}), loss: {}, train acc: {}, train avg acc: {}'.format(epoch, batch_time=batch_time, data_time=data_time, train_loss*1.0/count, metrics.accuracy_score(train_true, train_pred), metrics.balanced_accuracy_score(train_true, train_pred))
        io.cprint(outstr)

        ####################
        # Test
        ####################
        with torch.no_grad():
            test_loss = 0.0
            count = 0.0
            model.eval()
            test_pred = []
            test_true = []

            batch_time = AverageMeter()
            losses = AverageMeter()

            end = time.time()
            for j, (data, label) in enumerate(test_loader):
                data, label = data.to(device), label.to(device).squeeze()
                data = data.permute(0, 2, 1)
                batch_size = data.size()[0]

                if args.model in ["pi", "pipoint", "pi2"]:
                    logits, atts = model(data)
                elif args.model in [
                        "TransformerBaseline", "TemporalTransformer",
                        "TemporalTransformer_v2", "TemporalTransformer_v3"
                ]:
                    logits = model(data)
                else:
                    logits, degree = model(data)

                loss = criterion(logits, label, smoothing=False)
                preds = logits.max(dim=1)[1]
                count += batch_size
                test_loss += loss.item() * batch_size
                test_true.append(label.cpu().numpy())
                test_pred.append(preds.detach().cpu().numpy())

                batch_time.update(time.time() - end)
                end = time.time()

                if j % 10 == 0:
                    print(
                        'Test {}, Loss {}, Time {batch_time.val:.3f} ({batch_time.avg:.3f})'
                        .format(j,
                                test_loss * 1.0 / count,
                                batch_time=batch_time))

            test_true = np.concatenate(test_true)
            test_pred = np.concatenate(test_pred)
            test_acc = metrics.accuracy_score(test_true, test_pred)
            avg_per_class_acc = metrics.balanced_accuracy_score(
                test_true, test_pred)

            #per_class_acc = metrics.precision_score(test_true, test_pred, average=None)
            #outstr = 'Test {}, loss: {}, train acc: {}, train avg acc: {}'.format(epoch, batch_time=batch_time, data_time=data_time, test_loss*1.0/count, test_acc, avg_per_class_acc)
            outstr = 'Test %d, loss: %.6f, test acc: %.6f, test avg acc: %.6f' % (
                epoch, test_loss * 1.0 / count, test_acc, avg_per_class_acc)
            #outstr_2 = 'Test per class acc: {}'%per_class_acc
            io.cprint(outstr)
            #io.cprint(outstr_2)
            if args.model in ["pi", "pipoint", "pi2"]:
                for j in range(4):
                    io.cprint('Att {} : {}'.format(j, atts[j].mean().item()))

            is_best = test_acc >= best_test_acc

            if is_best:
                best_test_acc = test_acc

            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                }, is_best, args.exp_name)
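AverageMeter is used above but defined elsewhere in the repo; the canonical implementation from the PyTorch ImageNet example, which these training scripts conventionally copy (an assumption, since this page does not show the import), is short:

class AverageMeter(object):
    """Tracks the current value, running sum, count, and mean of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count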
Example No. 27
def train(args, io):
    train_loader = DataLoader(ModelNet40(partition='train', num_points=args.num_points), num_workers=8,
                              batch_size=args.batch_size, shuffle=True, drop_last=True)
    test_loader = DataLoader(ModelNet40(partition='test', num_points=args.num_points), num_workers=8,
                             batch_size=args.test_batch_size, shuffle=True, drop_last=False)

    device = torch.device("cuda" if args.cuda else "cpu")

    #Try to load models
    if args.model == 'pointnet':
        model = PointNet(args).to(device)
    elif args.model == 'dgcnn':
        model = DGCNN_cls(args).to(device)
    else:
        raise Exception("Not implemented")

    print(str(model))

    model = nn.DataParallel(model)
    print("Let's use", torch.cuda.device_count(), "GPUs!")

    if args.use_sgd:
        print("Use SGD")
        opt = optim.SGD(model.parameters(), lr=args.lr*100, momentum=args.momentum, weight_decay=1e-4)
    else:
        print("Use Adam")
        opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)

    if args.scheduler == 'cos':
        scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=1e-3)
    elif args.scheduler == 'step':
        scheduler = StepLR(opt, step_size=20, gamma=0.7)

    criterion = cal_loss
    #criterion = cal_loss_v2

    best_test_acc = 0
    for epoch in range(args.epochs):
        ####################
        # Train
        ####################
        train_loss = 0.0
        count = 0.0
        model.train()
        train_pred = []
        train_true = []
        for data, label in train_loader:
            data, label = data.to(device), label.to(device).squeeze()
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            opt.zero_grad()
            logits = model(data)
            loss = criterion(logits, label)
            loss.backward()
            opt.step()
            preds = logits.max(dim=1)[1]
            count += batch_size
            train_loss += loss.item() * batch_size
            train_true.append(label.cpu().numpy())
            train_pred.append(preds.detach().cpu().numpy())
        if args.scheduler == 'cos':
            scheduler.step()
        elif args.scheduler == 'step':
            if opt.param_groups[0]['lr'] > 1e-5:
                scheduler.step()
            if opt.param_groups[0]['lr'] < 1e-5:
                for param_group in opt.param_groups:
                    param_group['lr'] = 1e-5

        train_true = np.concatenate(train_true)
        train_pred = np.concatenate(train_pred)
        outstr = 'Train %d, loss: %.6f, train acc: %.6f, train avg acc: %.6f' % (epoch,
                                                                                 train_loss*1.0/count,
                                                                                 metrics.accuracy_score(
                                                                                     train_true, train_pred),
                                                                                 metrics.balanced_accuracy_score(
                                                                                     train_true, train_pred))
        io.cprint(outstr)

        ####################
        # Test
        ####################
        test_loss = 0.0
        count = 0.0
        model.eval()
        test_pred = []
        test_true = []
        with torch.no_grad():
            for data, label in test_loader:
                data, label = data.to(device), label.to(device).squeeze()
                data = data.permute(0, 2, 1)
                batch_size = data.size()[0]
                logits = model(data)
                loss = criterion(logits, label)
                preds = logits.max(dim=1)[1]
                count += batch_size
                test_loss += loss.item() * batch_size
                test_true.append(label.cpu().numpy())
                test_pred.append(preds.detach().cpu().numpy())
        test_true = np.concatenate(test_true)
        test_pred = np.concatenate(test_pred)
        test_acc = metrics.accuracy_score(test_true, test_pred)
        avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
        outstr = 'Test %d, loss: %.6f, test acc: %.6f, test avg acc: %.6f' % (epoch,
                                                                              test_loss*1.0/count,
                                                                              test_acc,
                                                                              avg_per_class_acc)
        io.cprint(outstr)
        if test_acc >= best_test_acc:
            best_test_acc = test_acc
            torch.save(model.state_dict(), 'checkpoints/%s/models/model.t7' % args.exp_name)
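cal_loss is imported from the shared util module in these DGCNN-style scripts; in the repos this snippet descends from, it is conventionally cross-entropy with optional label smoothing. A sketch of that convention, offered as an assumption rather than the verified import:

import torch
import torch.nn.functional as F

def cal_loss(pred, gold, smoothing=True, eps=0.2):
    """Cross-entropy over (B, C) logits, optionally with label smoothing."""
    gold = gold.contiguous().view(-1)
    if smoothing:
        n_class = pred.size(1)
        # spread eps of the probability mass uniformly over the wrong classes
        one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)
        one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
        log_prb = F.log_softmax(pred, dim=1)
        loss = -(one_hot * log_prb).sum(dim=1).mean()
    else:
        loss = F.cross_entropy(pred, gold, reduction='mean')
    return loss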
Example No. 28
import os
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.utils.data as data
from config import get_test_config
from data import ModelNet40
from models import MeshNet
from utils import point_wise_L1_loss, get_unit_diamond_vertices, axis_aligned_miou, point_wise_mse_loss  #, stochastic_loss

root_path = '/content/drive/MyDrive/DL_diamond_cutting/MeshNet/'

cfg = get_test_config(root_path)
os.environ['CUDA_VISIBLE_DEVICES'] = cfg['cuda_devices']
use_gpu = torch.cuda.is_available()

data_set = ModelNet40(cfg=cfg['dataset'], root_path=root_path, part='test')
data_loader = data.DataLoader(data_set,
                              batch_size=1,
                              num_workers=4,
                              shuffle=False,
                              pin_memory=False)


def test_model(model):

    criterion = nn.L1Loss()
    running_loss = 0.0
    running_l1_loss = 0.0
    running_scale_loss = 0.0
    running_center_loss = 0.0
    running_rotation_loss = 0.0
Example No. 29
def main():
    parser = argparse.ArgumentParser(description='Point Cloud Registration')
    parser.add_argument('--exp_name',
                        type=str,
                        default='exp',
                        metavar='N',
                        help='Name of the experiment')
    parser.add_argument('--model',
                        type=str,
                        default='dcp',
                        metavar='N',
                        choices=['dcp'],
                        help='Model to use, [dcp]')
    parser.add_argument('--emb_nn',
                        type=str,
                        default='pointnet',
                        metavar='N',
                        choices=['pointnet', 'dgcnn'],
                        help='Embedding nn to use, [pointnet, dgcnn]')
    parser.add_argument(
        '--pointer',
        type=str,
        default='transformer',
        metavar='N',
        choices=['identity', 'transformer'],
        help='Attention-based pointer generator to use, [identity, transformer]'
    )
    parser.add_argument('--head',
                        type=str,
                        default='svd',
                        metavar='N',
                        choices=[
                            'mlp',
                            'svd',
                        ],
                        help='Head to use, [mlp, svd]')
    parser.add_argument('--emb_dims',
                        type=int,
                        default=512,
                        metavar='N',
                        help='Dimension of embeddings')
    parser.add_argument('--n_blocks',
                        type=int,
                        default=1,
                        metavar='N',
                        help='Num of blocks of encoder&decoder')
    parser.add_argument('--n_heads',
                        type=int,
                        default=4,
                        metavar='N',
                        help='Num of heads in multiheadedattention')
    parser.add_argument('--ff_dims',
                        type=int,
                        default=1024,
                        metavar='N',
                        help='Num of dimensions of fc in transformer')
    parser.add_argument('--dropout',
                        type=float,
                        default=0.0,
                        metavar='N',
                        help='Dropout ratio in transformer')
    parser.add_argument('--batch_size',
                        type=int,
                        default=32,
                        metavar='batch_size',
                        help='Size of batch')
    parser.add_argument('--test_batch_size',
                        type=int,
                        default=10,
                        metavar='batch_size',
                        help='Size of batch')
    parser.add_argument('--epochs',
                        type=int,
                        default=250,
                        metavar='N',
                        help='number of epochs to train')
    parser.add_argument('--use_sgd',
                        action='store_true',
                        default=False,
                        help='Use SGD')
    parser.add_argument(
        '--lr',
        type=float,
        default=0.001,
        metavar='LR',
        help='learning rate (default: 0.001, 0.1 if using sgd)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.9,
                        metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--no_cuda',
                        action='store_true',
                        default=False,
                        help='enables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=1234,
                        metavar='S',
                        help='random seed (default: 1234)')
    parser.add_argument('--eval',
                        action='store_true',
                        default=False,
                        help='evaluate the model')
    parser.add_argument('--cycle',
                        type=bool,
                        default=False,
                        metavar='N',
                        help='Whether to use cycle consistency')
    parser.add_argument('--gaussian_noise',
                        type=bool,
                        default=False,
                        metavar='N',
                        help='Whether to add Gaussian noise')
    parser.add_argument('--unseen',
                        type=bool,
                        default=False,
                        metavar='N',
                        help='Whether to test on unseen categories')
    parser.add_argument('--num_points',
                        type=int,
                        default=1024,
                        metavar='N',
                        help='Num of points to use')
    parser.add_argument('--dataset',
                        type=str,
                        default='modelnet40',
                        choices=['modelnet40'],
                        metavar='N',
                        help='dataset to use')
    parser.add_argument('--factor',
                        type=float,
                        default=4,
                        metavar='N',
                        help='Divided factor for rotations')
    parser.add_argument('--model_path',
                        type=str,
                        default='pretrained/dcp_v1.t7',
                        metavar='N',
                        help='Pretrained model path')

    args = parser.parse_args()
    torch.backends.cudnn.deterministic = True
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)

    boardio = SummaryWriter(log_dir='checkpoints/' + args.exp_name)
    _init_(args)

    textio = IOStream('checkpoints/' + args.exp_name + '/run.log')
    textio.cprint(str(args))

    if args.dataset == 'modelnet40':
        train_loader = DataLoader(ModelNet40(
            num_points=args.num_points,
            partition='train',
            gaussian_noise=args.gaussian_noise,
            unseen=args.unseen,
            factor=args.factor),
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  drop_last=True)
        test_loader = DataLoader(ModelNet40(num_points=args.num_points,
                                            partition='test',
                                            gaussian_noise=args.gaussian_noise,
                                            unseen=args.unseen,
                                            factor=args.factor),
                                 batch_size=args.test_batch_size,
                                 shuffle=False,
                                 drop_last=False)
    else:
        raise Exception("not implemented")

    if args.model == 'dcp':
        net = DCP(args).cuda()
        print("Model Parameters")
        count_parameters(net)
        if args.eval:
            if args.model_path == '':
                model_path = 'checkpoints' + '/' + args.exp_name + '/models/model.best.t7'
            else:
                model_path = args.model_path
                print(model_path)
            if not os.path.exists(model_path):
                print("can't find pretrained model")
                return
            net.load_state_dict(torch.load(model_path), strict=False)
        if torch.cuda.device_count() > 1:
            net = nn.DataParallel(net)
            print("Let's use", torch.cuda.device_count(), "GPUs!")
    else:
        raise Exception('Not implemented')

    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)

    if args.eval:
        start.record()
        test(args, net, test_loader, boardio, textio)
        end.record()
        torch.cuda.synchronize()
        print("Time to test: ", start.elapsed_time(end))
    else:
        train(args, net, train_loader, test_loader, boardio, textio)

    print('FINISH')
    boardio.close()
Example No. 30
def test(args, io):
    test_loader = DataLoader(ModelNet40(partition='test',
                                        num_points=args.num_points),
                             batch_size=args.test_batch_size,
                             shuffle=False,
                             drop_last=False)

    device = torch.device("cuda" if args.cuda else "cpu")

    #Try to load models
    if args.model == 'pointnet':
        model = PointNet(args).to(device)
    elif args.model == 'dgcnn':
        model = DGCNN(args).to(device)
    else:
        model = PaiNet(args).to(device)
        #raise Exception("Not implemented")
    print(str(model))
    # model = DGCNN(args).to(device)
    model = nn.DataParallel(model)
    if os.path.exists('checkpoints/%s/models/model_%s.t7' %
                      (args.exp_name, args.model)):
        checkpoint_dict = torch.load('./checkpoints/%s/models/model_%s.t7' %
                                     (args.exp_name, args.model),
                                     map_location=device)
        model.load_state_dict(checkpoint_dict, strict=True)
        print("Load model from './checkpoints/%s/models/model_%s_2048.t7 !'" %
              (args.exp_name, args.model))
    model = model.eval()
    test_acc = 0.0
    count = 0.0
    test_true = []
    test_pred = []
    feats = []
    with torch.no_grad():
        for data, label in test_loader:
            data, label = data.to(device), label.to(device).squeeze()
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            logits, feat = model(data)
            preds = logits.max(dim=1)[1]
            test_true.append(label.cpu().numpy())
            test_pred.append(preds.detach().cpu().numpy())
            feats.append(feat.detach().cpu())
    feats = torch.cat(feats)
    # L2 distance from every test sample to sample 6 in feature space;
    # topk of the negated distances picks out the 5 nearest neighbours
    distances = torch.norm((feats - feats[6:7]), dim=1)
    _, index = torch.topk(-distances, k=5)

    test = ModelNet40(1024, 'test')
    points = test.data[index]
    vis = o3d.visualization.Visualizer()
    vis.create_window(visible=True)
    pcd = o3d.geometry.PointCloud()
    for i, data in enumerate(points):
        pcd.points = o3d.utility.Vector3dVector(data)
        data[:] = 0.2  # reuse the array as a uniform grey colour (Vector3dVector copied the points)
        pcd.colors = o3d.utility.Vector3dVector(data)
        vis.add_geometry(pcd)

        ctr = vis.get_view_control()
        ctr.rotate(-260, 100)

        vis.update_geometry(pcd)  # newer Open3D releases require the geometry argument
        vis.poll_events()
        vis.update_renderer()
        time.sleep(1)
        vis.capture_screen_image('d{}.png'.format(i))
        vis.remove_geometry(pcd)

    test_true = np.concatenate(test_true)
    test_pred = np.concatenate(test_pred)
    test_acc = metrics.accuracy_score(test_true, test_pred)
    avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
    outstr = 'Test :: test acc: %.6f, test avg acc: %.6f' % (test_acc,
                                                             avg_per_class_acc)
    io.cprint(outstr)
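A closing note on the retrieval step above: negating the distances before topk works, but torch.topk also accepts largest=False, which reads more directly. A self-contained sketch of the same feature-space lookup (nearest_neighbours is a hypothetical helper):

import torch

def nearest_neighbours(feats, query_idx, k=5):
    """Indices of the k vectors closest (L2) to feats[query_idx]; includes the query itself."""
    distances = torch.norm(feats - feats[query_idx:query_idx + 1], dim=1)
    _, index = torch.topk(distances, k=k, largest=False)  # smallest distances, no negation needed
    return index

feats = torch.randn(100, 256)           # stand-in for the (N, D) features gathered above
print(nearest_neighbours(feats, 6, 5))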