Example 1
def pass_once(args, points, object_id=0, obj_index=0, max_part_count=6, project_to_plane=False, transform_matrix=None):
    output_dir = os.path.join(BASE_DIR, '..', 'visualize_output')
    os.makedirs(output_dir, exist_ok=True)

    output_object_dir = os.path.join(output_dir, str(object_id))
    os.makedirs(output_object_dir, exist_ok=True)

    device = torch.device("cuda" if args.use_cuda else "cpu")

    # Load the trained model
    model = DGCNN(args, part_number=6).to(device)
    model = nn.DataParallel(model)
    model.load_state_dict(torch.load(args.model_path, map_location=device))
    model = model.eval()

    # (num_points, 3) -> (1, 3, num_points): add a batch axis, channels first
    with torch.no_grad():
        logits = model(points.unsqueeze(0).permute(0, 2, 1))

    # Per-point predicted part labels (argmax over part scores)
    preds = logits[0, :, :].max(dim=1)[1]
    part_points = get_part_point_cloud_from_label(points, preds, max_part_count)

    if project_to_plane:
        plot3d_pts_in_camera_plane(part_points, transform_matrix,
                                   pts_name=[f'part {i}: {part_points[i].shape[0]} pts' for i in range(max_part_count)],
                                   title_name=f'object {object_id}',
                                   show_fig=False, save_fig=True,
                                   save_path=output_object_dir, filename=f'obj {obj_index}', s=10)
    else:
        plot3d_pts([part_points],
                   pts_name=[[f'part {i}: {part_points[i].shape[0]} pts' for i in range(max_part_count)]],
                   title_name=f'object {object_id}',
                   sub_name=f'obj {obj_index}', show_fig=False, save_fig=True,
                   save_path=output_object_dir, s=10)
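
A minimal driver for this variant, as a sketch only: the `Args` fields below are assumptions (`use_cuda` and `model_path` are read by `pass_once`; the rest stand in for whatever hyperparameters `DGCNN(args)` expects in this repository), and the checkpoint path is a placeholder.

# Hypothetical smoke test for pass_once (Example 1)
import torch

class Args:
    use_cuda = torch.cuda.is_available()
    model_path = 'checkpoints/model.h5'  # placeholder checkpoint path
    k = 20            # assumed k-NN size for DGCNN
    emb_dims = 1024   # assumed embedding width
    dropout = 0.5     # assumed dropout rate

points = torch.rand(2048, 3)  # one (num_points, 3) cloud
pass_once(Args(), points, object_id=1, obj_index=0, max_part_count=6)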
Example 2
def pass_once(args,
              points,
              object_id=0,
              step_count=13,
              max_part_count=10,
              project_to_plane=False,
              transform_matrix=None):
    output_dir = os.path.join(BASE_DIR, '..', 'visualize_output')
    os.makedirs(output_dir, exist_ok=True)

    output_object_dir = os.path.join(output_dir, str(object_id))
    os.makedirs(output_object_dir, exist_ok=True)

    device = torch.device("cuda" if args.use_cuda else "cpu")

    # Load the trained model
    model = DGCNN(args).to(device)
    model = nn.DataParallel(model)
    model.load_state_dict(torch.load(args.model_path, map_location=device))
    model = model.eval()

    # (step_count, num_points, 3) -> (step_count, 3, num_points)
    permuted_data = points.permute(0, 2, 1)
    with torch.no_grad():
        logits = model(permuted_data)

    # Segment and visualize each articulation step separately
    for step_id in range(step_count):
        step_preds = logits[step_id, :, :].max(dim=1)[1]
        step_points = points[step_id, :, :]
        part_points = get_part_point_cloud_from_label(step_points, step_preds,
                                                      max_part_count)

        if project_to_plane:
            plot3d_pts_in_camera_plane(
                part_points,
                transform_matrix,
                pts_name=[
                    f'part {i}: {part_points[i].shape[0]} pts'
                    for i in range(max_part_count)
                ],
                title_name=f'object {object_id} step {step_id}',
                show_fig=False,
                save_fig=True,
                save_path=output_object_dir,
                filename=f'step {step_id}',
                s=10)
        else:
            plot3d_pts([part_points],
                       pts_name=[[
                           f'part {i}: {part_points[i].shape[0]} pts'
                           for i in range(max_part_count)
                       ]],
                       title_name=f'object {object_id} step {step_id}',
                       sub_name=f'step {step_id}',
                       show_fig=True,
                       save_fig=True,
                       save_path=output_object_dir,
                       s=10)
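
Both variants depend on `get_part_point_cloud_from_label`, which is not shown in these examples. Below is a minimal sketch of what its call sites imply: group an (N, 3) cloud by per-point label, returning one tensor per part so that `part_points[i].shape[0]` is the size of part i. The repository version may differ.

import torch

def get_part_point_cloud_from_label(points, preds, max_part_count):
    # Sketch only: boolean-mask the cloud once per part id. Parts with
    # no points yield a (0, 3) tensor, so the plotting code can still
    # index all max_part_count entries.
    return [points[preds == part_id] for part_id in range(max_part_count)]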
Example 3
def train(args, io):
    data_dir = os.path.join(BASE_DIR, '..', 'part_seg', 'hdf5_data')

    data, label = load_h5_data_their_data(data_dir, 5, args.num_points)
    dataset = TensorDataset(data, label)

    train_loader, test_loader = get_data_loaders(dataset, args.batch_size)

    device = torch.device("cuda" if args.use_cuda else "cpu")

    # Build the requested model
    if args.model == 'pointnet':
        model = PointNet(args).to(device)
    elif args.model == 'dgcnn':
        model = DGCNN(args).to(device)
    else:
        raise NotImplementedError(f"Unknown model: {args.model}")
    print(str(model))

    model = nn.DataParallel(model)

    if os.path.exists(args.model_path):
        io.cprint("Loading existing model...")
        try:
            model.load_state_dict(torch.load(args.model_path, map_location=device))
            io.cprint("Existing model loaded")
        except Exception:
            io.cprint("Can't load existing model, starting from a new model...")

    model.float()
    print("Let's use", torch.cuda.device_count(), "GPUs!")

    if args.use_sgd:
        print("Use SGD")
        # SGD needs a much larger learning rate than Adam here
        opt = optim.SGD(model.parameters(), lr=args.lr * 100, momentum=args.momentum, weight_decay=1e-4)
    else:
        print("Use Adam")
        opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)

    scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=args.lr)

    criterion = cal_min_pairwise_seg_loss  # alternative: cross_entropy_loss

    train_loss_list = []
    train_acc_list = []
    test_loss_list = []
    test_acc_list = []
    max_test_acc = 0
    max_acc_epoch = 0
    min_test_loss = math.inf
    min_loss_epoch = 0

    starting_epoch = 0
    training_backup_filepath = f'checkpoints_perm_loss_their_data/{args.exp_name}/models/training_backup.txt'
    if os.path.exists(training_backup_filepath):
        try:
            with open(training_backup_filepath, 'r') as f:
                starting_epoch = int(f.readline()) + 1
                # If the previous run already finished, restart from scratch
                if starting_epoch >= args.epochs - 1:
                    starting_epoch = 0
                else:
                    max_test_acc = float(f.readline())
                    min_test_loss = float(f.readline())
        except Exception:
            io.cprint("Error when reading epoch record file")

    io.cprint(F"Starting from epoch {starting_epoch}")
    for epoch in range(starting_epoch, args.epochs):
        ####################
        # Train
        ####################
        train_loss = 0.0
        count = 0.0
        model.train()
        train_pred = []
        train_true = []
        start_time = time.time()
        for data, label in train_loader:
            data, label = data.to(device), label.to(device)
            # data: batch_size x point_num x 3
            # label: batch_size x point_num

            batch_size = data.shape[0]
            opt.zero_grad()
            logits = model(data.permute(0, 2, 1))

            # The permutation-invariant criterion returns the best
            # relabeling of the ground truth; backprop through plain
            # cross-entropy against those permuted labels
            loss, permuted_labels = criterion(logits, label)
            min_loss = cross_entropy_loss(logits, permuted_labels)
            min_loss.backward()

            opt.step()
            preds = logits.max(dim=2)[1]
            count += batch_size
            train_loss += loss.item() * batch_size
            train_true.append(permuted_labels.cpu().view(-1).numpy())
            train_pred.append(preds.detach().view(-1).cpu().numpy())
        train_true = np.concatenate(train_true)
        train_pred = np.concatenate(train_pred)

        train_loss = train_loss / count
        train_loss_list.append(train_loss)

        train_acc = metrics.accuracy_score(train_true, train_pred)
        train_acc_list.append(train_acc)

        outstr = 'Train %d, loss: %.6f, train acc: %.6f' % (epoch,
                                                            train_loss,
                                                            train_acc)
        io.cprint(outstr)

        scheduler.step()

        ####################
        # Test
        ####################
        test_loss = 0.0
        count = 0.0
        model.eval()
        test_pred = []
        test_true = []
        for data, label in test_loader:
            data, label = data.to(device), label.to(device)

            batch_size = data.shape[0]
            logits = model(data.permute(0, 2, 1))
            
            loss, permuted_labels = criterion(logits, label)
            preds = logits.max(dim=2)[1]
            count += batch_size
            test_loss += loss.item() * batch_size
            test_true.append(permuted_labels.cpu().view(-1).numpy())
            test_pred.append(preds.detach().cpu().view(-1).numpy())
        test_true = np.concatenate(test_true)
        test_pred = np.concatenate(test_pred)
        test_acc = metrics.accuracy_score(test_true, test_pred)

        test_loss = test_loss / count
        test_loss_list.append(test_loss)
        test_acc_list.append(test_acc)

        if test_acc > max_test_acc:
            max_test_acc = test_acc
            max_acc_epoch = epoch
            torch.save(model.state_dict(), 'checkpoints_perm_loss_their_data/%s/models/model.h5' % args.exp_name)
        if test_loss < min_test_loss:
            min_test_loss = test_loss
            min_loss_epoch = epoch
        
        end_time = time.time()
        time_per_epoch = end_time - start_time
        outstr = 'Test %d, loss: %.6f, test acc: %.6f, total time: %.6f s\n' % (epoch,
                                                                                test_loss,
                                                                                test_acc,
                                                                                time_per_epoch)
        io.cprint(outstr)

        with open(training_backup_filepath, 'w') as f:
            f.write(str(epoch) + '\n')
            f.write(str(max_test_acc) + '\n')
            f.write(str(min_test_loss))

    fig = plt.figure(figsize=(17, 10))

    loss_ax = fig.add_subplot(1, 2, 1)
    acc_ax = fig.add_subplot(1, 2, 2)

    loss_ax.plot(train_loss_list)
    loss_ax.plot(test_loss_list)
    loss_ax.set_title(f'Cross-entropy loss:\nMinimum test loss: {min_test_loss:.5f} (epoch {min_loss_epoch})')
    loss_ax.set_ylabel('loss')
    loss_ax.set_xlabel('epoch')
    loss_ax.legend(['train', 'test'], loc='upper right')

    acc_ax.plot(train_acc_list)
    acc_ax.plot(test_acc_list)
    acc_ax.set_title(f'Accuracy:\nMaximum test accuracy: {max_test_acc:.5f} (epoch {max_acc_epoch})')
    acc_ax.set_ylabel('acc')
    acc_ax.set_xlabel('epoch')
    acc_ax.legend(['train', 'test'], loc='upper right')
    # plt.show()  # uncomment to display the figure interactively
    fig.savefig('./log_perm_loss_their_data/model_loss_acc.png')
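
Examples 3-5 also call `get_data_loaders`, whose definition is not included. The sketch below is consistent with its call sites: a random split controlled by `val_percentage`, where `val_percentage=1` routes everything to the second loader, as the test functions rely on. The default split ratio is an assumption.

import torch
from torch.utils.data import DataLoader, random_split

def get_data_loaders(dataset, batch_size, val_percentage=0.2):
    # Sketch only: split the dataset and wrap each half in a DataLoader
    val_size = int(len(dataset) * val_percentage)
    train_size = len(dataset) - val_size
    train_set, val_set = random_split(dataset, [train_size, val_size])
    train_loader = DataLoader(train_set, batch_size=batch_size,
                              shuffle=True, drop_last=True)
    val_loader = DataLoader(val_set, batch_size=batch_size,
                            shuffle=False, drop_last=False)
    return train_loader, val_loader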
Example 4
def test(args, io):
    data_dir = os.path.join(BASE_DIR, '..', 'dataset', 'hdf5-Sapien', 'cabinets')
    total_objects = os.listdir(data_dir)
    np.random.shuffle(total_objects)

    # The loop below needs per-object step and part counts, so use the
    # Sapien loader (as in Example 5) instead of load_h5_data_their_data,
    # which returns only (data, label)
    object_ids, data, label, step_counts, part_counts = load_h5_data_seg_Sapien(data_dir, total_objects, args.num_points)

    dataset = TensorDataset(data, label, step_counts, part_counts)

    _, test_loader = get_data_loaders(dataset, batch_size=args.batch_size, val_percentage=1)

    device = torch.device("cuda" if args.use_cuda else "cpu")

    # Load the trained model
    model = DGCNN(args).to(device)
    model = nn.DataParallel(model)
    model.load_state_dict(torch.load(args.model_path, map_location=device))
    model = model.eval()
    test_pred = []
    test_true = []
    for data, label, step_counts, part_counts in test_loader:
        data, label = data.to(device), label.to(device)

        # Flatten (objects, steps) into one step axis, then drop steps
        # that are pure padding (all labels == -1)
        max_step_size = label.shape[1]
        data = data.view(data.shape[0] * data.shape[1], -1, 3)
        label = label.view(label.shape[0] * label.shape[1], -1)
        preserved_indices = torch.unique((label != -1).nonzero()[:, 0])
        data = data[preserved_indices, :, :]
        label = label[preserved_indices, :]
        # Repeat each object's part count once per step, then filter the same way
        part_counts = part_counts.unsqueeze(1).repeat(1, max_step_size).view(-1)[preserved_indices]

        permuted_data = data.permute(0, 2, 1)
        logits = model(permuted_data)

        _, permuted_labels = cal_min_pairwise_seg_loss(logits, label, part_counts, step_counts)
        preds = logits.max(dim=2)[1]
        test_true.append(permuted_labels.cpu().view(-1).numpy())
        test_pred.append(preds.detach().cpu().view(-1).numpy())

    test_true = np.concatenate(test_true)
    test_pred = np.concatenate(test_pred)
    test_acc = metrics.accuracy_score(test_true, test_pred)
    outstr = 'Test :: test acc: %.6f' % test_acc
    io.cprint(outstr)
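
The padding-removal step in the loop above is the subtle part; here is a toy run, assuming padded steps are filled with label -1 exactly as the code expects.

import torch

# Two objects, up to 3 steps each, 4 points per step; the second
# object has one real step and two padded ones.
label = torch.tensor([[[0, 1, 1, 0], [2, 2, 0, 1], [1, 1, 1, 0]],
                      [[0, 0, 1, 1], [-1, -1, -1, -1], [-1, -1, -1, -1]]])
label = label.view(label.shape[0] * label.shape[1], -1)   # (6, 4)
preserved_indices = torch.unique((label != -1).nonzero()[:, 0])
print(preserved_indices)  # tensor([0, 1, 2, 3]): padded rows 4, 5 dropped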
Example 5
def test(args, io):
    category = 'laptops-similar-frame'
    perm_loss = True
    data_dir = os.path.join(BASE_DIR, '..', 'part_seg', 'Sapien_part_seg', category)
    total_objects = os.listdir(data_dir)
    np.random.shuffle(total_objects)

    object_ids, data, label, step_counts, part_counts = load_h5_data_seg_Sapien(data_dir, total_objects, args.num_points)

    for i, object_id in enumerate(object_ids):
        yml_load_path = os.path.join(BASE_DIR, '..', 'part_seg', 'Sapien_part_seg', f'{category}-render', str(object_id), 'pose-transformation.yml')
        with open(yml_load_path, 'r') as f:
            yaml_dict = yaml.safe_load(f)
        proj_matrix = np.array(yaml_dict['projMat']).reshape(4, 4).T
        pass_once(args, data[i], object_id=object_id, obj_index=i, project_to_plane=True, transform_matrix=proj_matrix)

    dataset = TensorDataset(data, label, step_counts, part_counts)

    _, test_loader = get_data_loaders(dataset, batch_size=args.test_batch_size, val_percentage=1)

    device = torch.device("cuda" if args.use_cuda else "cpu")

    # Load the trained model
    model = DGCNN(args, part_number=6).to(device)
    model = nn.DataParallel(model)
    model.load_state_dict(torch.load(args.model_path, map_location=device))
    model = model.eval()

    count = 0.0
    test_loss = 0.0
    test_pred = []
    test_true = []
    for data, label, step_counts, part_counts in test_loader:
        data, label = data.to(device), label.to(device)

        batch_size = data.shape[0]
        logits = model(data.permute(0, 2, 1))
        
        if perm_loss:
            # Evaluate against the best relabeling of the ground truth
            loss, permuted_labels = cal_min_pairwise_seg_loss(logits, label)
            label = permuted_labels
        else:
            loss = cal_seg_loss(logits, label)

        preds = logits.max(dim=2)[1]
        count += batch_size
        test_loss += loss.item() * batch_size
        test_true.append(label.cpu().view(-1).numpy())
        test_pred.append(preds.detach().cpu().view(-1).numpy())
    test_true = np.concatenate(test_true)
    test_pred = np.concatenate(test_pred)
    test_acc = metrics.accuracy_score(test_true, test_pred)

    test_loss = test_loss / count
    outstr = 'Test :: test acc: %.6f, test loss: %.6f' % (test_acc, test_loss)
    io.cprint(outstr)
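
`cal_min_pairwise_seg_loss` is the permutation-invariant criterion used in all three training/testing examples but is not defined here. The brute-force sketch below matches its two-argument call sites: search relabelings of the ground-truth part ids and return the lowest cross-entropy plus the relabeled targets that achieve it. The actual repository version may use a cheaper matching scheme and extra arguments (as in Example 4).

import itertools
import torch
import torch.nn.functional as F

def cal_min_pairwise_seg_loss(logits, label):
    # Sketch only. logits: (B, N, P) per-point part scores,
    # label: (B, N) integer part ids with no padding.
    # Part ids are arbitrary, so try every permutation of the P ids
    # and keep the one that minimizes cross-entropy per sample.
    batch_size, _, part_count = logits.shape
    losses, best_labels = [], []
    for b in range(batch_size):
        best_loss, best_relabel = None, None
        for perm in itertools.permutations(range(part_count)):
            mapping = torch.tensor(perm, device=label.device)
            relabeled = mapping[label[b]]          # apply the permutation
            loss = F.cross_entropy(logits[b], relabeled)
            if best_loss is None or loss < best_loss:
                best_loss, best_relabel = loss, relabeled
        losses.append(best_loss)
        best_labels.append(best_relabel)
    return torch.stack(losses).mean(), torch.stack(best_labels)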