Example #1
def main():

    # create one PointCloud object per capsule to hold its reconstructed patch
    pcd_list = []
    for i in range(opt.latent_caps_size):
        pcd_ = PointCloud()
        pcd_list.append(pcd_)
    colors = plt.cm.tab20((np.arange(20)).astype(int))
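    # tab20 called on the integers 0..19 yields 20 distinct RGBA rows; jc below
    # indexes them to give each highlighted capsule its own color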
    # randomly select capsules to highlight in the visualization
    hight_light_caps = [
        np.random.randint(0, opt.latent_caps_size) for r in range(10)
    ]

    USE_CUDA = True
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    capsule_net = PointCapsNet(opt.prim_caps_size, opt.prim_vec_size,
                               opt.latent_caps_size, opt.latent_vec_size,
                               opt.num_points)

    if opt.model != '':
        capsule_net.load_state_dict(torch.load(opt.model))
    else:
        print('please set the model path')

    if USE_CUDA:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        capsule_net = torch.nn.DataParallel(capsule_net)
        capsule_net.to(device)

    if opt.dataset == 'shapenet_part':
        test_dataset = shapenet_part_loader.PartDataset(classification=True,
                                                        npoints=opt.num_points,
                                                        split='test')
        test_dataloader = torch.utils.data.DataLoader(
            test_dataset,
            batch_size=opt.batch_size,
            shuffle=True,
            num_workers=4)
    elif opt.dataset == 'shapenet_core13':
        test_dataset = shapenet_core13_loader.ShapeNet(normal=False,
                                                       npoints=opt.num_points,
                                                       train=False)
        test_dataloader = torch.utils.data.DataLoader(
            test_dataset,
            batch_size=opt.batch_size,
            shuffle=True,
            num_workers=4)
    elif opt.dataset == 'shapenet_core55':
        test_dataset = shapenet_core55_loader.Shapnet55Dataset(
            batch_size=opt.batch_size,
            npoints=opt.num_points,
            shuffle=True,
            train=False)
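        # Shapnet55Dataset batches internally via next_batch(), so no DataLoader
        # is built here; the 'test_dataloader' check below routes to the right branch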

    capsule_net.eval()
    if 'test_dataloader' in locals().keys():
        test_loss_sum = 0
        for batch_id, data in enumerate(test_dataloader):
            points, _ = data
            if (points.size(0) < opt.batch_size):
                break
            points = Variable(points)
            points = points.transpose(2, 1)
            if USE_CUDA:
                points = points.cuda()
            latent_caps, reconstructions = capsule_net(points)
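            # latent_caps: (batch, latent_caps_size, latent_vec_size);
            # reconstructions: (batch, 3, num_points)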

            for pointset_id in range(opt.batch_size):
                prc_r_all = reconstructions[pointset_id].transpose(
                    1, 0).contiguous().data.cpu()
                prc_r_all_point = PointCloud()
                prc_r_all_point.points = Vector3dVector(prc_r_all)
                colored_re_pointcloud = PointCloud()
                jc = 0
                for j in range(opt.latent_caps_size):
                    current_patch = torch.zeros(
                        int(opt.num_points / opt.latent_caps_size), 3)
                    for m in range(int(opt.num_points / opt.latent_caps_size)):
                        current_patch[m, ] = prc_r_all[
                            opt.latent_caps_size * m + j,
                        ]  # capsule j's patch is interleaved, not stored contiguously, in the reconstruction
                    pcd_list[j].points = Vector3dVector(current_patch)
                    if (j in hight_light_caps):
                        pcd_list[j].paint_uniform_color(
                            [colors[jc, 0], colors[jc, 1], colors[jc, 2]])
                        jc += 1
                    else:
                        pcd_list[j].paint_uniform_color([0.8, 0.8, 0.8])
                    colored_re_pointcloud += pcd_list[j]
                draw_geometries([colored_re_pointcloud])


    # test process for 'shapenet_core55'
    else:
        test_loss_sum = 0
        while test_dataset.has_next_batch():
            batch_id, points_ = test_dataset.next_batch()
            points = torch.from_numpy(points_)
            if (points.size(0) < opt.batch_size):
                break
            points = Variable(points)
            points = points.transpose(2, 1)
            if USE_CUDA:
                points = points.cuda()
            latent_caps, reconstructions = capsule_net(points)
            for pointset_id in range(opt.batch_size):
                prc_r_all = reconstructions[pointset_id].transpose(
                    1, 0).contiguous().data.cpu()
                prc_r_all_point = PointCloud()
                prc_r_all_point.points = Vector3dVector(prc_r_all)
                colored_re_pointcloud = PointCloud()
                jc = 0
                for j in range(opt.latent_caps_size):
                    current_patch = torch.zeros(
                        int(opt.num_points / opt.latent_caps_size), 3)
                    for m in range(int(opt.num_points / opt.latent_caps_size)):
                        current_patch[m, ] = prc_r_all[
                            opt.latent_caps_size * m + j,
                        ]  # capsule j's patch is interleaved, not stored contiguously, in the reconstruction
                    pcd_list[j].points = Vector3dVector(current_patch)
                    if (j in hight_light_caps):
                        pcd_list[j].paint_uniform_color(
                            [colors[jc, 0], colors[jc, 1], colors[jc, 2]])
                        jc += 1
                    else:
                        pcd_list[j].paint_uniform_color([0.8, 0.8, 0.8])
                    colored_re_pointcloud += pcd_list[j]

                draw_geometries([colored_re_pointcloud])
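
A vectorized alternative to the doubly nested patch-extraction loop above: capsule j owns every latent_caps_size-th point of the reconstruction, so a strided slice recovers its patch in one step. A minimal sketch under that interleaving assumption (extract_patch is a hypothetical helper, not part of the original script):

def extract_patch(prc_r_all, j, latent_caps_size):
    # capsule j's points sit at indices j, j + latent_caps_size, j + 2*latent_caps_size, ...
    return prc_r_all[j::latent_caps_size]  # (num_points // latent_caps_size, 3)
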
Example #2
def main():
    USE_CUDA = True
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    capsule_net = PointCapsNet(opt.prim_caps_size, opt.prim_vec_size,
                               opt.latent_caps_size, opt.latent_vec_size,
                               opt.num_points)

    if opt.model != '':
        capsule_net.load_state_dict(torch.load(opt.model))
    else:
        print('please set the model path')

    if USE_CUDA:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        capsule_net = torch.nn.DataParallel(capsule_net)
        capsule_net.to(device)

    if opt.dataset == 'shapenet_part':
        if opt.save_training:
            split = 'train'
        else:
            split = 'test'
        dataset = shapenet_part_loader.PartDataset(classification=True,
                                                   npoints=opt.num_points,
                                                   split=split)
        dataloader = torch.utils.data.DataLoader(dataset,
                                                 batch_size=opt.batch_size,
                                                 shuffle=True,
                                                 num_workers=4)
    elif opt.dataset == 'shapenet_core13':
        dataset = shapenet_core13_loader.ShapeNet(normal=False,
                                                  npoints=opt.num_points,
                                                  train=opt.save_training)
        dataloader = torch.utils.data.DataLoader(dataset,
                                                 batch_size=opt.batch_size,
                                                 shuffle=True,
                                                 num_workers=4)
    elif opt.dataset == 'shapenet_core55':
        dataset = shapenet_core55_loader.Shapnet55Dataset(
            batch_size=opt.batch_size,
            npoints=opt.num_points,
            shuffle=True,
            train=opt.save_training)
    elif opt.dataset == 'modelnet40':
        dataset = modelnet40_loader.ModelNetH5Dataset(
            batch_size=opt.batch_size,
            npoints=opt.num_points,
            shuffle=True,
            train=opt.save_training)

    # init saving process
    data_size = 0
    dataset_main_path = os.path.abspath(os.path.join(BASE_DIR,
                                                     '../../dataset'))
    out_file_path = os.path.join(dataset_main_path, opt.dataset, 'latent_caps')
    if not os.path.exists(out_file_path):
        os.makedirs(out_file_path)

    if opt.save_training:
        out_file_name = out_file_path + "/saved_train_wo_part_label.h5"
    else:
        out_file_name = out_file_path + "/saved_test_wo_part_label.h5"
    if os.path.exists(out_file_name):
        os.remove(out_file_name)
    fw = h5py.File(out_file_name, 'w', libver='latest')
    dset = fw.create_dataset("data", (
        1,
        opt.latent_caps_size,
        opt.latent_vec_size,
    ),
                             maxshape=(None, opt.latent_caps_size,
                                       opt.latent_vec_size),
                             dtype='<f4')
    dset_c = fw.create_dataset("cls_label", (1, ),
                               maxshape=(None, ),
                               dtype='uint8')
    fw.swmr_mode = True
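    # maxshape=(None, ...) leaves the first axis resizable so batches can be
    # appended; SWMR (single-writer/multiple-reader) mode lets other processes
    # read the file while it is still being written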

    #  process for 'shapenet_part' or 'shapenet_core13'
    capsule_net.eval()
    if 'dataloader' in locals().keys():
        test_loss_sum = 0
        for batch_id, data in enumerate(dataloader):
            points, cls_label = data
            if (points.size(0) < opt.batch_size):
                break
            points = Variable(points)
            points = points.transpose(2, 1)
            if USE_CUDA:
                points = points.cuda()
            latent_caps, reconstructions = capsule_net(points)

            # write the output latent caps and cls into file
            data_size = data_size + points.size(0)
            new_shape = (
                data_size,
                opt.latent_caps_size,
                opt.latent_vec_size,
            )
            dset.resize(new_shape)
            dset_c.resize((data_size, ))

            latent_caps_ = latent_caps.cpu().detach().numpy()
            dset[data_size - points.size(0):data_size, :, :] = latent_caps_
            dset_c[data_size -
                   points.size(0):data_size] = cls_label.squeeze().numpy()

            dset.flush()
            dset_c.flush()
            print('accumulated batch %d, dataset size is %d' %
                  (batch_id, dset.shape[0]))

        fw.close()


    # process for 'shapenet_core55' or 'modelnet40'
    else:
        while dataset.has_next_batch():
            batch_id, points_ = dataset.next_batch()
            points = torch.from_numpy(points_)
            if (points.size(0) < opt.batch_size):
                break
            points = Variable(points)
            points = points.transpose(2, 1)
            if USE_CUDA:
                points = points.cuda()
            latent_caps, reconstructions = capsule_net(points)

            data_size = data_size + points.size(0)
            new_shape = (
                data_size,
                opt.latent_caps_size,
                opt.latent_vec_size,
            )
            dset.resize(new_shape)
            dset_c.resize((data_size, ))

            latent_caps_ = latent_caps.cpu().detach().numpy()
            dset[data_size - points.size(0):data_size, :, :] = latent_caps_
            # next_batch() yields no class labels in this branch, so store
            # placeholder zeros instead of the undefined cls_label
            dset_c[data_size - points.size(0):data_size] = torch.zeros(
                points.size(0), dtype=torch.uint8).numpy()

            dset.flush()
            dset_c.flush()
            print('accumulated batch %d, dataset size is %d' %
                  (batch_id, dset.shape[0]))
        fw.close()
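
For reference, the file written above can be read back with h5py; a minimal sketch, assuming the dataset names 'data' and 'cls_label' created in this example:

import h5py

with h5py.File(out_file_name, 'r') as f:
    latent = f['data'][:]       # (N, latent_caps_size, latent_vec_size)
    labels = f['cls_label'][:]  # (N,)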
Example #3
def main(CLASS="None"):
    if CLASS == "None": exit()

    USE_CUDA = True
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    capsule_net = PointCapsNet(opt.prim_caps_size, opt.prim_vec_size,
                               opt.latent_caps_size, opt.latent_vec_size,
                               opt.num_points)

    if opt.model != '':
        capsule_net.load_state_dict(torch.load(opt.model))
    else:
        print('please set the model path')

    if USE_CUDA:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        capsule_net = torch.nn.DataParallel(capsule_net)
        capsule_net.to(device)

    if opt.dataset == 'shapenet_part':
        if opt.save_training:
            split = 'train'
        else:
            split = 'test'
        dataset = shapenet_part_loader.PartDataset(classification=True,
                                                   npoints=opt.num_points,
                                                   split=split,
                                                   class_choice=CLASS)
        dataloader = torch.utils.data.DataLoader(dataset,
                                                 batch_size=opt.batch_size,
                                                 shuffle=True,
                                                 num_workers=4)
    elif opt.dataset == 'shapenet_core13':
        dataset = shapenet_core13_loader.ShapeNet(normal=False,
                                                  npoints=opt.num_points,
                                                  train=opt.save_training)
        dataloader = torch.utils.data.DataLoader(dataset,
                                                 batch_size=opt.batch_size,
                                                 shuffle=True,
                                                 num_workers=4)
    elif opt.dataset == 'shapenet_core55':
        dataset = shapenet_core55_loader.Shapnet55Dataset(
            batch_size=opt.batch_size,
            npoints=opt.num_points,
            shuffle=True,
            train=opt.save_training)
    elif opt.dataset == 'modelnet40':
        dataset = modelnet40_loader.ModelNetH5Dataset(
            batch_size=opt.batch_size,
            npoints=opt.num_points,
            shuffle=True,
            train=opt.save_training)

    #  process for 'shapenet_part' or 'shapenet_core13'
    capsule_net.eval()

    count = 0

    if 'dataloader' in locals().keys():
        test_loss_sum = 0
        for batch_id, data in enumerate(dataloader):
            points, _ = data
            if (points.size(0) < opt.batch_size):
                break
            points = Variable(points)
            points = points.transpose(2, 1)
            if USE_CUDA:
                points = points.cuda()
            latent_caps, _ = capsule_net(points)

            for i in range(opt.batch_size):
                torch.save(
                    latent_caps[i, :],
                    "tmp_lcs/latcaps_%s_%03d.pt" % (CLASS.lower(), count))
                count += 1
                if (count + 1) % 50 == 0: print(count + 1)

    else:
        pass
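
The tensors saved above can be reloaded one shape at a time; a minimal sketch (the class name 'chair' and index 0 are placeholders for whatever was actually saved):

lat = torch.load("tmp_lcs/latcaps_chair_000.pt")  # (latent_caps_size, latent_vec_size)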
Example #4
def main():
    USE_CUDA = True
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    capsule_net = PointCapsNet(opt.prim_caps_size, opt.prim_vec_size,
                               opt.latent_caps_size, opt.latent_vec_size,
                               opt.num_points)

    if opt.model != '':
        capsule_net.load_state_dict(torch.load(opt.model))
    else:
        print('please set the model path')

    if USE_CUDA:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        capsule_net = torch.nn.DataParallel(capsule_net)
        capsule_net.to(device)

    if opt.dataset == 'shapenet_part':
        test_dataset = shapenet_part_loader.PartDataset(classification=True,
                                                        npoints=opt.num_points,
                                                        split='test')
        test_dataloader = torch.utils.data.DataLoader(
            test_dataset,
            batch_size=opt.batch_size,
            shuffle=True,
            num_workers=4)
    elif opt.dataset == 'shapenet_core13':
        test_dataset = shapenet_core13_loader.ShapeNet(normal=False,
                                                       npoints=opt.num_points,
                                                       train=False)
        test_dataloader = torch.utils.data.DataLoader(
            test_dataset,
            batch_size=opt.batch_size,
            shuffle=True,
            num_workers=4)
    elif opt.dataset == 'shapenet_core55':
        test_dataset = shapenet_core55_loader.Shapnet55Dataset(
            batch_size=opt.batch_size,
            npoints=opt.num_points,
            shuffle=True,
            train=False)

    # test process for 'shapenet_part' or 'shapenet_core13'
    capsule_net.eval()
    if 'test_dataloader' in locals().keys():
        test_loss_sum = 0
        for batch_id, data in enumerate(test_dataloader):
            points, _ = data
            if (points.size(0) < opt.batch_size):
                break
            points = Variable(points)
            points = points.transpose(2, 1)
            if USE_CUDA:
                points = points.cuda()
            latent_caps, reconstructions = capsule_net(points)
            test_loss = capsule_net.module.loss(points, reconstructions)
            test_loss_sum += test_loss.item()
            print('loss of batch %d is: %f' %
                  (batch_id, test_loss.item()))
        test_loss_sum = test_loss_sum / float(len(test_dataloader))
        print('test loss is: %f' % test_loss_sum)


    # test process for 'shapenet_core55'
    else:
        test_loss_sum = 0
        while test_dataset.has_next_batch():
            batch_id, points_ = test_dataset.next_batch()
            points = torch.from_numpy(points_)
            if (points.size(0) < opt.batch_size):
                break
            points = Variable(points)
            points = points.transpose(2, 1)
            if USE_CUDA:
                points = points.cuda()
            latent_caps, reconstructions = capsule_net(points)
            test_loss = capsule_net.module.loss(points, reconstructions)
            test_loss_sum += test_loss.item()
            print('loss of batch %d is: %f' %
                  (batch_id, test_loss.item()))
        # no dataloader in this branch; average over the batches actually seen
        test_loss_sum = test_loss_sum / float(batch_id + 1)
        print('test loss is: %f' % test_loss_sum)
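
The loss call above delegates to the network's reconstruction loss. For intuition, a naive Chamfer distance between two point sets can be sketched as below; this is an illustrative O(N^2) version, not necessarily the implementation PointCapsNet uses:

def chamfer_sketch(a, b):
    # a, b: (B, 3, N) point clouds, as in this script
    a = a.transpose(2, 1)           # (B, N, 3)
    b = b.transpose(2, 1)           # (B, M, 3)
    d = torch.cdist(a, b) ** 2      # (B, N, M) pairwise squared distances
    return d.min(dim=2)[0].mean() + d.min(dim=1)[0].mean()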
Example #5
def main():
    USE_CUDA = True
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    #capsule_net = BetaPointCapsNet(opt.prim_caps_size, opt.prim_vec_size, opt.latent_caps_size, opt.latent_vec_size, opt.num_points)
    capsule_net = PointCapsNet(opt.prim_caps_size, opt.prim_vec_size, opt.latent_caps_size, opt.latent_vec_size, opt.num_points)
  
    if opt.model != '':
        capsule_net.load_state_dict(torch.load(opt.model))
 
    if USE_CUDA:       
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        capsule_net = torch.nn.DataParallel(capsule_net)
        capsule_net.to(device)
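    # DataParallel prefixes parameter names with 'module.', which is why the
    # checkpoints below are saved via capsule_net.module.state_dict()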

    # create folder to save trained models
    if not os.path.exists(opt.outf):
        os.makedirs(opt.outf)

    # create folder to save logs
    if LOGGING:
        log_dir = './logs/%s_dataset_%dcaps_%dvec_batch_size_%d' % (
            opt.dataset, opt.latent_caps_size, opt.latent_vec_size, opt.batch_size)
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        logger = Logger(log_dir)

    # select dataset    
    if opt.dataset=='shapenet_part':
        train_dataset = shapenet_part_loader.PartDataset(classification=True, npoints=opt.num_points, split='train')
        train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=opt.batch_size, shuffle=True, num_workers=4)        
    elif opt.dataset=='shapenet_core13':
        train_dataset = shapenet_core13_loader.ShapeNet(normal=False, npoints=opt.num_points, train=True)
        train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=opt.batch_size, shuffle=True, num_workers=4)
    elif opt.dataset=='shapenet_core55':
        train_dataset = shapenet_core55_loader.Shapnet55Dataset(batch_size=opt.batch_size, npoints=opt.num_points, shuffle=True, train=True)

    # BVAE CONFIGURATIONS HARDCODING
    #loss_mode = 'gaussian' # loss_mode was decoder_list in bVAE
    loss_mode = 'chamfer' 

    loss_objective = "H" # Higgin et al "H", or Burgess et al "B"

    C_max = 25          # default 25, pending addition to args
    C_stop_iter = 1e5   # default 1e5, pending addition to args
    global_iter = 0     # iteration count
    C_max = Variable(torch.FloatTensor([C_max]).cuda()) # use_cuda = True

    gamma = 1000        # default 1000, pending addition to args
    beta = 4            # default 4, pending addition to args
    w_beta = 0.5        # weight assigned to beta loss against reconstruction loss (chamfer distance)
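    # Under the Burgess et al. "B" objective, the KL capacity C grows linearly
    # from 0 to C_max over C_stop_iter iterations,
    #     C(t) = min(C_max * t / C_stop_iter, C_max),
    # and the penalty is gamma * |KL - C| (see the shapenet_core55 branch below)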


    # training process for 'shapenet_part' or 'shapenet_core13'
    #capsule_net.train()
    if 'train_dataloader' in locals().keys():
        for epoch in range(opt.n_epochs+1):
            if epoch < 50:
                optimizer = optim.Adam(capsule_net.parameters(), lr=0.01)
            elif epoch<150:
                optimizer = optim.Adam(capsule_net.parameters(), lr=0.001)
            else:
                optimizer = optim.Adam(capsule_net.parameters(), lr=0.0001)

            capsule_net.train()
            train_loss_sum, recon_loss_sum, beta_loss_sum = 0, 0, 0

            for batch_id, data in enumerate(train_dataloader):
                global_iter += 1

                points, _= data
                if(points.size(0)<opt.batch_size):
                    break
                points = Variable(points)
                points = points.transpose(2, 1)
                if USE_CUDA:
                    points = points.cuda()
    
                optimizer.zero_grad()
                
                # ---- CRITICAL PART: new train loss computation (train_loss in bVAE was beta_vae_loss)
                #x_recon, latent_caps, caps_recon, logvar = capsule_net(points) # returns x_recon, latent_caps, caps_recon, logvar
                latent_capsules, x_recon = capsule_net(points)
                recon_loss = reconstruction_loss(points, x_recon, "chamfer") # RECONSTRUCTION LOSS
                #caps_loss = reconstruction_loss(latent_caps, caps_recon, "mse")
                #total_kld, _, _ = kl_divergence(latent_caps, logvar) # DIVERGENCE

                #if loss_objective == 'H':
                #    beta_loss = beta * total_kld
                #elif loss_objective == 'B':
                #    C = torch.clamp(C_max/C_stop_iter*global_iter, 0, C_max.data[0])
                #    beta_loss = gamma*(total_kld-C).abs()

                # sum of losses
                #beta_total_loss = beta_loss.sum()
                #train_loss = 0.7 * recon_loss + 0.2 * caps_loss + 0.1 * beta_total_loss # LOSS (can be weighted)
                
                # original train loss computation
                #train_loss = capsule_net.module.loss(points, x_recon)
                train_loss = recon_loss
                #train_loss.backward()

                # PyTorch requires a scalar loss for backward()
                train_loss.backward()
                optimizer.step()
                train_loss_sum += train_loss.item()

                # ---- END OF CRITICAL PART ----
                
                if LOGGING:
                    info = {'train loss': train_loss.item()}
                    for tag, value in info.items():
                        logger.scalar_summary(
                            tag, value, (len(train_dataloader) * epoch) + batch_id + 1)                
              
                if batch_id % 50 == 0:
                    print('batch_no: %d / %d, train_loss: %f ' %  (batch_id, len(train_dataloader), train_loss.item()))
    
            print('\nAverage train loss of epoch %d : %f\n' %\
                (epoch, (train_loss_sum / len(train_dataloader))))

            if epoch % 5 == 0:
                dict_name = "%s/%s_dataset_%dcaps_%dvec_%d.pth"%\
                    (opt.outf, opt.dataset, opt.latent_caps_size, opt.latent_vec_size, epoch)
                torch.save(capsule_net.module.state_dict(), dict_name)

    # training process for 'shapenet_core55' (NOT UP-TO-DATE)
    else:
        for epoch in range(opt.n_epochs+1):
            if epoch < 20:
                optimizer = optim.Adam(capsule_net.parameters(), lr=0.001)
            elif epoch<50:
                optimizer = optim.Adam(capsule_net.parameters(), lr=0.0001)
            else:
                optimizer = optim.Adam(capsule_net.parameters(), lr=0.00001)
        
            #capsule_net.train()
            train_loss_sum, recon_loss_sum, beta_loss_sum = 0, 0, 0

            while train_dataset.has_next_batch():
                global_iter += 1

                batch_id, points_= train_dataset.next_batch()
                points = torch.from_numpy(points_)
                if(points.size(0)<opt.batch_size):
                    break
                points = Variable(points)
                points = points.transpose(2, 1)
                if USE_CUDA:
                    points = points.cuda()

                optimizer.zero_grad()

                # ---- CRITICAL PART: this branch still uses the old bVAE
                # interface, whereas the current net returns (latent_caps, x_recon)
                x_recon, mu, logvar = capsule_net(points)
                recon_loss = reconstruction_loss(points, x_recon, loss_mode)
                total_kld, dim_wise_kld, mean_kld = kl_divergence(mu, logvar)

                if loss_objective == 'H':
                    beta_loss = beta*total_kld
                elif loss_objective == 'B':
                    C = torch.clamp(C_max/C_stop_iter*global_iter, 0, C_max.data[0])
                    beta_loss = gamma*(total_kld-C).abs() 

                train_loss = ((1-w_beta) * recon_loss + w_beta * beta_loss).sum()
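                # convex combination: w_beta trades the KL (beta) penalty off
                # against the Chamfer reconstruction term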

                train_loss.backward()
                optimizer.step()
                train_loss_sum += train_loss.item()
                recon_loss_sum += recon_loss.item()
                beta_loss_sum += beta_loss.sum().item()
                # ---- END OF CRITICAL PART ----       

                if LOGGING:
                    info = {'train_loss': train_loss.item()}
                    for tag, value in info.items():
                        logger.scalar_summary(
                            tag, value, (int(57448/opt.batch_size) * epoch) + batch_id + 1)
                    
                if batch_id % 50 == 0:
                    print('batch_no: %d / %d at epoch %d; train_loss: %f ' %  (batch_id, int(57448/opt.batch_size),epoch,train_loss.item() )) # the dataset size is 57448
            
            print('Average train loss of epoch %d : %f' % \
                (epoch, (train_loss_sum / int(57448/opt.batch_size))))   
            print("Average reconstruction loss (10x): %f, beta loss (1e4x): %f" % \
                (recon_loss_sum * 100 / int(57448/opt.batch_size), beta_loss_sum * 10000 / int(57448/opt.batch_size)) )

            train_dataset.reset()

            if epoch % 5 == 0:
                dict_name = "%s/%s_dataset_%dcaps_%dvec_%d.pth"%\
                    (opt.outf, opt.dataset, opt.latent_caps_size, opt.latent_vec_size, epoch)
                torch.save(capsule_net.module.state_dict(), dict_name)