예제 #1
0
def create_shapenet_voxel_dataloader(dset_type_, data_base_dir_, batch_size_):
    """Build a shuffled DataLoader over a ShapeNet voxel dataset.

    Args:
        dset_type_: dataset split identifier forwarded to VoxelDataset.
        data_base_dir_: base directory containing the voxel data.
        batch_size_: number of samples per batch.

    Returns:
        A torch DataLoader that shuffles the dataset and loads with 4 workers.
    """
    voxel_dataset = VoxelDataset(
        dset_type=dset_type_,
        data_base_dir=data_base_dir_,
        transform=transforms.Compose([transforms.ToTensor()]))
    return DataLoader(
        voxel_dataset,
        batch_size=batch_size_,
        shuffle=True,
        num_workers=4)
예제 #2
0
        # Extract the epoch identifier embedded in the checkpoint path.
        # NOTE(review): the slice offsets 61 and -3 assume one fixed path
        # layout and a 3-character extension -- confirm against the glob
        # pattern that produced `filename` (defined above this chunk).
        epoch_id = filename[61:-3]
        sdf_net = SDFNet()
        # Strip a leading directory prefix so SDFNet's own loader can
        # resolve the file; presumably drops 'models/' -- TODO confirm.
        sdf_net.filename = filename[7:]
        sdf_net.load()
        sdf_net.eval()

        # Sample 50 point clouds of 2048 points each from the SDF network
        # at voxel resolution 64, conditioned on the given latent codes.
        clouds = sample_point_clouds(sdf_net,
                                     50,
                                     2048,
                                     voxel_resolution=64,
                                     latent_codes=latent_codes)
        np.save('data/chairs/results/voxels_{:s}.npy'.format(epoch_id), clouds)

if 'dataset' in sys.argv:
    from datasets import VoxelDataset
    from torch.utils.data import DataLoader

    # Draw one random batch of 100 validation voxel models and convert
    # them to point clouds for inspection.
    dataset = VoxelDataset.from_split('data/airplanes/voxels_64/{:s}.npy',
                                      'data/airplanes/val.txt')
    loader = DataLoader(dataset, batch_size=100, shuffle=True)
    voxels = next(iter(loader))
    print(voxels.shape)
    clouds = sample_from_voxels(voxels, 2048)
    np.save('data/dataset_airplanes_point_cloud_sample.npy', clouds)

if 'test' in sys.argv:
    import pyrender

    # Display each saved point cloud in an interactive viewer window,
    # one cloud at a time (close the window to advance).
    data = np.load('data/dataset_point_cloud_sample.npy')
    for points in data:
        scene = pyrender.Scene()
        scene.add(pyrender.Mesh.from_points(points))
        pyrender.Viewer(scene, use_raymond_lighting=True, point_size=8)
예제 #3
0
# Iso-surface threshold -- presumably used when extracting a mesh from the
# SDF/voxel volume; confirm against the marching-cubes call site.
SURFACE_LEVEL = 0.011

# Total number of animation frames: TRANSITION_FRAMES frames per sample.
FRAMES = SAMPLE_COUNT * TRANSITION_FRAMES
# Fractional progress through the transitions, one float per frame
# (integer part selects the sample, fractional part the interpolation).
progress = np.arange(FRAMES, dtype=float) / TRANSITION_FRAMES

if USE_VAE:
    from model.autoencoder import Autoencoder, LATENT_CODE_SIZE

    # Encode every voxel model in the dataset with the trained autoencoder
    # to obtain one latent code per shape.
    vae = Autoencoder()
    vae.load()
    vae.eval()
    print("Calculating latent codes...")

    from datasets import VoxelDataset
    from torch.utils.data import DataLoader

    dataset = VoxelDataset.glob('data/chairs/voxels_32/**.npy')
    dataloader = DataLoader(dataset, batch_size=1000, num_workers=8)

    latent_codes = torch.zeros((len(dataset), LATENT_CODE_SIZE))

    with torch.no_grad():
        position = 0
        for batch in tqdm(dataloader):
            latent_codes[position:position + batch.shape[0], :] = vae.encode(
                batch.to(device)).detach().cpu()
            # BUG FIX: advance the write offset after each batch.
            # Previously `position` stayed 0, so every batch overwrote
            # rows [0:batch_size) and all later rows remained zeros.
            position += batch.shape[0]
    latent_codes = latent_codes.numpy()
else:
    # Latent codes were optimized jointly with the SDF network; load them
    # directly from the checkpoint file instead of re-encoding.
    from model.sdf_net import SDFNet, LATENT_CODES_FILENAME
    latent_codes = torch.load(LATENT_CODES_FILENAME).detach().cpu().numpy()

    sdf_net = SDFNet()
예제 #4
0
def create_shapenet_voxel_dataloader(dset_type_,
                                     data_base_dir_,
                                     batch_size_,
                                     subset_size_=None):
    """Create a DataLoader for ShapeNet voxels, optionally over a subset.

    Args:
        dset_type_: dataset split identifier forwarded to VoxelDataset.
        data_base_dir_: base directory containing the voxel data.
        batch_size_: number of samples per batch.
        subset_size_: optional cap on the number of samples; None uses all.

    Returns:
        The DataLoader produced by create_dataloader_from_dataset.
    """
    voxel_dataset = VoxelDataset(dset_type=dset_type_,
                                 data_base_dir=data_base_dir_)
    loader = create_dataloader_from_dataset(voxel_dataset,
                                            batch_size_,
                                            subset_size_)
    return loader
예제 #5
0

# Index of the current progressive-growing stage; selects the voxel
# resolution from RESOLUTIONS below.
ITERATION = int(get_parameter('iteration', 0))
# Continue with model parameters that were previously trained at the SAME iteration
# Otherwise, it will use the model parameters of the previous iteration or initialize randomly at iteration 0
CONTINUE = "continue" in sys.argv

FADE_IN_EPOCHS = 10
BATCH_SIZE = 16
# Presumably the WGAN-GP gradient-penalty coefficient -- confirm against
# the discriminator loss computed later in this file.
GRADIENT_PENALTY_WEIGHT = 10
NUMBER_OF_EPOCHS = int(get_parameter('epochs', 250))

# Voxel grid resolution for this stage, taken from the global schedule.
VOXEL_RESOLUTION = RESOLUTIONS[ITERATION]

# Training split of the chair voxel dataset at the current resolution.
# The doubled braces keep a '{:s}' placeholder for the per-model filename.
dataset = VoxelDataset.from_split(
    'data/chairs/voxels_{:d}/{{:s}}.npy'.format(VOXEL_RESOLUTION),
    'data/chairs/train.txt')
data_loader = DataLoader(dataset,
                         batch_size=BATCH_SIZE,
                         shuffle=True,
                         num_workers=4)


def get_generator_filename(iteration):
    """Return the generator checkpoint filename for the given iteration."""
    return f'hybrid_progressive_gan_generator_{iteration:d}.to'


# NOTE(review): generator is constructed on the CPU here -- confirm whether
# it is moved to the training device later (outside this visible chunk).
generator = SDFNet(device='cpu')
discriminator = Discriminator()
# When starting a fresh iteration (not continuing the same one), warm-start
# the generator from the previous iteration's checkpoint.
if not CONTINUE and ITERATION > 0:
    generator.filename = get_generator_filename(ITERATION - 1)