Example #1
0
 def evaluate_iou(self, images, voxels):
     """Return per-sample voxel IoU between reconstructed meshes and ground truth.

     Images are encoded, decoded into a mesh, voxelized at 32^3, and the
     intersection-over-union against `voxels` is reduced over the spatial axes.
     """
     latent = self.encoder(images)
     vertices, faces = self.decoder(latent)
     face_vertices = neural_renderer.vertices_to_faces(vertices, faces).data
     # Map mesh coordinates into the 32^3 voxel-grid frame (normalization).
     face_vertices = face_vertices * 1. * (32. - 1) / 32. + 0.5
     voxels_predicted = voxelization.voxelize(face_vertices, 32, False)
     # voxels_predicted = voxels_predicted.transpose((0, 2, 1, 3))[:, :, :, ::-1]
     intersection = (voxels * voxels_predicted).sum((1, 2, 3))
     union = (0 < (voxels + voxels_predicted)).sum((1, 2, 3))
     return intersection / union
    def evaluate_iou(self, images, voxels):
        """Return per-sample voxel IoU of decoded shapes against `voxels`.

        The decoded mesh is voxelized at 32^3 resolution; the last voxel
        axis is reversed to match the ground-truth orientation.
        """
        vertices, faces = self.decode_shape(self.encode(images))
        face_vertices = vertices[:, faces].data
        # Shift into the voxel-grid frame (normalization).
        face_vertices += 0.5
        predicted = voxelization.voxelize(face_vertices, 32)[:, :, :, ::-1]

        intersection = (voxels * predicted).sum((1, 2, 3))
        union = (0 < (voxels + predicted)).sum((1, 2, 3))
        return intersection / union
Example #3
0
 def evaluate_iou(self, images, voxels):
     """Return per-sample voxel IoU between predicted shapes and `voxels`.

     The predicted mesh is voxelized at 32^3, then re-oriented (axis swap
     plus flips) to agree with the ground-truth voxel convention.
     """
     vertices, faces = self.predict_shape(images)
     face_vertices = vertices[:, faces].data
     # Rescale from [-1, 1] into [0, 1] (normalization).
     face_vertices = (face_vertices + 1.0) * 0.5
     predicted = voxelization.voxelize(face_vertices, 32)
     predicted = predicted.transpose((0, 1, 3, 2))[:, ::-1, ::-1, ::-1]
     intersection = (voxels * predicted).sum((1, 2, 3))
     union = (0 < (voxels + predicted)).sum((1, 2, 3))
     return intersection / union
Example #4
0
File: 3D-GAN.py  Project: hiyyg/learn3D
def eval_IoU(encoder,
             mesh_generator,
             dataset_val,
             class_ids=None):
    """Compute per-class and mean voxel IoU over the validation set.

    Both networks are switched to eval mode for the duration of the
    evaluation and restored to train mode before returning.

    Args:
        encoder: image encoder producing latent codes.
        mesh_generator: decoder mapping latent codes to (vertices, faces).
        dataset_val: validation dataset providing per-class batch samplers.
        class_ids: iterable of class ids to evaluate; defaults to
            ``opt.class_ids.split(',')`` resolved at call time.

    Returns:
        float: mean IoU across all evaluated classes.
    """
    if class_ids is None:
        # Resolve at call time: a default evaluated at definition time would
        # freeze opt.class_ids as of module import and be shared across calls.
        class_ids = opt.class_ids.split(',')
    mesh_generator.eval()
    encoder.eval()
    # Accumulated across ALL classes. BUG FIX: this dict used to be
    # re-initialized inside the per-class loop, so the final mean only
    # reflected the last class evaluated.
    ious = {}
    with torch.no_grad():
        for class_id in class_ids:
            loader_val = data.DataLoader(
                dataset_val,
                batch_sampler=data_loader.ShapeNet_sampler_all(
                    dataset_val, opt.batch_size, class_id, nViews),
                num_workers=4)
            iou = 0
            print('%s_%s has %d images, %d batches...' %
                  (dataset_val.set_name, class_id,
                   dataset_val.num_data[class_id] * nViews, len(loader_val)))
            for imgs, _, _, voxels in loader_val:
                real_imgs = Variable(imgs.to(device))
                z = encoder(real_imgs)
                vertices, faces = mesh_generator(z)
                faces = nr.vertices_to_faces(vertices, faces).data
                # Map mesh coordinates into the 32^3 voxel-grid frame.
                faces = faces * 1. * (32. - 1) / 32. + 0.5  # normalization
                voxels_predicted = voxelization.voxelize(faces, 32, False)
                # Axis conventions differ between dataset releases.
                if opt.dataset == 'CVPR18':
                    voxels_predicted = voxels_predicted.transpose(1,
                                                                  2).flip([3])
                elif opt.dataset == 'NIPS17':
                    voxels_predicted = voxels_predicted.transpose(1,
                                                                  3).flip([1])

                # Move to CPU once instead of twice per batch.
                predicted_cpu = voxels_predicted.cpu()
                intersection = (voxels * predicted_cpu).float().sum((1, 2, 3))
                union = (0 < (voxels + predicted_cpu)).float().sum((1, 2, 3))
                iou += (intersection / union).sum()
            iou /= dataset_val.num_data[class_id] * nViews
            print('%s/iou_%s: %f' %
                  (dataset_val.set_name, class_id, iou.item()))
            ious['%s/iou_%s' % (dataset_val.set_name, class_id)] = iou.item()
        iou_mean = np.mean([float(v) for v in ious.values()])
        ious['%s/iou' % dataset_val.set_name] = iou_mean
        print('%s/iou: %f' % (dataset_val.set_name, iou_mean))

        mesh_generator.train()
        encoder.train()
        return iou_mean
Example #5
0
def run():
    """Evaluate a trained mesh-reconstruction model and dump sampled point
    clouds and voxel grids for every object in the evaluation split.

    Command-line driven: expects 'shapenet' or 'pascal' in sys.argv to select
    the dataset-specific argument set, plus a required -eid/--experiment_id.
    Outputs .npy files (reference/predicted points and voxels) per object.
    """
    directory_shapenet = '/data/unagi0/kato/datasets/ShapeNetCore.v1'
    directory_rendering = '/home/mil/kato/large_data/lsm/shapenet_release'
    class_list_shapenet = [
        '02691156', '02828884', '02933112', '02958343', '03001627', '03211117',
        '03636649', '03691459', '04090263', '04256520', '04379243', '04401088',
        '04530566'
    ]
    # NOTE(review): this immediately overrides the 13-class list above with a
    # 10-class subset — confirm the narrowing is intentional.
    class_list_shapenet = [
        '02958343', '03001627', '03211117', '03636649', '03691459', '04090263',
        '04256520', '04379243', '04401088', '04530566'
    ]
    class_list_pascal = ['aeroplane', 'car', 'chair']
    skip_ids = [
        '187f32df6f393d18490ad276cd2af3a4',  # invalid image
        '391fa4da294c70d0a4e97ce1d10a5ae6',  # invalid image
        '50cdaa9e33fc853ecb2a965e75be701c',  # failed to load
    ]

    parser = argparse.ArgumentParser()

    # system
    parser.add_argument('-eid', '--experiment_id', type=str, required=True)
    parser.add_argument('-md',
                        '--model_directory',
                        type=str,
                        default='./data/models')
    parser.add_argument('-dd',
                        '--dataset_directory',
                        type=str,
                        default='./data/dataset')
    parser.add_argument('-rs', '--random_seed', type=int, default=0)
    parser.add_argument('-g', '--gpu', type=int, default=0)

    # components
    parser.add_argument('-vs', '--vertex_scaling', type=float, default=0.01)
    parser.add_argument('-ts', '--texture_scaling', type=float, default=1)

    # training
    parser.add_argument('-bs', '--batch_size', type=int, default=20)
    parser.add_argument('-nt', '--no_texture', type=int, default=1)

    if 'shapenet' in sys.argv:
        # shapenet
        # BUG FIX: the default was the misspelled 'shapnet', so the later
        # `args.dataset == 'shapenet'` checks never matched and the code
        # silently fell through to the Pascal dataset loader.
        parser.add_argument('-ds', '--dataset', type=str, default='shapenet')
        parser.add_argument('-cls',
                            '--class_ids',
                            type=str,
                            default=','.join(class_list_shapenet))
        parser.add_argument('-sym', '--symmetric', type=int, default=0)

        # components
        parser.add_argument('-et',
                            '--encoder_type',
                            type=str,
                            default='resnet18')
        parser.add_argument('-sdt',
                            '--shape_decoder_type',
                            type=str,
                            default='conv')
        parser.add_argument('-tdt',
                            '--texture_decoder_type',
                            type=str,
                            default='conv')
        parser.add_argument('-dt',
                            '--discriminator_type',
                            type=str,
                            default='shapenet_patch')
    elif 'pascal' in sys.argv:
        # dataset
        parser.add_argument('-ds', '--dataset', type=str, default='pascal')
        parser.add_argument('-cls',
                            '--class_ids',
                            type=str,
                            default=','.join(class_list_pascal))
        parser.add_argument('-sym', '--symmetric', type=int, default=1)

        # components
        parser.add_argument('-et',
                            '--encoder_type',
                            type=str,
                            default='resnet18pt')
        parser.add_argument('-sdt',
                            '--shape_decoder_type',
                            type=str,
                            default='basic_symmetric')
        parser.add_argument('-tdt',
                            '--texture_decoder_type',
                            type=str,
                            default='basic')
        parser.add_argument('-dt',
                            '--discriminator_type',
                            type=str,
                            default='pascal_patch')

    args = parser.parse_args()
    directory_output = os.path.join(args.model_directory, args.experiment_id)
    class_ids = args.class_ids.split(',')

    # set random seed, gpu
    chainer.cuda.get_device(args.gpu).use()
    random.seed(args.random_seed)
    np.random.seed(args.random_seed)
    cp.random.seed(args.random_seed)

    # load dataset
    if args.dataset == 'shapenet':
        dataset = dataset_shapenet.ShapeNet(args.dataset_directory,
                                            class_ids,
                                            'test',
                                            device=args.gpu)
    else:
        dataset = dataset_pascal.Pascal(args.dataset_directory, class_ids,
                                        'test')

    # setup model & optimizer
    if args.dataset == 'shapenet':
        model = models.ShapeNetModel(
            encoder_type=args.encoder_type,
            shape_decoder_type=args.shape_decoder_type,
            texture_decoder_type=args.texture_decoder_type,
            discriminator_type=args.discriminator_type,
            vertex_scaling=args.vertex_scaling,
            texture_scaling=args.texture_scaling,
            silhouette_loss_levels=0,
            lambda_silhouettes=0,
            lambda_textures=0,
            lambda_perceptual=0,
            lambda_inflation=0,
            lambda_discriminator=0,
            lambda_graph_laplacian=0,
            lambda_edge_length=0,
            single_view_training=False,
            class_conditional=False,
            iterative_optimization=False,
            discriminator_mode=None,
            symmetric=args.symmetric,
            no_texture=args.no_texture,
            num_views=20,
        )
    elif args.dataset == 'pascal':
        # NOTE(review): args.silhouette_loss_levels, args.lambda_silhouettes,
        # args.lambda_inflation, args.lambda_discriminator and
        # args.lambda_graph_laplacian are never registered with the parser
        # above, so this branch raises AttributeError as written — confirm
        # whether these flags were dropped from the argument set by mistake.
        model = models.PascalModel(
            encoder_type=args.encoder_type,
            shape_decoder_type=args.shape_decoder_type,
            texture_decoder_type=args.texture_decoder_type,
            discriminator_type=args.discriminator_type,
            vertex_scaling=args.vertex_scaling,
            texture_scaling=args.texture_scaling,
            silhouette_loss_levels=args.silhouette_loss_levels,
            lambda_silhouettes=args.lambda_silhouettes,
            lambda_inflation=args.lambda_inflation,
            lambda_discriminator=args.lambda_discriminator,
            lambda_graph_laplacian=args.lambda_graph_laplacian,
            no_texture=args.no_texture,
        )
    # The discriminator is not needed for evaluation; drop it before loading
    # weights so load_npz(strict=False) skips its parameters.
    del model.discriminator
    chainer.serializers.load_npz(os.path.join(directory_output, 'model.npz'),
                                 model,
                                 strict=False)
    model.to_gpu(args.gpu)

    # Hoisted out of the evaluation loop (was re-imported per batch).
    import voxelization

    with chainer.using_config('enable_backprop', False):
        with chainer.configuration.using_config('train', False):
            for class_id in tqdm.tqdm(dataset.class_ids):

                for batch_num, batch in tqdm.tqdm(
                        enumerate(
                            dataset.get_all_batches_for_evaluation(
                                args.batch_size, class_id))):
                    object_id = dataset.object_ids[class_id][batch_num]
                    filename_ref = '%s/%s/%s/model.obj' % (directory_shapenet,
                                                           class_id, object_id)

                    # Reference mesh -> sampled points / voxels.
                    v_ref, i_ref = neural_renderer.load_obj(
                        filename_ref, normalization=False)
                    v_ref = chainer.cuda.to_gpu(v_ref)
                    i_ref = chainer.cuda.to_gpu(i_ref)
                    f_ref = v_ref[i_ref]
                    p_ref = sample_points(f_ref).get()[0]

                    # Predicted mesh -> sampled points / voxels.
                    images_in, voxels = training.converter(batch)
                    v_p, i_p = model.decode_shape(model.encode(images_in))
                    v_p = v_p.data
                    f_p = v_p[:, i_p]
                    p_p = sample_points(f_p).get()

                    v_r = voxelization.voxelize(f_ref[None, :, :] + 0.5,
                                                32)[0].get()
                    v_p = voxelization.voxelize(f_p + 0.5, 32).get()

                    out_dir = '/home/mil/kato/temp/points/%s/%s' % (
                        args.experiment_id, class_id)
                    # exist_ok guards against a race when several evaluation
                    # jobs share the output tree.
                    os.makedirs(out_dir, exist_ok=True)
                    np.save(
                        '/home/mil/kato/temp/points/%s/%s/%s_pr.npy' %
                        (args.experiment_id, class_id, object_id), p_ref)
                    np.save(
                        '/home/mil/kato/temp/points/%s/%s/%s_pp.npy' %
                        (args.experiment_id, class_id, object_id), p_p)
                    np.save(
                        '/home/mil/kato/temp/points/%s/%s/%s_vr.npy' %
                        (args.experiment_id, class_id, object_id), v_r)
                    np.save(
                        '/home/mil/kato/temp/points/%s/%s/%s_vp.npy' %
                        (args.experiment_id, class_id, object_id), v_p)
                    # Free large arrays promptly to bound peak memory.
                    del p_ref, p_p, v_r, v_p, f_p, i_p, images_in, voxels, f_ref, i_ref