Example #1
    def __init__(self, camera_fl, camera_size, filter_std, filter_scale):
        """ Initialize a ParticleProjection layer.

        Arguments:
            -camera_fl: The camera focal length in pixels (all pixels are
                        assumed to be square; this layer does not simulate
                        any image warping, e.g. radial distortion).
            -camera_size: 2-tuple with the image width and height in pixels.
            -filter_std: The standard deviation of the Gaussian that is
                         added at each pixel location.
            -filter_scale: Before adding the Gaussian for an individual
                           particle, it is scaled by this value.
        """
        super(ParticleProjection, self).__init__()

        self.camera_size = ec.make_list(camera_size, 2, "camera_size",
                                        "%s > 0",
                                        "isinstance(%s, numbers.Integral)")

        self.camera_fl = ec.check_conditions(camera_fl, "camera_fl", "%s > 0",
                                             "isinstance(%s, numbers.Real)")
        self.filter_std = ec.check_conditions(filter_std, "filter_std",
                                              "%s > 0",
                                              "isinstance(%s, numbers.Real)")
        self.filter_scale = ec.check_conditions(
            filter_scale, "filter_scale", "%s > 0",
            "isinstance(%s, numbers.Real)")

        self.register_buffer(
            "empty_depth_mask",
            torch.ones(1, self.camera_size[1], self.camera_size[0]) *
            MAX_FLOAT)
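The snippet above shows only the constructor, but that is enough to sketch how the layer might be instantiated. In the sketch below the import path is an assumption and the argument values are purely illustrative; the forward-pass signature is not part of this example, so it is not shown.

from SmoothParticleNets import ParticleProjection  # assumed import path

# Construct the projection layer with illustrative values: a 640x480 camera
# with a 540-pixel focal length, and a unit-scale Gaussian splat per particle.
proj = ParticleProjection(camera_fl=540.0,
                          camera_size=(640, 480),
                          filter_std=1.0,
                          filter_scale=1.0)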
Example #2
    def __init__(self,
                 in_channels,
                 out_channels,
                 ndim,
                 kernel_size,
                 dilation,
                 radius,
                 dis_norm=False,
                 kernel_fn='default',
                 with_params=True):
        """ Initialize a Smooth Particle Convolution layer.

        Arguments:
            -in_channels: The number of features for each input particle.
            -out_channels: The number of features to output for each particle.
            -ndim: The dimensionality of the particle's coordinate space.
            -kernel_size: (int or tuple) The shape of the kernel that is placed around each
                          particle. The kernel is centered on the particle, so the size
                          must be odd.
            -dilation: (float or tuple) The spacing between each cell of the kernel.
            -radius: The radius to use when computing the neighbors for each query point.
            -dis_norm: If true, will divide by the particle-to-particle distance in the
                       SPH equation.
            -kernel_fn: The kernel function to use in the SPH equation. Refer to kernels.py
                        for a list and explanation of all available functions.
            -with_params: If true, the parameters weight and bias are registered with
                          PyTorch as parameters. Otherwise they are registered as buffers,
                          meaning they won't be optimized when doing backprop.
        """
        super(ConvSP, self).__init__()
        self.nchannels = ec.check_conditions(
            in_channels, "in_channels", "%s > 0",
            "isinstance(%s, numbers.Integral)")
        self.nkernels = ec.check_conditions(
            out_channels, "out_channels", "%s > 0",
            "isinstance(%s, numbers.Integral)")
        self.ndim = ec.check_conditions(
            ndim, "ndim", "%s > 0",
            "%s < " + str(_ext.spn_max_cartesian_dim()),
            "isinstance(%s, numbers.Integral)")

        self._kernel_size = ec.make_list(kernel_size, ndim, "kernel_size",
                                         "%s >= 0",
                                         "%s %% 2 == 1 # Must be odd",
                                         "isinstance(%s, numbers.Integral)")
        self._dilation = ec.make_list(dilation, ndim, "dilation", "%s >= 0",
                                      "isinstance(%s, numbers.Real)")

        self.radius = ec.check_conditions(radius, "radius", "%s >= 0",
                                          "isinstance(%s, numbers.Real)")

        self.kernel_fn = ec.check_conditions(kernel_fn, "kernel_fn",
                                             "%s in " + str(KERNEL_NAMES))
        self.kernel_fn = KERNEL_NAMES.index(self.kernel_fn)
        self.dis_norm = (1 if dis_norm else 0)

        self.ncells = np.prod(self._kernel_size)

        if with_params:
            self.register_parameter(
                "weight",
                torch.nn.Parameter(
                    torch.Tensor(self.nkernels, self.nchannels, self.ncells)))
            self.register_parameter(
                "bias", torch.nn.Parameter(torch.Tensor(self.nkernels)))
        else:
            self.register_buffer(
                "weight",
                torch.autograd.Variable(
                    torch.Tensor(self.nkernels, self.nchannels, self.ncells)))
            self.register_buffer(
                "bias", torch.autograd.Variable(torch.Tensor(self.nkernels)))

        self.register_buffer("kernel_size", ec.list2tensor(self._kernel_size))
        self.register_buffer("dilation", ec.list2tensor(self._dilation))

        self.nshared_device_mem = -1
        self.device_id = -1
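As with the previous example, only the constructor is shown, so the sketch below is limited to instantiation. The import path is assumed and the argument values are illustrative; the optional arguments are left at the defaults from the signature above. Note that weight and bias are allocated with torch.Tensor(...) in the constructor, i.e. uninitialized, so the class presumably initializes them elsewhere.

from SmoothParticleNets import ConvSP  # assumed import path

# Illustrative construction: 4 input features per particle, 8 output features,
# 3-D coordinates, a 3x3x3 kernel (kernel size must be odd), kernel cells
# spaced 0.05 world units apart, and a 0.1 neighbor-search radius.
conv = ConvSP(in_channels=4,
              out_channels=8,
              ndim=3,
              kernel_size=3,
              dilation=0.05,
              radius=0.1)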
Example #3
    def __init__(self,
                 sdfs,
                 sdf_sizes,
                 out_channels,
                 ndim,
                 kernel_size,
                 dilation,
                 max_distance,
                 with_params=True,
                 compute_pose_grads=False):
        """ Initialize a SDF Convolution layer.

        Arguments:
            -sdfs: List of SDFs. Each SDF must be an ndim-dimensional float tensor, with
                   the value of the SDF evaluated at the center of each cell in the grid.
            -sdf_sizes: List of the size of one side of a grid cell in each SDF in sdfs.
            -out_channels: The number of features to output for each query location.
            -ndim: The dimensionality of the coordinate space.
            -kernel_size: (int or tuple) The shape of the kernel that is placed around each
                          query location. The kernel is centered on the location, so the
                          size must be odd.
            -dilation: (float or tuple) The spacing between each cell of the kernel.
            -max_distance: A cap on the maximum SDF value, i.e., the SDF value at any 
                           point p is min(min_i SDF_i(p), max_distance).
            -with_params: If true, the parameters weight and bias are registered with
                          PyTorch as parameters. Otherwise they are registered as buffers,
                          meaning they won't be optimized when doing backprop.
            -compute_pose_grads: If False, will not compute gradients w.r.t. the SDF poses
                                 during backpropagation. This is done to increase speed when
                                 gradients are not needed for the SDF poses.
        """
        super(ConvSDF, self).__init__()
        self.nkernels = ec.check_conditions(
            out_channels, "out_channels", "%s > 0",
            "isinstance(%s, numbers.Integral)")
        self.ndim = ec.check_conditions(
            ndim, "ndim", "%s > 0",
            "%s < " + str(_ext.spn_max_cartesian_dim()),
            "%s in [1, 2, 3] # Only 1-, 2-, and 3-D are suported",
            "isinstance(%s, numbers.Integral)")
        self.max_distance = ec.check_conditions(
            max_distance, "max_distance", "%s >= 0",
            "isinstance(%s, numbers.Real)")

        self._kernel_size = ec.make_list(kernel_size, ndim, "kernel_size",
                                         "%s >= 0",
                                         "%s %% 2 == 1 # Must be odd",
                                         "isinstance(%s, numbers.Integral)")
        self._dilation = ec.make_list(dilation, ndim, "dilation", "%s >= 0",
                                      "isinstance(%s, numbers.Real)")

        self.register_buffer("sdfs", torch.zeros(1))
        self.register_buffer("sdf_shapes", torch.zeros(1))
        self.register_buffer("sdf_offsets", torch.zeros(1))
        self.SetSDFs(sdfs, sdf_sizes)

        self.ncells = np.prod(self._kernel_size)
        if with_params:
            self.register_parameter(
                "weight",
                torch.nn.Parameter(torch.Tensor(self.nkernels, self.ncells)))
            self.register_parameter(
                "bias", torch.nn.Parameter(torch.Tensor(self.nkernels)))
        else:
            self.register_buffer(
                "weight",
                torch.autograd.Variable(
                    torch.Tensor(self.nkernels, self.ncells)))
            self.register_buffer(
                "bias", torch.autograd.Variable(torch.Tensor(self.nkernels)))

        self.compute_pose_grads = (True if compute_pose_grads else False)

        self._kernel_size = ec.list2tensor(self._kernel_size)
        self._dilation = ec.list2tensor(self._dilation)

        self.register_buffer("kernel_size", self._kernel_size)
        self.register_buffer("dilation", self._dilation)