Example #1
    def forward(self, x_input: Tensor) -> Tensor:
        """Compute the forward pass."""
        # pylint: disable=arguments-differ,arguments-renamed

        if self.analog_tile_count() == 1:
            # Single-tile case: one tile holds the full weight matrix.
            out = AnalogFunction.apply(
                self.analog_tile_array[0][0].get_analog_ctx(), x_input,
                self.analog_tile_array[0][0].shared_weights, not self.training)

            if self.weight_scaling_omega:
                out = out * self.analog_tile_array[0][0].out_scaling_alpha

            if self.digital_bias:
                return out + self.bias
            return out

        # Mapped version: the weights are split across a grid of tiles, so
        # the input is split to match the per-tile input (column) sizes.
        last_dim = x_input.ndim - 1
        splits = split(x_input, self.in_sizes, dim=last_dim)
        result = None  # type: Tensor
        for idx, (x, in_tiles) in enumerate(zip(splits, self.analog_tile_array)):
            out_result = []

            for analog_tile in in_tiles:
                output = AnalogFunction.apply(
                    analog_tile.get_analog_ctx(), x,
                    analog_tile.shared_weights, not self.training)

                if self.weight_scaling_omega:
                    output = output * analog_tile.out_scaling_alpha

                out_result.append(output)

            if idx == 0:
                result = cat(out_result, last_dim)
            else:
                # Accumulate the partial products of later input splits.
                result.add_(cat(out_result, last_dim))

        # add bias to final result
        if self.digital_bias:
            return result.add_(self.bias)
        return result
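
The mapped branch above can be sanity-checked against a single dense matmul. The following is a minimal pure-PyTorch sketch of the same split/concatenate/accumulate arithmetic, using plain tensors instead of analog tiles; the split sizes are made up for illustration:

import torch

torch.manual_seed(0)
in_sizes, out_sizes = [3, 2], [4, 1]      # per-tile column / row sizes
weight = torch.randn(sum(out_sizes), sum(in_sizes))
x = torch.randn(7, sum(in_sizes))

expected = x @ weight.T                   # reference: one dense matmul

result = None
col_blocks = torch.split(weight, in_sizes, dim=1)
for idx, (x_part, w_cols) in enumerate(
        zip(torch.split(x, in_sizes, dim=-1), col_blocks)):
    # One output chunk per row tile, concatenated along the last dim ...
    outs = [x_part @ w_rows.T
            for w_rows in torch.split(w_cols, out_sizes, dim=0)]
    partial = torch.cat(outs, dim=-1)
    # ... then the partial products are accumulated across input splits.
    result = partial if idx == 0 else result.add_(partial)

assert torch.allclose(result, expected, atol=1e-5)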
Example #2
    def forward(self, x_input: Tensor) -> Tensor:
        """Compute the forward pass."""
        # pylint: disable=arguments-differ,arguments-renamed

        out = AnalogFunction.apply(
                self.analog_tile.get_analog_ctx(), x_input,
                self.analog_tile.shared_weights, not self.training)

        # Apply the learned output scaling along the last (output) dimension.
        out = self.analog_tile.apply_out_scaling(out, (-1,))

        if self.digital_bias:
            return out + self.bias
        return out
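
The (-1,) argument presumably selects the last dimension of the output. A minimal sketch of what such a per-unit output scaling could look like, assuming one learned scale per output column (the helper name below is hypothetical, not the library API):

import torch

def apply_out_scaling_sketch(out: torch.Tensor, alpha: torch.Tensor,
                             dim: int = -1) -> torch.Tensor:
    """Multiply each slice along `dim` by its learned scale (broadcasts)."""
    shape = [1] * out.dim()
    shape[dim] = -1
    return out * alpha.view(shape)

out = torch.randn(8, 5)
alpha = torch.rand(5)
assert torch.equal(apply_out_scaling_sketch(out, alpha), out * alpha)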
Example #3
    def forward(self, x_input: Tensor) -> Tensor:
        """Computes the forward pass."""

        # pylint: disable=arguments-differ

        def get_size(size: int, i: int) -> int:
            """Calculate the output image sizes"""
            nom = (size + 2 * self.padding[i] - self.dilation[i] *
                   (self.kernel_size[i] - 1) - 1)
            return nom // self.stride[i] + 1

        # Elements per sample (all dimensions except the batch dimension).
        input_size = x_input.numel() / x_input.size(0)
        if not self.fold_indices.numel() or self.input_size != input_size:
            # PyTorch convolution inputs are in NCHW order.
            fold_indices = arange(2, input_size + 2, dtype=float64).detach()
            shape = [1] + list(x_input.shape[1:])
            fold_indices = fold_indices.reshape(*shape)
            unfold = Unfold(kernel_size=self.kernel_size,
                            stride=self.stride,
                            padding=self.padding,
                            dilation=self.dilation)
            fold_indices = unfold(fold_indices).flatten().round().to(
                dtype=int32)

            if self.use_bias:
                out_image_size = fold_indices.numel() // self.out_channels
                fold_indices = cat(
                    (fold_indices, ones(out_image_size, dtype=int32)), 0)

            self.fold_indices = fold_indices.to(x_input.device)

            x_height = x_input.size(2)
            x_width = x_input.size(3)

            d_height = get_size(x_height, 0)
            d_width = get_size(x_width, 1)

            image_sizes = (self.in_channels, x_height, x_width, d_height,
                           d_width)
            self.input_size = input_size
            self.analog_tile.set_indexed(self.fold_indices,
                                         image_sizes)  # type: ignore

        return AnalogFunction.apply(self.analog_tile, x_input, self.weight,
                                    self.bias)
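
The arange-through-Unfold trick above is worth seeing in isolation: unfolding a tensor of sequential markers records, for each patch position, which flattened input element it reads. A standalone sketch with arbitrary shapes (markers start at 2, presumably so that 0 can mark zero-padding and 1 the appended bias entries):

import torch
from torch.nn import Unfold

kernel_size, stride, padding, dilation = (2, 2), (1, 1), (0, 0), (1, 1)
channels, height, width = 1, 4, 4

markers = torch.arange(2, channels * height * width + 2,
                       dtype=torch.float64).reshape(1, channels, height, width)
unfold = Unfold(kernel_size=kernel_size, stride=stride,
                padding=padding, dilation=dilation)
fold_indices = unfold(markers).flatten().round().to(torch.int32)

def out_size(size: int, i: int) -> int:
    """Standard convolution output-size formula, as in get_size()."""
    return (size + 2 * padding[i]
            - dilation[i] * (kernel_size[i] - 1) - 1) // stride[i] + 1

print(out_size(height, 0), out_size(width, 1))  # 3 3
# First 9 entries: the top-left input marker of each of the 9 patches.
print(fold_indices[:9])  # tensor([ 2,  3,  4,  6,  7,  8, 10, 11, 12], ...)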
Example #4
    def forward(self, x_input: Tensor) -> Tensor:
        """Compute the forward pass."""
        # pylint: disable=arguments-differ,arguments-renamed
        return AnalogFunction.apply(self.analog_tile.get_analog_ctx(), x_input,
                                    self.analog_tile.shared_weights,
                                    not self.training)
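
For orientation, here is a structural stand-in for AnalogFunction. This is not the library implementation, just a torch.autograd.Function with the same calling shape, using a plain matmul in place of the analog matrix-vector multiply; it shows why the layer's forward() can be a one-liner (the analog-specific state travels through the context argument):

import torch
from torch import Tensor

class AnalogFunctionSketch(torch.autograd.Function):
    """Stand-in with the same calling shape; NOT the real implementation."""

    @staticmethod
    def forward(ctx, analog_ctx, x_input: Tensor, shared_weights: Tensor,
                is_test: bool) -> Tensor:
        ctx.save_for_backward(x_input, shared_weights)
        # Digital stand-in for the analog matrix-vector multiply.
        return x_input.matmul(shared_weights.t())

    @staticmethod
    def backward(ctx, grad_output: Tensor):
        x_input, shared_weights = ctx.saved_tensors
        # No gradients for the opaque context or the is_test flag.
        return (None, grad_output.matmul(shared_weights),
                grad_output.t().matmul(x_input), None)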
Example #5
    def forward(self, x_input: Tensor) -> Tensor:
        """Compute the forward pass."""
        # pylint: disable=arguments-differ
        return AnalogFunction.apply(self.analog_tile, x_input, self.weight,
                                    self.bias, not self.training)
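
Unlike Examples #1, #2, and #4, this variant hands self.weight and self.bias to apply() directly. Tensor arguments of a torch.autograd.Function receive gradients in backward(), which is how the optimizer reaches those parameters. A quick check, reusing the stand-in class from Example #4 (which takes one fewer argument than the real call here, since it omits the bias):

import torch

w = torch.randn(5, 3, requires_grad=True)
x = torch.randn(2, 3)
out = AnalogFunctionSketch.apply(None, x, w, True)
out.sum().backward()
print(w.grad.shape)  # torch.Size([5, 3]) -- the parameter gets a gradient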