Example #1
    def forward(self, x_input: Tensor) -> Tensor:
        """Compute the forward pass."""
        # pylint: disable=arguments-differ,arguments-renamed

        input_size = x_input.numel() / x_input.size(0)
        if self.input_size != input_size:
            self.recalculate_indexes(x_input)

        if self.analog_tile_count() == 1:
            analog_tile = self.analog_tile_array[0][0]
            output = AnalogIndexedFunction.apply(analog_tile.get_analog_ctx(),
                                                 x_input,
                                                 analog_tile.shared_weights,
                                                 not self.training)

            if self.weight_scaling_omega:
                alpha = analog_tile.out_scaling_alpha
                if self.weight_scaling_omega_columnwise:
                    alpha = self.get_tensor_view(self.out_channels, alpha)
                output = output * alpha

            if self.digital_bias:
                digital_bias = self.get_tensor_view(self.out_channels,
                                                    self.bias)
                return output + digital_bias
            return output

        # mapped version: the layer weights are spread over a grid of
        # tiles, so split the input along the channel dimension and
        # accumulate the partial result of each tile row
        channel_dim = 1
        splits = split(x_input, self.in_sizes, dim=channel_dim)
        result = None  # type: Tensor
        for idx, (x, in_tiles) in enumerate(zip(splits,
                                                self.analog_tile_array)):
            out_result = []

            for analog_tile in in_tiles:
                output = AnalogIndexedFunction.apply(
                    analog_tile.get_analog_ctx(), x,
                    analog_tile.shared_weights, not self.training)

                if self.weight_scaling_omega:
                    alpha = analog_tile.out_scaling_alpha
                    if self.weight_scaling_omega_columnwise:
                        alpha = self.get_tensor_view(output.size(1), alpha)
                    output = output * alpha

                out_result.append(output)

            if idx == 0:
                result = cat(out_result, channel_dim)
            else:
                result.add_(cat(out_result, channel_dim))

        # add bias to final result
        if self.digital_bias:
            digital_bias = self.get_tensor_view(self.out_channels, self.bias)
            return result + digital_bias
        return result
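
A minimal, library-free sketch of the split/accumulate pattern used in the mapped branch above, with torch.nn.Linear layers standing in for the analog tiles (the helper name and layer sizes below are made up for illustration):

    import torch
    from torch import nn

    def mapped_forward(x, tile_grid, in_sizes):
        """Split the input along dim 1, run each row of 'tiles',
        concatenate each row's outputs and sum the rows."""
        result = None
        for x_part, tile_row in zip(torch.split(x, in_sizes, dim=1),
                                    tile_grid):
            partial = torch.cat([tile(x_part) for tile in tile_row], dim=1)
            result = partial if result is None else result + partial
        return result

    # two input splits of 3 features each, two output splits of 2 each
    tile_grid = [[nn.Linear(3, 2), nn.Linear(3, 2)] for _ in range(2)]
    out = mapped_forward(torch.randn(5, 6), tile_grid, [3, 3])
    print(out.shape)  # torch.Size([5, 4])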
Example #2
    def forward(self, x_input: Tensor) -> Tensor:
        """Computes the forward pass."""
        input_size = x_input.numel() / x_input.size(0)
        if not self.fold_indices.numel() or self.input_size != input_size:
            self.recalculate_indexes(x_input)

        return AnalogIndexedFunction.apply(self.analog_tile, x_input,
                                           self.weight, self.bias,
                                           not self.training)
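
For orientation: AnalogIndexedFunction is a torch.autograd.Function, so the .apply(...) calls in these examples route through a custom forward/backward pair. A schematic stand-in with the same call shape (the class and the matmul placeholder are illustrative only; the real implementation delegates to the analog tile):

    import torch
    from torch.autograd import Function

    class IndexedFunctionSketch(Function):
        """Same argument layout as the apply() call above."""

        @staticmethod
        def forward(ctx, tile, x_input, weight, bias, is_test):
            ctx.save_for_backward(x_input, weight)
            out = x_input @ weight.t()  # placeholder for the indexed tile op
            return out if bias is None else out + bias

        @staticmethod
        def backward(ctx, grad_output):
            x_input, weight = ctx.saved_tensors
            # one gradient per forward argument; non-tensors get None
            return (None, grad_output @ weight,
                    grad_output.t() @ x_input, grad_output.sum(0), None)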
Example #3
    def forward(self, x_input: Tensor) -> Tensor:
        """Compute the forward pass."""
        input_size = x_input.numel() / x_input.size(0)
        if not self.fold_indices.numel() or self.input_size != input_size:
            self.recalculate_indexes(x_input)

        out = AnalogIndexedFunction.apply(self.analog_tile.get_analog_ctx(),
                                          x_input,
                                          self.analog_tile.shared_weights,
                                          not self.training)

        out = self.analog_tile.apply_out_scaling(out, self.tensor_view)

        if self.digital_bias:
            return out + self.bias.view(*self.tensor_view)
        return out
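
The tensor_view used above is just a broadcasting shape for per-channel quantities. In isolation (NCHW output assumed, sizes made up):

    import torch

    out = torch.randn(8, 16, 10, 10)  # N, C, H, W
    bias = torch.randn(16)            # one entry per output channel
    tensor_view = (-1, 1, 1)          # channel dim plus spatial singletons
    print((out + bias.view(*tensor_view)).shape)  # torch.Size([8, 16, 10, 10])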
Example #4
    def forward(self, x_input: Tensor) -> Tensor:
        """Computes the forward pass."""

        # pylint: disable=arguments-differ

        def get_size(size: int, i: int) -> int:
            """Calculate the output image sizes"""
            nom = (size + 2 * self.padding[i] - self.dilation[i] *
                   (self.kernel_size[i] - 1) - 1)
            return nom // self.stride[i] + 1

        input_size = x_input.numel() / x_input.size(0)
        if not self.fold_indices.numel() or self.input_size != input_size:
            # PyTorch convolution inputs are in NCHW order; build an index
            # image and unfold it to recover the im2col gather indices.
            # Indices start at 2 because 0 and 1 are reserved: 0 marks
            # zero-padded positions and 1 the bias row.
            fold_indices = arange(2, input_size + 2, dtype=float64).detach()
            shape = [1] + list(x_input.shape[1:])
            fold_indices = fold_indices.reshape(*shape)
            unfold = Unfold(kernel_size=self.kernel_size,
                            stride=self.stride,
                            padding=self.padding,
                            dilation=self.dilation)
            fold_indices = unfold(fold_indices).flatten().round().to(
                dtype=int32)

            if self.use_bias:
                # the bias row is addressed through index 1, so append one
                # index-1 entry per output image position
                out_image_size = fold_indices.numel() // self.out_channels
                fold_indices = cat(
                    (fold_indices, ones(out_image_size, dtype=int32)), 0)

            self.fold_indices = fold_indices.to(x_input.device)

            x_height = x_input.size(2)
            x_width = x_input.size(3)

            d_height = get_size(x_height, 0)
            d_width = get_size(x_width, 1)

            image_sizes = [
                self.in_channels, x_height, x_width, d_height, d_width
            ]
            self.input_size = input_size
            self.analog_tile.set_indexed(self.fold_indices,
                                         image_sizes)  # type: ignore

        return AnalogIndexedFunction.apply(self.analog_tile, x_input,
                                           self.weight, self.bias,
                                           not self.training)
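
The index-image trick above can be checked in a few lines: unfolding an image that stores each pixel's (shifted) position yields exactly the im2col gather indices, with 0 appearing wherever zero-padding was inserted (sizes below are made up):

    import torch
    from torch.nn import Unfold

    h = w = 4
    positions = torch.arange(2., h * w + 2).reshape(1, 1, h, w)
    unfold = Unfold(kernel_size=3, stride=1, padding=1)
    patches = unfold(positions)       # shape (1, 9, 16): 9 taps, 16 outputs
    first = patches[0, :, 0].round().to(torch.int32)
    print(first)  # tensor([0, 0, 0, 0, 2, 3, 0, 6, 7]): zeros mark padding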
Example #5
    def forward(self, x_input: Tensor) -> Tensor:
        """Compute the forward pass."""
        input_size = x_input.numel() / x_input.size(0)
        if not self.fold_indices.numel() or self.input_size != input_size:
            self.recalculate_indexes(x_input)

        out = AnalogIndexedFunction.apply(self.analog_tile.get_analog_ctx(),
                                          x_input,
                                          self.analog_tile.shared_weights,
                                          not self.training)

        if self.weight_scaling_omega:
            alpha = self.analog_tile.out_scaling_alpha
            if self.weight_scaling_omega_columnwise:
                alpha = self.get_tensor_view(alpha)
            out = out * alpha

        if self.digital_bias:
            digital_bias = self.get_tensor_view(self.bias)
            return out + digital_bias
        return out
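
The scaling branch above distinguishes a single tile-wide alpha from a per-column (per output channel) alpha; the latter needs a broadcast view, which is what get_tensor_view supplies in the layer. Standalone (NCHW assumed, sizes made up):

    import torch

    out = torch.randn(8, 16, 10, 10)
    # tile-wide scale: plain scalar multiply
    print((out * 0.5).shape)                   # torch.Size([8, 16, 10, 10])
    # columnwise scale: one alpha per output channel, viewed to broadcast
    alpha = torch.rand(16)
    print((out * alpha.view(-1, 1, 1)).shape)  # torch.Size([8, 16, 10, 10])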
Example #6
    def forward(self, x_input: Tensor) -> Tensor:
        """Computes the forward pass."""
        # pylint: disable=arguments-differ,too-many-locals

        def get_size(size: int, i: int) -> int:
            """Calculate the output image sizes"""
            nom = (size + 2 * self.padding[i] - self.dilation[i] *
                   (self.kernel_size[i] - 1) - 1)
            return nom // self.stride[i] + 1

        input_size = x_input.numel() / x_input.size(0)
        if not self.fold_indices.numel() or self.input_size != input_size:
            # PyTorch convolution inputs are in NCDHW order; build an index
            # image for a single channel (indices 0 and 1 are reserved for
            # padding and bias) and unfold it to recover the gather indices
            spatial_size = x_input.size(2) * x_input.size(3) * x_input.size(4)
            fold_indices = arange(2, spatial_size + 2, dtype=float64).detach()
            shape = [1, 1] + list(x_input.shape[2:])
            fold_indices = fold_indices.reshape(*shape)
            if not all(item == 0 for item in self.padding):
                # zero-pad the index image so that padded positions map to
                # the reserved index 0
                fold_indices = pad(fold_indices, pad=(
                    self.padding[2], self.padding[2],
                    self.padding[1], self.padding[1],
                    self.padding[0], self.padding[0]), mode="constant", value=0)
            unfold = (fold_indices
                      .unfold(2, self.kernel_size[0], self.stride[0])
                      .unfold(3, self.kernel_size[1], self.stride[1])
                      .unfold(4, self.kernel_size[2], self.stride[2])
                      .clone())

            fold_indices = unfold.reshape(
                -1, self.kernel_size[0] * self.kernel_size[1] *
                self.kernel_size[2]).transpose(0, 1).flatten().round()

            # replicate the single-channel indices for the remaining input
            # channels, shifting each copy by one channel's worth of pixels
            # (index 0 stays 0, since it marks padding)
            fold_indices_orig = fold_indices.clone()
            for i in range(self.in_channels - 1):
                fold_indices_tmp = fold_indices_orig.clone()
                for j in range(fold_indices_orig.size(0)):
                    if fold_indices_orig[j] != 0:
                        fold_indices_tmp[j] += (input_size / self.in_channels) * (i + 1)

                fold_indices = cat([fold_indices, fold_indices_tmp], dim=0)

            fold_indices = fold_indices.to(dtype=int32)

            if self.use_bias:
                # the bias row is addressed through index 1, so append one
                # index-1 entry per output image position
                out_image_size = fold_indices.numel() // (self.kernel_size[0] *
                                                          self.kernel_size[1] *
                                                          self.kernel_size[2])
                fold_indices = cat(
                    (fold_indices, ones(out_image_size, dtype=int32)), 0)

            self.fold_indices = fold_indices.to(x_input.device)

            x_depth = x_input.size(2)
            x_height = x_input.size(3)
            x_width = x_input.size(4)

            d_depth = get_size(x_depth, 0)
            d_height = get_size(x_height, 1)
            d_width = get_size(x_width, 2)

            image_sizes = [self.in_channels, x_depth, x_height, x_width,
                           d_depth, d_height, d_width]
            self.input_size = input_size
            self.analog_tile.set_indexed(self.fold_indices, image_sizes)  # type: ignore

        return AnalogIndexedFunction.apply(self.analog_tile, x_input, self.weight,
                                           self.bias, not self.training)
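
The chained Tensor.unfold calls above exist because PyTorch's nn.Unfold only supports 4-D (batched image) input, so the 3-D case extracts sliding windows one spatial dimension at a time. A shape check on made-up sizes:

    import torch

    x = torch.arange(2., 4 * 4 * 4 + 2).reshape(1, 1, 4, 4, 4)
    patches = (x.unfold(2, 2, 1)    # depth:  kernel 2, stride 1
                .unfold(3, 2, 1)    # height
                .unfold(4, 2, 1))   # width
    print(patches.shape)  # torch.Size([1, 1, 3, 3, 3, 2, 2, 2])
    taps = patches.reshape(-1, 2 * 2 * 2).transpose(0, 1).flatten()
    print(taps.numel())   # 216 = 27 output positions * 8 kernel taps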