Example #1
def _abs(vector: Vector.Type()):
    abs_vector = numpy_abs(vector)
    i_max = argmax(abs_vector)
    # numpy.intXX types are not subclasses of int, but can be converted to int
    i_max = int(i_max)
    return AbsOutput(vector[i_max], (i_max, ))
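A minimal standalone sketch of the same pattern, assuming ``numpy_abs`` and ``argmax`` are aliases for ``numpy.abs`` and ``numpy.argmax`` (as the import style suggests) and leaving out the project-specific ``Vector.Type()`` and ``AbsOutput`` types:

import numpy as np

vector = np.array([1.0, -7.5, 3.2])              # toy data
i_max = int(np.argmax(np.abs(vector)))           # index of the element with the largest magnitude
print(vector[i_max], (i_max,))                   # -7.5 (1,)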
Example #2
def _abs(matrix: Matrix.Type()):
    abs_matrix = numpy_abs(matrix)
    (i_max, j_max) = unravel_index(argmax(abs_matrix), abs_matrix.shape)
    # i_max and j_max are of type numpy.intXX which is not a subclass of int, but can be converted to int
    (i_max, j_max) = (int(i_max), int(j_max))
    return AbsOutput(matrix[(i_max, j_max)], (i_max, j_max))
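For the matrix case, the key step is mapping the flat index returned by ``argmax`` back to a (row, column) pair with ``unravel_index``. A small sketch with plain NumPy, again assuming ``numpy_abs``/``argmax``/``unravel_index`` come from NumPy:

import numpy as np

matrix = np.array([[1.0, -9.0],
                   [4.0,  2.5]])                      # toy data
flat_index = np.argmax(np.abs(matrix))                # argmax over the flattened array
i_max, j_max = np.unravel_index(flat_index, matrix.shape)
i_max, j_max = int(i_max), int(j_max)                 # numpy integers -> plain ints
print(matrix[i_max, j_max], (i_max, j_max))           # -9.0 (0, 1)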
Example #3
    def get_time_axis(self, step=0):
        if USE_NNUMPY:
            return numpy_abs(self.get_wave(step))
        else:
            shallow_copy = self.get_wave(step).copy()
            for i in range(len(shallow_copy)):
                if shallow_copy[i] < 0:
                    shallow_copy[i] = -shallow_copy[i]
            return shallow_copy
Example #4
    def test_get_set_weights_scaled(self):
        """Test that ``alpha`` affects the ``get_weights()`` scaled."""
        omega = 0.5
        python_tile = self.get_noisefree_tile(5, 4)
        cpp_tile = python_tile.tile
        init_weights = cpp_tile.get_weights().copy()

        # Set the weights in a scaled fashion; this stores alpha on the tile.
        python_tile.set_weights_scaled(from_numpy(init_weights), omega=omega)

        alpha = cpp_tile.get_alpha_scale()
        self.assertEqual(alpha, numpy_abs(init_weights).max() / omega)

        init_weights_scaled = cpp_tile.get_weights()

        assert_array_almost_equal(init_weights, init_weights_scaled * alpha)

        init_weights2 = python_tile.get_weights_scaled()[0].cpu().numpy()
        assert_array_almost_equal(init_weights, init_weights2)
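The relation this test asserts can be checked with plain NumPy and made-up numbers, independent of the tile classes: ``alpha`` is the maximal absolute weight divided by ``omega``, and multiplying the stored (scaled) weights by ``alpha`` recovers the originals.

import numpy as np

omega = 0.5
init_weights = np.array([[0.2, -0.8],
                         [0.4,  0.1]])         # toy weights

weight_max = np.abs(init_weights).max()        # 0.8
alpha = weight_max / omega                     # 1.6
scaled = init_weights / weight_max * omega     # what the tile would store

np.testing.assert_array_almost_equal(init_weights, scaled * alpha)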
Example #5
    def get_time_axis(self, step=0):
        """
        Returns the raw data of the time axis. Please note that the time axis may not have a constant
        time step: LTSpice increases the time step in simulation phases where the values are not
        changing, and decreases it in the parts where more time accuracy is needed.

        :param step: Optional step number if reading a raw file with stepped data.
        :type step: int
        :return: time axis
        :rtype: list[float] or numpy.ndarray
        """
        if USE_NNUMPY:
            return numpy_abs(self.get_wave(step))
        else:
            shallow_copy = self.get_wave(step).copy()
            for i in range(len(shallow_copy)):
                if shallow_copy[i] < 0:
                    shallow_copy[i] = -shallow_copy[i]
            return shallow_copy
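Both branches compute an element-wise absolute value. Assuming ``numpy_abs`` is ``numpy.abs``, the equivalence of the fast path and the pure-Python fallback can be illustrated on a toy waveform:

import numpy as np

wave = np.array([0.0, -1e-9, 2e-9, -3e-9])   # toy samples

fast = np.abs(wave)          # vectorised absolute value

slow = wave.copy()           # fallback: flip negative samples in place
for i in range(len(slow)):
    if slow[i] < 0:
        slow[i] = -slow[i]

np.testing.assert_array_equal(fast, slow)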
Example #6
def mean_abs_f(a, axis=None, weights=None, masked=False):
    """Return the mean of the absolute array, or the means of the
    absolute array along an axis.

    :Parameters:

        a: numpy array_like
            Input array

        axis: `int`, optional
            Axis along which to operate. By default, flattened input is
            used.
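
        weights: numpy array_like, optional
            Weights associated with the values of the array, used to
            compute a weighted mean.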

        masked: `bool`

    :Returns:

        2-tuple of `numpy.ndarray`
            The sample sizes and the means of the absolute values.

    """
    return mean_f(numpy_abs(a), axis=axis, weights=weights, masked=masked)
Example #7
def max_abs_f(a, axis=None, masked=False):
    """Return the maximum of the absolute array, or the maximum of the
    absolute array along an axis.

    :Parameters:

        a: numpy array_like
            Input array

        axis: `int`, optional
            Axis along which to operate. By default, flattened input
            is used.

        masked: `bool`

    :Returns:

        2-tuple of `numpy.ndarray`
            The sample sizes and the maxima of the absolute values.

    """
    return max_f(numpy_abs(a), axis=axis, masked=masked)
Example #8
def min_abs_f(a, axis=None, masked=False):
    '''Return the minimum of the absolute array, or the minima of the
    absolute array along an axis.

    :Parameters:

        a: numpy array_like
            Input array

        axis: `int`, optional
            Axis along which to operate. By default, flattened input is
            used.

        masked: `bool`

    :Returns:

        2-tuple of `numpy.ndarray`
            The sample sizes and the minima of the absolute values.

    '''
    return min_f(numpy_abs(a), axis=axis, masked=masked)
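Examples #6-#8 are thin wrappers that apply ``numpy_abs`` before delegating to ``mean_f``, ``max_f`` and ``min_f``, which are not shown here. With plain NumPy the corresponding absolute-value reductions look roughly like this (a sketch; the masked case uses ``numpy.ma``):

import numpy as np

a = np.array([[-3.0, 1.0],
              [ 2.0, -0.5]])                                # toy data

print(np.abs(a).mean(axis=1))                               # [2.   1.25]
print(np.average(np.abs(a), axis=1, weights=[1.0, 3.0]))    # weighted mean: [1.5   0.875]
print(np.abs(a).max())                                      # 3.0
print(np.abs(a).min(axis=0))                                # [2.  0.5]

# Masked variant: reductions ignore masked elements.
am = np.ma.masked_array(a, mask=[[True, False], [False, False]])
print(np.abs(am).min())                                     # 0.5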
Example #9
def _abs(matrix: Matrix.Type()):
    abs_matrix = numpy_abs(matrix)
    (i_max, j_max) = unravel_index(argmax(abs_matrix), abs_matrix.shape)
    # i_max and j_max are of type numpy.intXX which is not a subclass of int, but can be converted to int
    (i_max, j_max) = (int(i_max), int(j_max))
    return AbsOutput(matrix[(i_max, j_max)], (i_max, j_max))
Example #10
    def set_weights_scaled(
            self,
            weights: Tensor,
            biases: Optional[Tensor] = None,
            realistic: bool = False,
            n_loops: int = 10,
            omega: float = 1.0
    ) -> None:
        r"""Set the tile weights (and biases) in a scaled fashion.

        Similar to :meth:`set_weights`, however, additionally scales the weights
        by a global scale :math:`\alpha`, that is then applied in digital at the
        output of forward and backward pass, and the learning rate for this tile
        is adjusted accordingly.

        The weights are scaled by :math:`\omega/\max_{ij} |w_{ij}|` and the global
        digital factor :math:`\alpha` is set to :math:`\max_{ij} |w_{ij}|/\omega`.

        It can be shown that such a constant factor greatly improves the SNR and
        training accuracy, as the full weight range of the analog devices is
        used. See also `Rasch, Gokmen & Haensch (2019)`_ for more details.

        Caution:
            Using ``get_weights`` will now retrieve the analog weights as stored
            on the tile, *without* applying the global factor. To recover the
            original weights, multiply the result of ``get_weights`` by the
            :math:`\alpha` of this layer, which can be retrieved with
            ``get_alpha_scale()``.

        Args:
            weights: ``[out_size, in_size]`` weight matrix.
            biases: ``[out_size]`` bias vector. This parameter is required if
                ``self.bias`` is ``True``, and ignored otherwise.
            realistic: whether to use the forward and update pass to program the
                weights iteratively, using :meth:`set_weights_realistic`.
            n_loops: number of times the columns of the weights are set in a
                closed-loop manner.
                A value of ``1`` means that all columns in principle receive
                enough pulses to change from ``w_min`` to ``w_max``.
            omega: where the weight max should be mapped in terms of the weight
                range. Note that for ``omega`` larger than the maximal weight of
                the device, weights will get clipped for most devices.

        Returns:
            None.

        Raises:
            ValueError: if the tile has a bias but ``biases`` has not been
                specified.

        .. _`Rasch, Gokmen & Haensch (2019)`: https://arxiv.org/abs/1906.02698
        """
        # Prepare the array expected by the pybind function, appending the
        # biases row if needed.
        weights_numpy = weights.clone().detach().cpu().numpy()

        if self.bias:
            # Create a ``[out_size, in_size (+ 1)]`` matrix.
            if biases is None:
                raise ValueError('Analog tile has a bias, but no bias given')

            biases_numpy = expand_dims(biases.clone().detach().cpu().numpy(), 1)
            combined_weights = concatenate([weights_numpy, biases_numpy], axis=1)
        else:
            # Use only the ``[out_size, in_size]`` matrix.
            combined_weights = weights_numpy

        # Scale the weights.
        weight_max = numpy_abs(combined_weights).max()
        combined_weights = combined_weights / weight_max * omega
        alpha = weight_max / omega
        self.tile.set_alpha_scale(alpha)

        if realistic:
            return self.tile.set_weights_realistic(combined_weights, n_loops)

        return self.tile.set_weights(combined_weights)
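The scaling at the end of this method can be sketched in isolation with plain NumPy and toy numbers (the actual tile calls ``set_alpha_scale``/``set_weights`` are omitted): the combined weights are multiplied by omega / max|w|, and the same factor, stored as alpha = max|w| / omega, recovers the original values when applied digitally.

import numpy as np
from numpy import expand_dims, concatenate

omega = 1.0
weights = np.array([[0.3, -1.5],
                    [0.7,  0.2]])             # toy weight matrix
biases = np.array([0.05, -0.1])               # toy bias vector

# Append the biases as an extra column, as the method does when self.bias is set.
combined = concatenate([weights, expand_dims(biases, 1)], axis=1)

weight_max = np.abs(combined).max()           # 1.5
scaled = combined / weight_max * omega        # entries now lie within [-omega, omega]
alpha = weight_max / omega                    # applied digitally at the tile output

np.testing.assert_array_almost_equal(combined, scaled * alpha)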