Example #1
0
    def _raw_linear_transform(self, X, traits=None, user_input=None):
        """
        Do linear_transform of X, and return both raw OPU output and decoded output in a tuple.

        Parameters
        ----------
        X: np.ndarray or torch.Tensor
            input vector, or batch of input vectors.
        traits: optional
            input traits describing X; when None, taken from the current
            runner (fit1d or fit2d must have been called beforehand).
        user_input: OpuUserInput, optional
            pre-built wrapper around X; when None, built here from X and traits.

        Returns
        -------
        tuple (rp_opu, result_ctx)
            rp_opu: raw OPU output of the transform.
            result_ctx: decoded linear output (in simulated mode this is
            rp_opu itself; otherwise a ContextArray carrying rp_opu's context).
        """

        if traits is None:
            assert self._runner, "Call fit1d or fit2d before linear_transform"
            traits = self._runner.traits
        if user_input is None:
            user_input = OpuUserInput.from_traits(X, traits)

        if self._s.simulated:
            # Simulated OPU: no physical encoding step, feed X through as-is
            prepared_X = X
        else:
            # Linear reconstruction needs full-frame acquisitions; online mode
            # and partial ROI would make the decoded result incorrect.
            assert self.device.acq_state.value != AcqState.online.value, \
                "Can't do linear transform when acquisition is" \
                " in online mode, only single vectors"
            assert self._runner.t.input_roi_strategy == InputRoiStrategy.full, \
                "ROI strategy must be full for linear_transform to be correct.\n" \
                "Set input_roi_strategy attribute to InputRoiStrategy.full."

            # X2 is now numpy 2D, whatever the initial shape and the type (torch or numpy)
            X2 = user_input.reshape_input(raveled_features=True,
                                          leave_single_dim=True)

            # Imported lazily: only some lightonopu versions ship this module
            try:
                import lightonopu.linear_reconstruction as reconstruction
            except ImportError:
                raise RuntimeError(
                    "Need a lightonopu version with linear_reconstruction module"
                )

            start = time.time()
            prepared_X = reconstruction.encode_batch(X2)
            self._trace(f"Encoding time {time.time() - start} s")
            # Restore the dimension after batch encoding to something suitable for formatting
            prepared_X = user_input.unravel_features(prepared_X)
        # Run the OPU transform
        prepared_input = OpuUserInput.from_traits(prepared_X, traits)
        start = time.time()
        with self.device.acquiring(n_images=self._s.n_samples_by_pass):
            rp_opu = self._runner.transform(prepared_input, linear=True)
        self._trace(f"Transform time {time.time() - start} s")

        if self._s.simulated:
            # Simulated output is already the linear result, no decoding needed
            result_ctx = rp_opu
        else:
            # Decoding forgets about the context, re-add it to result afterwards
            start = time.time()
            result = reconstruction.decode_batch(rp_opu)
            self._trace(f"Decoding time {time.time() - start} s")

            result_ctx = ContextArray(result, rp_opu.context)
        return rp_opu, result_ctx
Example #2
0
    def transform(self,
                  X,
                  encoder_cls=NoEncoding,
                  decoder_cls=NoDecoding) -> TransformOutput:
        """
        Performs the nonlinear random projections of one or several input vectors.

        `fit1d` or `fit2d` must have been called beforehand to set the vector
        dimensions (and the online option, if single-vector streaming is needed).

        Parameters
        ----------
        X:  np.ndarray or torch.Tensor
            input vector, or batch of input vectors.
            Each vector must have the same dimensions as the one given in `fit1d` or `fit2d`.
        encoder_cls: encoder.base.BaseTransformer, optional
            class or instance of class that transform the input into binary vectors to be processed by the opu.
        decoder_cls: encoder.base.BaseTransformer, optional
            class or instance of class that transforms the output of the opu back into the appropriate format.

        Returns
        -------
        Y: np.ndarray or torch.Tensor
             complete array of nonlinear random projections of X,
             of size self.n_components.
             If input is an ndarray, the actual type is ContextArray,
             whose context attribute carries metadata.
        """
        assert self._runner, "Call fit1d or fit2d before transform"
        assert self.device.active, "OPU device isn't active, use opu.open() or \"with opu:\""

        # Accept either an encoder class (instantiate it) or a ready instance
        encoder = encoder_cls() if inspect.isclass(encoder_cls) else encoder_cls

        X_enc = encoder.transform(X)

        user_input = OpuUserInput.from_traits(X_enc, self._runner.traits)
        self._debug(str(user_input))

        device_batch = user_input.is_batch and not self._s.simulated
        if device_batch:
            # A batch on the real device needs acquisition started up-front,
            # which is incompatible with online (single-vector) mode.
            assert self.device.acq_state.value != AcqState.online.value, \
                "Can't transform a batch of vectors when acquisition is" \
                " in online mode, only single vectors"
            with self.device.acquiring(n_images=self._s.n_samples_by_pass):
                out = self._runner.transform(user_input)
        else:
            out = self._runner.transform(user_input)
        return self._post_transform(out, user_input, encoder, decoder_cls)
Example #3
0
    def linear_transform(self,
                         X,
                         encoder_cls=NoEncoding,
                         decoder_cls=NoDecoding) -> TransformOutput:
        """
        Do a linear transform of X, for Nitro (non-linear) photonic cores.

        Parameters
        ----------
        X:  np.ndarray or torch.Tensor
            input vector, or batch of input vectors.
            Each vector must have the same dimensions as the one given in `fit1d` or `fit2d`.
        encoder_cls: encoding.base.BaseTransformer, optional
            class or instance of class that transform the input into binary vectors to be processed by the opu.
        decoder_cls: encoding.base.BaseTransformer, optional
            class or instance of class that transforms the output of the opu back into the appropriate format.

        Returns
        -------
        Y: np.ndarray or torch.Tensor
             complete array of linear projections of X,
             of size self.n_components
             If input is an ndarray, type is actually ContextArray,
             with a context attribute to add metadata
        """
        assert self._runner, "Call fit1d or fit2d before linear_transform"
        traits = self._runner.traits

        if traits.packed:
            # TODO implement for packed
            raise RuntimeError(
                "Linear transform isn't yet implemented for packed input :/")

        # Accept either an encoder class (instantiate it) or a ready instance
        if inspect.isclass(encoder_cls):
            encoder = encoder_cls()
        else:
            encoder = encoder_cls

        X_enc = encoder.transform(X)

        user_input = OpuUserInput.from_traits(X_enc, traits)
        # Raw OPU output is discarded here; only the decoded result is used
        _, result_ctx = self._raw_linear_transform(X_enc, traits, user_input)
        # Decoding, add context, and optional convert back to torch if needed
        output = self._post_transform(result_ctx, user_input, encoder,
                                      decoder_cls)
        # Rescale the output, intentionally after the decoding step
        if self.rescale is OutputRescaling.variance:
            n_features = user_input.n_features_s
            output = output / (self._s.stdev * sqrt(n_features))
        elif self.rescale is OutputRescaling.norm:
            output = output / (self._s.stdev * sqrt(self.n_components))
        return output
Example #4
0
    def transform(self, X) -> TransformOutput:
        """
        Performs the nonlinear random projections of one or several input vectors.

        `fit1d` or `fit2d` must have been called beforehand to set the vector
        dimensions (and the online option, if vectors are transformed one by one).

        Parameters
        ----------
        X:  np.ndarray or torch.Tensor
            input vector, or batch of input vectors.
            Each vector must have the same dimensions as the one given in `fit1d` or `fit2d`.

        Returns
        -------
        Y: np.ndarray or torch.Tensor
             complete array of nonlinear random projections of X,
             of size self.n_components.
             If input is an ndarray, the actual type is ContextArray,
             whose context attribute carries metadata.
        """
        assert self._runner, "Call fit1d or fit2d before transform"
        assert self.device.active, "OPU device isn't active, use opu.open() or \"with opu:\""

        user_input = OpuUserInput.from_traits(X, self._runner.traits)
        self._debug(str(user_input))

        if not user_input.is_batch:
            # Single vector: transform directly, no batch acquisition needed
            out = self._runner.transform(user_input)
        else:
            # Batch input requires acquisition started up-front, which online
            # (single-vector) mode doesn't allow.
            assert self.device.acq_state != AcqState.online, \
                "Can't transform a batch of vectors when acquisition is" \
                " in online mode, only single vectors"
            with self.device.acquiring(n_images=self._s.n_samples_by_pass):
                out = self._runner.transform(user_input)

        Y = user_input.reshape_output(out)
        if not user_input.is_tensor:
            return Y
        # Tensor input: hand back a torch tensor in CPU memory
        # noinspection PyPackageRequirements
        import torch
        return torch.from_numpy(Y)