Exemplo n.º 1
0
 def __setstate__(self, state):
     """Restore attributes after unpickling and re-bind logging functions.

     The logging callables are removed at __getstate__ time (they are
     fetched from the lightonml module), so fresh ones are re-acquired
     here after the plain attributes have been restored.
     """
     self.__dict__.update(state)
     import lightonml
     # Re-acquire the debug/trace/print callables stripped at pickle time
     for level in ("debug", "trace", "print"):
         getter = getattr(lightonml, "get_{}_fn".format(level))
         setattr(self, "_" + level, getter())
Exemplo n.º 2
0
    def __init__(self,
                 opu_type: str,
                 frametime_us: int,
                 exposure_us: int,
                 sequence_nb_prelim=0,
                 output_roi: Roi = None,
                 verbose=0,
                 name="opu"):
        """Bind the low-level pybind OPU matching *opu_type* and record the
        acquisition settings to send when the device resource is acquired.

        Parameters
        ----------
        opu_type: str
            one of "type1", "type2" or "type3"; selects which lightonopu
            pybind extension is loaded.
        frametime_us, exposure_us: int
            acquisition timings in µs; coerced to int in case a float is
            passed.
        sequence_nb_prelim: int
            number of preliminary sequences to send.
        output_roi: Roi, optional
            output ROI to apply at acquisition time.
        verbose: int
            verbosity level.
        name: str
            device name.

        Raises
        ------
        TypeError
            if opu_type is not a known OPU type.
        """
        import importlib

        # Per-type hardware traits: (pybind module name, max output shape,
        # output-ROI strategy, ROI increment).  type1 takes a centered
        # square ROI; type2 and type3 take the max width at mid-height.
        try:
            module_name, shape_max, roi_strategy, roi_increment = {
                "type1": ("opu1_pybind", _output1_shape_max,
                          OutputRoiStrategy.mid_square, 8),
                "type2": ("opu2_pybind", _output2_shape_max,
                          OutputRoiStrategy.mid_width, 1),
                "type3": ("opu3_pybind", _output3_shape_max,
                          OutputRoiStrategy.mid_width, 1),
            }[opu_type]
        except KeyError:
            raise TypeError("Don't know this OPU type: " + opu_type) from None

        # Import lazily so only the needed pybind extension is loaded
        pybind = importlib.import_module("lightonopu." + module_name)
        self.__opu = pybind.OPU()
        self._output_shape_max = shape_max
        self._output_roi_strategy = roi_strategy
        self._output_roi_increment = roi_increment

        # context for pid file
        self.pidfile = ExitStack()
        self.opu_type = opu_type
        # "off" fields allow to know what to send at resource acquisition
        # force to int if input is e.g. a float
        self._frametime_us_off = int(frametime_us)
        self._exposure_us_off = int(exposure_us)
        self._gain_dB_off = default_gain_dB
        self._output_roi_off = output_roi
        self._reserved_off = 0
        self._sequence_nb_prelim = sequence_nb_prelim
        self.name = name

        self.verbose = verbose
        from lightonml import get_trace_fn, get_debug_fn
        self._trace = get_trace_fn()
        self._debug = get_debug_fn()

        # forward the pybind transform entry points onto this wrapper
        self.transform1 = self.__opu.transform1
        self.transform2 = self.__opu.transform2
        self.transform_single = self.__opu.transform_single
        self.transform_online = self.__opu.transform_online
        # not every pybind build ships the test entry point
        if hasattr(self.__opu, "transform_online_test"):
            self.transform_online_test = self.__opu.transform_online_test
Exemplo n.º 3
0
    def __init__(self,
                 opu_settings: "OpuSettings",
                 settings: "TransformSettings",
                 traits: InputTraits,
                 device: OpuDevice = None,
                 roi_compute: Callable = None,
                 disable_pbar=False):
        """Prepare a transform run: validate the input traits against the
        OPU limits, pick an input formatter, and pre-adjust the device's
        exposure and frametime.

        Parameters
        ----------
        opu_settings: OpuSettings
            hardware-side settings of the OPU.
        settings: TransformSettings
            user's transform settings.
        traits: InputTraits
            traits of the input to be transformed (feature count/shape).
        device: OpuDevice, optional
            device for running the transform.
        roi_compute: Callable, optional
            function computing the input ROI; defaults to self._roi_compute
            when not supplied by a child class (e.g. FitTransformRunner).
        disable_pbar: bool
            disable the progress bar when True.

        Raises
        ------
        ValueError
            when the input's number of features exceeds the OPU maximum.
        """
        self.s = opu_settings  # OPU settings
        self.t = settings  # User's transform settings
        self.device = device  # Device for running the transform
        self._traits = traits

        # Whether input's n_feature matches the max number of features
        # If yes, no need to format (use plain formatter)
        self.input_matches_max = self._traits.n_features_s == self.s.max_n_features

        # Grab the current logging callables from the lightonml module
        from lightonml import get_print_fn, get_trace_fn, get_debug_fn
        self._print = get_print_fn()
        self._trace = get_trace_fn()
        self._debug = get_debug_fn()
        self.disable_pbar = disable_pbar

        # ones_info says whether the input has sufficient ones ratio
        # can be None if not automatic input_roi, actually filled in FitTransformRunner
        self.ones_info = {}

        # Do input traits checks
        if self._traits.n_features_s > self.s.max_n_features:
            raise ValueError(
                "input's number of features ({}) can't be greater than {}".
                format(self._traits.n_features_s, self.s.max_n_features))

        # Get the function used to compute ROI, uses self if not coming from child FitTransformRunner
        if roi_compute is None:
            roi_compute = self._roi_compute

        # Get a formatter
        if not self.s.simulated:
            self.formatter = self._configure_formatting(roi_compute)
        else:
            # Simulated OPU just needs a formatter that flattens 2D features
            self.formatter = FlatFormatter(self._traits.n_features)

        # Log the chosen formatting so runs can be diagnosed afterwards
        self._debug(
            "Formatter {} with element size {}. Input ROI: {}, {} ".format(
                self.formatter.fmt_type.name, self.formatter.factor,
                self.formatter.roi_offset, self.formatter.roi_size))

        # allocate intermediate buffer for batch
        # batch_size = self._traits.n_samples_s
        # Buffer size must be 0 to allocate a single target
        # buffer_size = batch_size if batch_size > 1 else 0
        # buffer for online mode
        self.buffer = self.formatter.empty_target(0)
        # First adjust exposure
        self._adjust_exposure()
        self._adjust_frametime()
Exemplo n.º 4
0
 def __setstate__(self, state):
     """Rebuild the instance from *state* after unpickling.

     Logging functions and the acquisition ExitStack are removed at
     __getstate__ time, so they are re-created here; an online device
     acquisition is resumed when the pickled state recorded one.
     """
     self.__dict__.update(state)
     # Re-acquire the debug/trace/print callables stripped at pickle time
     for level in ("debug", "trace", "print"):
         getter = getattr(lightonml, "get_{}_fn".format(level))
         setattr(self, "_" + level, getter())
     self._acq_stack = ExitStack()
     # Restore online acquisition if it was active when pickled
     if state.get("__online_acq", False):
         self._acq_stack.enter_context(self.device.acquiring(online=True))
Exemplo n.º 5
0
    def __init__(self, n_features, target_size):
        """Base formatter state: normalize *n_features* to a 1D integer
        numpy array and initialize formatting fields to "none" defaults.

        Parameters
        ----------
        n_features: int, or array-like of int
            number of features, as a scalar or a 1d/2d shape.
        target_size:
            size of the formatting target.

        Raises
        ------
        ValueError
            if n_features isn't an int or an array-like of integers, or
            describes neither a 1d nor a 2d feature shape.
        """
        if isinstance(n_features, (list, tuple, np.ndarray)):
            self.n_features = np.asarray(n_features)
            # Validate with a real exception instead of assert, which is
            # stripped when running under "python -O"
            if not issubclass(self.n_features.dtype.type, np.integer):
                raise ValueError("n_features must be an array of integers")
        elif isinstance(n_features, numbers.Integral):
            self.n_features = np.asarray([n_features])
        else:
            raise ValueError("n_features must be array-like or int")
        if self.features_ndim not in (1, 2):
            raise ValueError("n_features should be 1d or 2d")

        self.target_size = target_size
        # No formatting chosen yet; fields are filled by subclasses
        self.fmt_type = types.FeaturesFormat.none
        self.factor = 0
        self.roi_size = self.roi_offset = (0, 0)
        from lightonml import get_trace_fn
        self._trace = get_trace_fn()
Exemplo n.º 6
0
    def __init__(self,
                 n_components: int = 200000,
                 opu_device: Optional[Union["OpuDevice",
                                            SimulatedOpuDevice]] = None,
                 max_n_features: int = 1000,
                 config_file: str = "",
                 config_override: dict = None,
                 verbose_level: int = -1,
                 input_roi_strategy: types.InputRoiStrategy = types.
                 InputRoiStrategy.full,
                 open_at_init: bool = None,
                 disable_pbar=False,
                 simulated=False,
                 rescale: Union[OutputRescaling,
                                str] = OutputRescaling.variance):
        """Set up the OPU frontend: logging, device (real or simulated),
        output ROI and number of components; optionally open the device.

        Parameters
        ----------
        n_components: int
            target number of output components (also sets the output ROI).
        opu_device: OpuDevice or SimulatedOpuDevice, optional
            pre-built device to use; conflicts with simulated=True.
        max_n_features: int
            maximum number of input features accepted.
        config_file: str
            path to an OPU configuration file ("" = use host config).
        config_override: dict, optional
            entries overriding the loaded configuration.
        verbose_level: int
            deprecated; use lightonml.set_verbose_level instead.
        input_roi_strategy: types.InputRoiStrategy
            strategy used to compute the input ROI.
        open_at_init: bool, optional
            open the device now; None defers to the
            "lightonml_open_at_init" host option.
        disable_pbar: bool
            disable the progress bar when True.
        simulated: bool
            run against a simulated OPU device.
        rescale: OutputRescaling or str
            output rescaling method.

        Raises
        ------
        ValueError
            if both simulated and opu_device are given.
        TypeError
            if opu_device is not an OpuDevice or SimulatedOpuDevice.
        RuntimeError
            when no OPU configuration is found on this host.
        """
        self.__opu_config = None
        self.__config_file = config_file
        self.__config_override = config_override
        self._max_n_features = max_n_features
        self.disable_pbar = disable_pbar
        self.rescale = rescale

        # Get trace and print functions
        if verbose_level != -1:
            warnings.warn(
                "Verbose level arg will be removed in 1.3, "
                "Use lightonml.set_verbose_level instead", DeprecationWarning)
            lightonml.set_verbose_level(verbose_level)
        else:
            verbose_level = lightonml.get_verbose_level()
        self._debug = lightonml.get_debug_fn()
        self._trace = lightonml.get_trace_fn()
        self._print = lightonml.get_print_fn()
        no_config_msg = "No configuration files for the OPU was found on this machine.\n" \
                        "You may want to run the OPU in a simulated manner, by passing the " \
                        "simulated argument to True at init.\n" \
                        "See https://docs.lighton.ai/notes/get_started.html#Simulating-an-OPU " \
                        "for more details.\n" \
                        "See also https://lighton.ai/products for getting access to our technology."

        if simulated and opu_device is not None:
            raise ValueError(
                "simulated and opu_device arguments are conflicting")

        # Device init, or take the one passed as input
        if opu_device:
            # Name-based check so a SimulatedOpuDevice is accepted without
            # importing lightonopu (which may not be installed)
            if type(opu_device).__name__ not in [
                    "SimulatedOpuDevice", "OpuDevice"
            ]:
                raise TypeError(
                    "opu_device must be of type SimulatedOpuDevice or OpuDevice"
                )
            self.device = opu_device
        elif simulated:
            self.device = SimulatedOpuDevice()
        else:
            # Instantiate device directly
            from lightonopu.internal.device import OpuDevice
            if not self.__config_file and not config.host_has_opu_config():
                # Looks like there's no OPU on this host as we didn't find configuration files
                raise RuntimeError(no_config_msg)
            opu_type = self.config["type"]
            frametime_us = self.config["input"]["frametime_us"]
            exposure_us = self.config["output"]["exposure_us"]
            seq_nb_prelim = self.config.get("sequence_nb_prelim", 0)
            name = self.config["name"]
            self.device = OpuDevice(opu_type, frametime_us, exposure_us,
                                    seq_nb_prelim, None, verbose_level, name)

        self._base_frametime_us = self.device.frametime_us
        self._base_exposure_us = self.device.exposure_us

        if self._s.simulated:
            # build the random matrix if not done already
            self._resize_rnd_matrix(max_n_features, n_components)
        else:
            # Make sure lightonopu is at 1.4.1 or later, needed for linear_reconstruction
            pkg_resources.require("lightonopu>=1.4.1")
            # initialize linear_reconstruction library
            from lightonopu import linear_reconstruction
            linear_reconstruction.init(np.prod(self.device.input_shape))

            self._output_roi = output_roi.OutputRoi(
                self.device.output_shape_max, self.device.output_roi_strategy,
                self._s.allowed_roi, self._s.min_n_components)
        # This also sets the output ROI
        self.n_components = n_components
        self.input_roi_strategy = input_roi_strategy
        # Runner initialized when entering fit
        self._runner = None  # type: Optional[TransformRunner]
        # ExitStack for device acquisition, initialized when entering fit
        self._acq_stack = ExitStack()
        self._trace("OPU initialized")

        # Open at init, unless relevant host.json option is False
        if open_at_init is None:
            open_at_init = get_host_option("lightonml_open_at_init", True)
        if open_at_init:
            self.open()
Exemplo n.º 7
0
    def __init__(self, n_components: int = 200000,
                 opu_device: Optional[Union[OpuDevice, SimulatedOpuDevice]] = None,
                 max_n_features: int = 1000, config_file: str = "",
                 config_override: dict = None, verbose_level: int = -1,
                 input_roi_strategy: types.InputRoiStrategy = types.InputRoiStrategy.auto,
                 open_at_init: bool = None, disable_pbar=False):
        """Set up the OPU frontend: logging, device (given or built from
        the host configuration), output ROI and number of components;
        optionally open the device.

        Parameters
        ----------
        n_components: int
            target number of output components (also sets the output ROI).
        opu_device: OpuDevice or SimulatedOpuDevice, optional
            pre-built device to use; when None one is built from config.
        max_n_features: int
            maximum number of input features accepted.
        config_file: str
            path to an OPU configuration file ("" = use host config).
        config_override: dict, optional
            entries overriding the loaded configuration.
        verbose_level: int
            deprecated; use lightonml.set_verbose_level instead.
        input_roi_strategy: types.InputRoiStrategy
            strategy used to compute the input ROI.
        open_at_init: bool, optional
            open the device now; None defers to the
            "lightonml_open_at_init" host option.
        disable_pbar: bool
            disable the progress bar when True.

        Raises
        ------
        TypeError
            if opu_device is not an OpuDevice or SimulatedOpuDevice.
        """
        self.__opu_config = None
        self.__config_file = config_file
        self.__config_override = config_override
        self._max_n_features = max_n_features
        self.disable_pbar = disable_pbar

        # Get trace and print functions
        if verbose_level != -1:
            warnings.warn("Verbose level arg will be removed in 1.3, "
                          "Use lightonml.set_verbose_level instead",
                          DeprecationWarning)
            lightonml.set_verbose_level(verbose_level)
        else:
            verbose_level = lightonml.get_verbose_level()
        self._debug = lightonml.get_debug_fn()
        self._trace = lightonml.get_trace_fn()
        self._print = lightonml.get_print_fn()

        # Device init, or take the one passed as input
        if not opu_device:
            opu_type = self.config["type"]
            self._base_frametime_us = self.config["input"]["frametime_us"]
            self._base_exposure_us = self.config["output"]["exposure_us"]
            seq_nb_prelim = self.config.get("sequence_nb_prelim", 0)
            name = self.config["name"]
            self.device = OpuDevice(opu_type, self._base_frametime_us,
                                    self._base_exposure_us, seq_nb_prelim,
                                    None, verbose_level, name)
        else:
            if not isinstance(opu_device, (SimulatedOpuDevice, OpuDevice)):
                raise TypeError("opu_device must be of type {} or {}"
                                .format(SimulatedOpuDevice.__qualname__,
                                        OpuDevice.__qualname__))
            self.device = opu_device
            self._base_frametime_us = self.device.frametime_us
            self._base_exposure_us = self.device.exposure_us

        if self._s.simulated:
            # build the random matrix if not done already
            self._resize_rnd_matrix(max_n_features, n_components)

        self._output_roi = output_roi.OutputRoi(self.device.output_shape_max,
                                                self.device.output_roi_strategy,
                                                self._s.allowed_roi, self._s.min_n_components)
        # This also sets the output ROI
        self.n_components = n_components
        self.input_roi_strategy = input_roi_strategy
        # Runner initialized when entering fit
        self._runner = None  # type: Optional[TransformRunner]
        # ExitStack for device acquisition, initialized when entering fit
        self._acq_stack = ExitStack()
        self._trace("OPU initialized")

        # Open at init, unless relevant host.json option is False
        if open_at_init is None:
            open_at_init = get_host_option("lightonml_open_at_init", True)
        if open_at_init:
            self.open()
Exemplo n.º 8
0
 def __setstate__(self, state):
     """Restore attributes after unpickling and re-acquire the trace logger.

     The trace function is removed at __getstate__ time, so a fresh one
     is fetched from the lightonml module once the plain state is back.
     """
     import lightonml
     self.__dict__.update(state)
     self._trace = lightonml.get_trace_fn()