def __setstate__(self, state):
    """Unpickle support: restore the attribute dict, then re-bind the
    logging callables that __getstate__ stripped (they are not picklable).
    """
    self.__dict__.update(state)
    # Logging hooks come from the package-level lightonml configuration.
    import lightonml
    self._print = lightonml.get_print_fn()
    self._trace = lightonml.get_trace_fn()
    self._debug = lightonml.get_debug_fn()
def __init__(self, opu_settings: "OpuSettings", settings: "TransformSettings",
             traits: InputTraits, device: OpuDevice = None,
             roi_compute: Callable = None, disable_pbar=False):
    """Build a transform runner bound to an OPU device.

    Stores settings and input traits, validates the feature count,
    chooses an input formatter (plain flattening for a simulated OPU),
    and tunes exposure and frame time for the current configuration.
    Raises ValueError if the input has more features than the OPU allows.
    """
    self.s = opu_settings      # OPU hardware settings
    self.t = settings          # user's transform settings
    self.device = device       # device the transform will run on
    self._traits = traits      # description of the input data
    # When the input already spans the maximum feature count there is
    # nothing to format (a plain formatter is enough).
    self.input_matches_max = self._traits.n_features_s == self.s.max_n_features
    # Logging hooks from the package-level configuration
    import lightonml
    self._print = lightonml.get_print_fn()
    self._trace = lightonml.get_trace_fn()
    self._debug = lightonml.get_debug_fn()
    self.disable_pbar = disable_pbar
    # Ones-ratio bookkeeping (whether input has a sufficient ones ratio);
    # left empty here, actually filled in FitTransformRunner when the
    # input ROI is automatic.
    self.ones_info = {}
    # Input traits sanity check
    if self._traits.n_features_s > self.s.max_n_features:
        raise ValueError(
            "input's number of features ({}) can't be greater than {}".
            format(self._traits.n_features_s, self.s.max_n_features))
    # ROI computation can be supplied by a child class (FitTransformRunner);
    # fall back on our own implementation otherwise.
    compute_roi = self._roi_compute if roi_compute is None else roi_compute
    if self.s.simulated:
        # A simulated OPU only needs its 2D features flattened
        self.formatter = FlatFormatter(self._traits.n_features)
    else:
        self.formatter = self._configure_formatting(compute_roi)
    self._debug(
        "Formatter {} with element size {}. Input ROI: {}, {} ".format(
            self.formatter.fmt_type.name, self.formatter.factor,
            self.formatter.roi_offset, self.formatter.roi_size))
    # Size-0 target allocates a single frame: the buffer for online mode
    self.buffer = self.formatter.empty_target(0)
    # Finally adapt acquisition timings to the chosen formatting
    self._adjust_exposure()
    self._adjust_frametime()
def __setstate__(self, state):
    """Unpickle support: restore attributes, re-bind the logging callables
    removed by __getstate__, and resume online acquisition if it was
    active when the object was pickled.
    """
    self.__dict__.update(state)
    # Logging functions are not picklable; fetch fresh ones.
    self._print = lightonml.get_print_fn()
    self._trace = lightonml.get_trace_fn()
    self._debug = lightonml.get_debug_fn()
    # A live ExitStack cannot be pickled either; start a fresh one.
    self._acq_stack = ExitStack()
    # Re-enter online acquisition if the pickled state recorded it.
    if state.get("__online_acq", False):
        self._acq_stack.enter_context(self.device.acquiring(online=True))
def __init__(self, n_components: int = 200000,
             opu_device: Optional[Union["OpuDevice", SimulatedOpuDevice]] = None,
             max_n_features: int = 1000, config_file: str = "",
             config_override: dict = None, verbose_level: int = -1,
             input_roi_strategy: types.InputRoiStrategy = types.InputRoiStrategy.full,
             open_at_init: bool = None, disable_pbar=False, simulated=False,
             rescale: Union[OutputRescaling, str] = OutputRescaling.variance):
    """Set up the OPU: configuration, device, output ROI, and components.

    The device is either the one passed in, a simulated one, or a real
    OpuDevice instantiated from the host configuration files.
    Raises ValueError when simulated and opu_device conflict, TypeError
    for an unsupported device type, RuntimeError when no OPU
    configuration is found on this host.
    """
    self.__opu_config = None          # OPU configuration, loaded on demand
    self.__config_file = config_file
    self.__config_override = config_override
    self._max_n_features = max_n_features
    self.disable_pbar = disable_pbar
    self.rescale = rescale

    # Verbosity: the constructor argument is deprecated in favour of the
    # package-level setting.
    if verbose_level == -1:
        verbose_level = lightonml.get_verbose_level()
    else:
        warnings.warn("Verbose level arg will removed in 1.3, "
                      "Use lightonml.set_verbose_level instead",
                      DeprecationWarning)
        lightonml.set_verbose_level(verbose_level)
    self._debug = lightonml.get_debug_fn()
    self._trace = lightonml.get_trace_fn()
    self._print = lightonml.get_print_fn()

    missing_config_msg = \
        "No configuration files for the OPU was found on this machine.\n" \
        "You may want to run the OPU in a simulated manner, by passing the " \
        "simulated argument to True at init.\n" \
        "See https://docs.lighton.ai/notes/get_started.html#Simulating-an-OPU " \
        "for more details.\n" \
        "See also https://lighton.ai/products for getting access to our technology."

    if simulated and opu_device is not None:
        raise ValueError("simulated and opu_device arguments are conflicting")

    # Pick the device: caller-supplied, simulated, or built from config.
    if opu_device:
        # Check by class name rather than isinstance: OpuDevice may not be
        # importable on hosts without the lightonopu package — TODO confirm
        if type(opu_device).__name__ not in ["SimulatedOpuDevice", "OpuDevice"]:
            raise TypeError(
                "opu_device must be of type SimulatedOpuDevice or OpuDevice")
        self.device = opu_device
    elif simulated:
        self.device = SimulatedOpuDevice()
    else:
        # Instantiate the real device from the host's configuration files
        from lightonopu.internal.device import OpuDevice
        if not self.__config_file and not config.host_has_opu_config():
            # No configuration files found: most likely no OPU on this host
            raise RuntimeError(missing_config_msg)
        cfg = self.config
        self.device = OpuDevice(cfg["type"], cfg["input"]["frametime_us"],
                                cfg["output"]["exposure_us"],
                                cfg.get("sequence_nb_prelim", 0),
                                None, verbose_level, cfg["name"])

    self._base_frametime_us = self.device.frametime_us
    self._base_exposure_us = self.device.exposure_us

    if self._s.simulated:
        # Make sure the simulated random matrix matches the requested size
        self._resize_rnd_matrix(max_n_features, n_components)
    else:
        # linear_reconstruction requires lightonopu 1.4.1 or later
        pkg_resources.require("lightonopu>=1.4.1")
        from lightonopu import linear_reconstruction
        linear_reconstruction.init(np.prod(self.device.input_shape))

    self._output_roi = output_roi.OutputRoi(
        self.device.output_shape_max, self.device.output_roi_strategy,
        self._s.allowed_roi, self._s.min_n_components)
    # Assigning n_components also sets the output ROI
    self.n_components = n_components
    self.input_roi_strategy = input_roi_strategy
    # Runner is created when entering fit
    self._runner = None  # type: Optional[TransformRunner]
    # Device acquisition stack, entered when fitting starts
    self._acq_stack = ExitStack()
    self._trace("OPU initialized")

    # Open now, unless the relevant host.json option disables it
    if open_at_init is None:
        open_at_init = get_host_option("lightonml_open_at_init", True)
    if open_at_init:
        self.open()
def __init__(self, n_components: int = 200000,
             opu_device: Optional[Union[OpuDevice, SimulatedOpuDevice]] = None,
             max_n_features: int = 1000, config_file: str = "",
             config_override: dict = None, verbose_level: int = -1,
             input_roi_strategy: types.InputRoiStrategy = types.InputRoiStrategy.auto,
             open_at_init: bool = None, disable_pbar=False):
    """Set up the OPU: configuration, device, output ROI, and components.

    Uses the caller-supplied device if given (must be an OpuDevice or a
    SimulatedOpuDevice, TypeError otherwise); builds an OpuDevice from the
    host configuration files when none is given.
    """
    self.__opu_config = None          # OPU configuration, loaded on demand
    self.__config_file = config_file
    self.__config_override = config_override
    self._max_n_features = max_n_features
    self.disable_pbar = disable_pbar

    # Verbosity: the constructor argument is deprecated in favour of the
    # package-level setting.
    if verbose_level == -1:
        verbose_level = lightonml.get_verbose_level()
    else:
        warnings.warn("Verbose level arg will removed in 1.3, "
                      "Use lightonml.set_verbose_level instead",
                      DeprecationWarning)
        lightonml.set_verbose_level(verbose_level)
    self._debug = lightonml.get_debug_fn()
    self._trace = lightonml.get_trace_fn()
    self._print = lightonml.get_print_fn()

    # Device: take the caller's, or instantiate one from configuration
    if opu_device:
        if not isinstance(opu_device, (SimulatedOpuDevice, OpuDevice)):
            raise TypeError("opu_device must be of type {} or {}"
                            .format(SimulatedOpuDevice.__qualname__,
                                    OpuDevice.__qualname__))
        self.device = opu_device
        self._base_frametime_us = self.device.frametime_us
        self._base_exposure_us = self.device.exposure_us
    else:
        cfg = self.config
        self._base_frametime_us = cfg["input"]["frametime_us"]
        self._base_exposure_us = cfg["output"]["exposure_us"]
        self.device = OpuDevice(cfg["type"], self._base_frametime_us,
                                self._base_exposure_us,
                                cfg.get("sequence_nb_prelim", 0),
                                None, verbose_level, cfg["name"])

    if self._s.simulated:
        # Make sure the simulated random matrix matches the requested size
        self._resize_rnd_matrix(max_n_features, n_components)

    self._output_roi = output_roi.OutputRoi(
        self.device.output_shape_max, self.device.output_roi_strategy,
        self._s.allowed_roi, self._s.min_n_components)
    # Assigning n_components also sets the output ROI
    self.n_components = n_components
    self.input_roi_strategy = input_roi_strategy
    # Runner is created when entering fit
    self._runner = None  # type: Optional[TransformRunner]
    # Device acquisition stack, entered when fitting starts
    self._acq_stack = ExitStack()
    self._trace("OPU initialized")

    # Open now, unless the relevant host.json option disables it
    if open_at_init is None:
        open_at_init = get_host_option("lightonml_open_at_init", True)
    if open_at_init:
        self.open()