def predict(self, *inputs):
    """Run anomaly prediction, routing data through the TorchScript model on its device."""
    device = get_device(self.model)
    base_predict = super(TorchScriptSklearnContainerAnomalyDetection, self)._predict

    def wrapped(x):
        # Delegate to the base predict after moving data onto the model's device.
        return _torchscript_wrapper(device, base_predict, x, extra_config=self._extra_config)

    return self._run(wrapped, *inputs)
def transform(self, *inputs):
    """Apply the transformer, routing data through the TorchScript model on its device."""
    device = get_device(self.model)
    base_transform = super(TorchScriptSklearnContainerTransformer, self)._transform

    def wrapped(x):
        # Delegate to the base transform after moving data onto the model's device.
        return _torchscript_wrapper(device, base_transform, x, extra_config=self._extra_config)

    return self._run(wrapped, *inputs)
def score_samples(self, *inputs):
    """Return raw anomaly scores: decision_function output shifted by the stored offset."""
    device = get_device(self.model)
    scorer = self.decision_function

    def wrapped(x):
        # Run decision_function through the device-aware TorchScript wrapper.
        return _torchscript_wrapper(device, scorer, x, extra_config=self._extra_config)

    base_scores = self._run(wrapped, *inputs)
    return base_scores + self._extra_config[constants.OFFSET]
def predict_proba(self, *inputs):
    """Return class probabilities from the TorchScript model on its device."""
    device = get_device(self.model)
    base_proba = super(TorchScriptSklearnContainerClassification, self)._predict_proba

    def wrapped(*xs):
        # Unlike the single-input wrappers, probabilities may take several
        # positional inputs, so all of them are forwarded.
        return _torchscript_wrapper(device, base_proba, *xs, extra_config=self._extra_config)

    return self._run(wrapped, *inputs)
def decision_function(self, *inputs):
    """Compute decision scores, adding the IsolationForest threshold when configured."""
    device = get_device(self.model)
    base_decision = super(TorchScriptSklearnContainerAnomalyDetection, self)._decision_function

    def wrapped(x):
        # Delegate to the base decision function on the model's device.
        return _torchscript_wrapper(device, base_decision, x, extra_config=self._extra_config)

    scores = self._run(wrapped, *inputs)
    # IsolationForest models carry a threshold correction in the extra config.
    if constants.IFOREST_THRESHOLD in self._extra_config:
        scores += self._extra_config[constants.IFOREST_THRESHOLD]
    return scores
def forward(self, *inputs):
    """Execute the container's operator pipeline on *inputs*.

    Accepts either one positional argument per expected input name, or a
    single pandas DataFrame whose columns are split into one (n, 1) ndarray
    per column. Lists are converted to ndarrays; string ndarrays are
    re-encoded as int32 via ``from_strings_to_ints``; float64 tensors are
    downcast to float32 (matching sklearn); tensors are moved to the model's
    device before the operators run.

    Returns:
        A single tensor when one output name is declared, otherwise a tuple
        of outputs in declared order.

    Raises:
        RuntimeError: if an input is neither list, ndarray, nor torch.Tensor.
    """
    with torch.no_grad():
        # NOTE(review): `assert` is stripped under -O; kept to preserve the
        # original validation behavior. The None-guard is evaluated first
        # (pandas is an optional dependency, DataFrame may be None).
        assert len(self._input_names) == len(inputs) or (
            DataFrame is not None
            and type(inputs[0]) == DataFrame
            and not self.check_dataframe_to_array
            and len(self._input_names) == len(inputs[0].columns)
        ), "number of inputs or number of columns in the dataframe do not match with the expected number of inputs {}".format(
            self._input_names
        )

        # Fix: check `DataFrame is not None` BEFORE the type comparison. In
        # the original the None-check came second and was dead code
        # (`type(x) == None` is always False), obscuring the intent.
        if DataFrame is not None and type(inputs[0]) == DataFrame:
            # Split the dataframe into one single-column ndarray per column.
            df = inputs[0]
            inputs = tuple(df[name].to_numpy().reshape(-1, 1) for name in df.columns)

        inputs = [*inputs]
        variable_map = {}
        device = get_device(self)

        # Map each data input to the variable name the operators expect.
        for i, input_name in enumerate(self._input_names):
            input_ = inputs[i]
            if type(input_) is list:
                input_ = np.array(input_)
            if type(input_) is np.ndarray:
                # String arrays are re-encoded as int32 before tensor conversion.
                if input_.dtype.kind in constants.SUPPORTED_STRING_TYPES:
                    assert self.max_string_length is not None
                    input_ = from_strings_to_ints(input_, self.max_string_length)
                input_ = torch.from_numpy(input_)
            elif type(input_) is not torch.Tensor:
                # Fix: error message typo "Inputer" -> "Input".
                raise RuntimeError(
                    "Input tensor {} of not supported type {}".format(input_name, type(input_))
                )
            if input_.dtype == torch.float64:
                # Downcast double precision to single precision; sklearn does the same.
                input_ = input_.float()
            if device is not None and device.type != "cpu":
                input_ = input_.to(device)
            variable_map[input_name] = input_

        # Evaluate the operators in topology order, wiring each operator's
        # outputs back into the variable map for downstream inputs.
        for operator in self._operators:
            outputs = operator(*(variable_map[input_name] for input_name in operator.inputs))
            if len(operator.outputs) == 1:
                variable_map[operator.outputs[0]] = outputs
            else:
                for i, output_name in enumerate(operator.outputs):
                    variable_map[output_name] = outputs[i]

        # Prepare and return the output.
        if len(self._output_names) == 1:
            return variable_map[self._output_names[0]]
        return tuple(variable_map[output_name] for output_name in self._output_names)