Example #1
    def forward(self, *inputs):
        with torch.no_grad():
            assert len(self._input_names) == len(inputs) or (
                DataFrame is not None and isinstance(inputs[0], DataFrame)
                and len(self._input_names) == len(inputs[0].columns)
            ), "number of inputs or number of columns in the dataframe does not match the expected number of inputs {}".format(
                self._input_names)

            if DataFrame is not None and isinstance(inputs[0], DataFrame):
                # Split the dataframe into column ndarrays
                inputs = inputs[0]
                input_names = list(inputs.columns)
                splits = [
                    inputs[input_names[idx]] for idx in range(len(input_names))
                ]
                splits = [df.to_numpy().reshape(-1, 1) for df in splits]
                inputs = tuple(splits)
            inputs = [*inputs]
            variable_map = {}
            device = _get_device(self)

            # Maps data inputs to the expected variables.
            for i, input_name in enumerate(self._input_names):
                if type(inputs[i]) is list:
                    inputs[i] = np.array(inputs[i])
                if type(inputs[i]) is np.ndarray:
                    inputs[i] = torch.from_numpy(inputs[i])
                    if inputs[i].dtype == torch.float64:
                        # We convert double precision arrays into single precision. Sklearn does the same.
                        inputs[i] = inputs[i].float()
                elif type(inputs[i]) is not torch.Tensor:
                    raise RuntimeError(
                        "Input tensor {} has unsupported type {}".format(
                            input_name, type(inputs[i])))
                if device is not None and device.type != "cpu":
                    inputs[i] = inputs[i].to(device)
                variable_map[input_name] = inputs[i]

            # Evaluate all the operators in the topology by properly wiring inputs/outputs.
            for operator in self._operators:
                pytorch_op = self._operator_map[operator.full_name]
                pytorch_outputs = pytorch_op(
                    *(variable_map[input]
                      for input in operator.input_full_names))

                if len(operator.output_full_names) == 1:
                    variable_map[
                        operator.output_full_names[0]] = pytorch_outputs
                else:
                    for i, output in enumerate(operator.output_full_names):
                        variable_map[output] = pytorch_outputs[i]

            # Prepare and return the output.
            if len(self._output_names) == 1:
                return variable_map[self._output_names[0]]
            else:
                return [variable_map[output_name]
                        for output_name in self._output_names]
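
For context, this forward pass accepts either one array/tensor per declared input or a single pandas DataFrame whose columns are split into per-feature (n, 1) arrays. A minimal usage sketch, assuming the class above is Hummingbird's PyTorch executor; the data, model, and shapes below are illustrative assumptions, not taken from the example:

import numpy as np
from sklearn.ensemble import RandomForestClassifier
from hummingbird.ml import convert

# Illustrative data and model; sizes and estimator are assumptions.
X = np.random.rand(200, 2).astype(np.float32)
y = (X[:, 0] > 0.5).astype(int)
skl_model = RandomForestClassifier(n_estimators=10).fit(X, y)

# Convert to a PyTorch-backed container; its predict() routes through forward() above.
hb_model = convert(skl_model, "pytorch")

# A plain ndarray is matched against self._input_names inside forward();
# float64 input would be down-cast to float32 there, mirroring scikit-learn.
preds = hb_model.predict(X)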
Example #2
    def decision_function(self, *inputs):
        device = _get_device(self.model)
        f = super(TorchScriptSklearnContainerAnomalyDetection, self)._decision_function
        f_wrapped = lambda x: _torchscript_wrapper(device, f, x)  # noqa: E731

        scores = self._run(f_wrapped, *inputs)

        if constants.IFOREST_THRESHOLD in self._extra_config:
            scores += self._extra_config[constants.IFOREST_THRESHOLD]
        return scores
Example #3
    def score_samples(self, *inputs):
        device = _get_device(self.model)
        f = self.decision_function
        f_wrapped = lambda x: _torchscript_wrapper(device, f, x)  # noqa: E731

        return self._run(f_wrapped, *inputs) + self._extra_config[constants.OFFSET]
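
The two anomaly-detection methods above mirror scikit-learn's IsolationForest convention, where score_samples(X) equals decision_function(X) + offset_; the container reproduces this by adding the stored threshold/offset constants back onto the raw scores. A small sketch of that convention on the scikit-learn side, independent of the container code (data is illustrative):

import numpy as np
from sklearn.ensemble import IsolationForest

X = np.random.rand(100, 3)
iforest = IsolationForest(n_estimators=10, random_state=0).fit(X)

# scikit-learn's documented relationship between the two scoring methods.
assert np.allclose(iforest.score_samples(X),
                   iforest.decision_function(X) + iforest.offset_)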
Example #4
    def predict(self, *inputs):
        device = _get_device(self.model)
        f = super(TorchScriptSklearnContainerAnomalyDetection, self)._predict
        f_wrapped = lambda x: _torchscript_wrapper(device, f, x)  # noqa: E731

        return self._run(f_wrapped, *inputs)
Example #5
    def predict_proba(self, *inputs):
        device = _get_device(self.model)
        f = super(TorchScriptSklearnContainerClassification, self)._predict_proba
        f_wrapped = lambda *x: _torchscript_wrapper(device, f, *x)  # noqa: E731

        return self._run(f_wrapped, *inputs, reshape=True)
Example #6
    def transform(self, *inputs):
        device = _get_device(self.model)
        f = super(TorchScriptSklearnContainerTransformer, self)._transform
        f_wrapped = lambda x: _torchscript_wrapper(device, f, x)  # noqa: E731

        return self._run(f_wrapped, *inputs, reshape=True)
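
Examples 2 through 6 all follow the same pattern: bind a base function and the target device into a closure, then hand that closure to self._run. As an illustration of the idea only, not Hummingbird's actual _torchscript_wrapper, a device-aware wrapper of this kind typically converts array inputs to tensors and moves them to the bound device before delegating:

import numpy as np
import torch

def make_device_wrapper(device, f):
    """Illustrative stand-in for the _torchscript_wrapper pattern above (hypothetical helper)."""
    def wrapped(*args):
        prepared = []
        for a in args:
            # Accept ndarrays or tensors, converting as needed.
            t = torch.from_numpy(a) if isinstance(a, np.ndarray) else a
            if device is not None and device.type != "cpu":
                t = t.to(device)
            prepared.append(t)
        return f(*prepared)
    return wrapped

# Usage mirrors the lambdas above, e.g. f_wrapped = make_device_wrapper(device, f).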