Example #1
    def Dist_Objective(predt: np.ndarray, data: lgb.Dataset):
        """A customized objective function to train each distributional parameter using custom gradient and hessian.

        """

        target = torch.tensor(data.get_label())

        # When num_class != 0, preds has shape (n_obs, n_dist_param).
        # Each element in a row represents a raw prediction (leaf weight, hasn't gone through response function yet).
        predt = predt.reshape(-1, Gaussian.n_dist_param(), order="F")
        preds_location = Gaussian.param_dict()["location"](predt[:, 0])
        preds_location = torch.tensor(preds_location, requires_grad=True)

        preds_scale = Gaussian.param_dict()["scale"](predt[:, 1])
        preds_scale = torch.tensor(preds_scale, requires_grad=True)

        # Weights
        if data.get_weight() is None:
            # Use 1 as weight if no weights are specified
            weights = np.ones_like(target, dtype=float)
        else:
            weights = data.get_weight()

        # Initialize Gradient and Hessian Matrices
        grad = np.zeros(shape=(len(target), Gaussian.n_dist_param()))
        hess = np.zeros(shape=(len(target), Gaussian.n_dist_param()))

        # Specify Metric for Auto Derivation
        dGaussian = Normal(preds_location, preds_scale)
        autograd_metric = -dGaussian.log_prob(target).nansum()

        # Location
        grad[:, 0] = stabilize_derivative(
            auto_grad(metric=autograd_metric, parameter=preds_location, n=1) *
            weights, Gaussian.stabilize)

        hess[:, 0] = stabilize_derivative(
            auto_grad(metric=autograd_metric, parameter=preds_location, n=2) *
            weights, Gaussian.stabilize)

        # Scale
        grad[:, 1] = stabilize_derivative(
            auto_grad(metric=autograd_metric, parameter=preds_scale, n=1) *
            weights, Gaussian.stabilize)

        hess[:, 1] = stabilize_derivative(
            auto_grad(metric=autograd_metric, parameter=preds_scale, n=2) *
            weights, Gaussian.stabilize)

        # Reshaping
        grad = grad.ravel(order="F")
        hess = hess.ravel(order="F")

        return grad, hess
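
The objective above derives its gradients and Hessians with PyTorch autograd on the Gaussian negative log-likelihood. Below is a minimal usage sketch, assuming train_data is an lgb.Dataset and that Dist_Objective and Gaussian are in scope as defined above; the booster parameters are placeholders. As the comment in the example notes, setting num_class makes LightGBM emit one raw output column per distributional parameter, and depending on the LightGBM version the custom objective is passed via the fobj argument (3.x) or as params["objective"] (4.x).

    import lightgbm as lgb

    # Placeholder training parameters; "none" tells LightGBM that the objective
    # is supplied externally, and num_class reserves one raw output per parameter.
    params = {
        "objective": "none",
        "num_class": Gaussian.n_dist_param(),
        "verbosity": -1,
    }

    # LightGBM 3.x style; with 4.x, set params["objective"] = Dist_Objective instead.
    booster = lgb.train(params, train_data, num_boost_round=100, fobj=Dist_Objective)
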
Example #2
    def Dist_Objective(predt: np.ndarray, data: lgb.Dataset):
        """A customized objective function to train each distributional parameter using custom gradient and hessian.

        """

        target = data.get_label()

        # When num_class != 0, preds has shape (n_obs, n_dist_param).
        # Each element in a row represents a raw prediction (leaf weight, hasn't gone through response function yet).
        predt = predt.reshape(-1, Gaussian.n_dist_param(), order="F")
        preds_location = Gaussian.param_dict()["location"](predt[:, 0])
        preds_scale = Gaussian.param_dict()["scale"](predt[:, 1])


        # Weights
        if data.get_weight() is None:
            # Use 1 as weight if no weights are specified
            weights = np.ones_like(target, dtype=float)
        else:
            weights = data.get_weight()


        # Initialize Gradient and Hessian Matrices
        grad = np.zeros(shape=(len(target), Gaussian.n_dist_param()))
        hess = np.zeros(shape=(len(target), Gaussian.n_dist_param()))


        # Location
        grad[:, 0] = Gaussian.gradient_location(y=target,
                                                location=preds_location,
                                                scale=preds_scale,
                                                weights=weights)

        hess[:, 0] = Gaussian.hessian_location(scale=preds_scale,
                                               weights=weights)

        # Scale
        grad[:, 1] = Gaussian.gradient_scale(y=target,
                                             location=preds_location,
                                             scale=preds_scale,
                                             weights=weights)

        hess[:, 1] = Gaussian.hessian_scale(scale=preds_scale,
                                            weights=weights)

        # Reshaping
        grad = grad.ravel(order="F")
        hess = hess.ravel(order="F")

        return grad, hess
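
Example #2 computes the same Gaussian objective with analytic derivatives instead of autograd. The sketch below shows what helpers such as gradient_location and hessian_location would compute for the Gaussian negative log-likelihood with respect to the location parameter; it is a reference implementation under that assumption, and the library's actual helpers may differ (for instance by including chain-rule factors when the scale is predicted on the log scale).

    import numpy as np

    def gradient_location(y, location, scale, weights):
        # d(-log N(y | mu, sigma)) / d mu = (mu - y) / sigma^2
        return ((location - y) / scale**2) * weights

    def hessian_location(scale, weights):
        # d^2(-log N(y | mu, sigma)) / d mu^2 = 1 / sigma^2
        return (1.0 / scale**2) * weights

    y = np.array([1.0, 2.0, 3.0])
    mu = np.array([1.5, 1.5, 1.5])
    sigma = np.ones(3)
    w = np.ones(3)
    print(gradient_location(y, mu, sigma, w))  # [ 0.5 -0.5 -1.5]
    print(hessian_location(sigma, w))          # [1. 1. 1.]
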
Example #3
    def _evaluate(self, scores: np.ndarray, clases: lgb.Dataset) -> Tuple[str, float, bool]:
        labels = clases.get_label()
        weights = clases.get_weight()
        score_corte = self.prob_corte

        nombre, valor = self._evaluar_funcion_ganancia(scores, labels, weights, score_corte)

        return nombre, valor, True
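
_evaluate follows LightGBM's custom-metric convention of returning (name, value, is_higher_better); here the gain function is always reported as higher-is-better. A hedged usage sketch follows, assuming metric_obj is an instance of the class that defines _evaluate, that the task is binary classification (suggested by the prob_corte cutoff), and that dtrain and dvalid are lgb.Dataset objects; these names are illustrative.

    import lightgbm as lgb

    booster = lgb.train(
        {"objective": "binary", "metric": "None"},  # "None" disables built-in metrics
        dtrain,
        valid_sets=[dvalid],
        # feval receives (raw scores, lgb.Dataset) and returns (name, value, higher_is_better)
        feval=metric_obj._evaluate,
    )
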
Example #4
    def Dist_Objective(predt: np.ndarray, data: lgb.Dataset):
        """A customized objective function to train each distributional parameter using custom gradient and hessian.

        """

        target = data.get_label()

        # When num_class != 0, preds has shape (n_obs, n_dist_param).
        # Each element in a row represents a raw prediction (leaf weight, hasn't gone through response function yet).
        preds_expectile = predt.reshape(-1,
                                        Expectile.n_dist_param(),
                                        order="F")

        # Weights
        if data.get_weight() is None:
            # Use 1 as weight if no weights are specified
            weights = np.ones_like(target, dtype=float)
        else:
            weights = data.get_weight()

        # Initialize Gradient and Hessian Matrices
        grad = np.zeros(shape=(len(target), len(Expectile.expectiles)))
        hess = np.zeros(shape=(len(target), len(Expectile.expectiles)))

        for i in range(len(Expectile.expectiles)):
            grad[:, i] = Expectile.gradient_expectile(
                y=target,
                expectile=preds_expectile[:, i],
                tau=Expectile.expectiles[i],
                weights=weights)

            hess[:, i] = Expectile.hessian_expectile(
                y=target,
                expectile=preds_expectile[:, i],
                tau=Expectile.expectiles[i],
                weights=weights)

        # Reshaping
        grad = grad.ravel(order="F")
        hess = hess.ravel(order="F")

        return grad, hess
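
This expectile objective loops over all requested expectile levels and fills one gradient and Hessian column per level. The sketch below gives the closed-form derivatives of the asymmetric squared loss that gradient_expectile and hessian_expectile plausibly implement; it is a reference under that assumption, not the library's exact code, and sign or scaling conventions may differ.

    import numpy as np

    # Asymmetric squared loss:  L_tau(y, e) = |tau - 1{y < e}| * (y - e)^2
    def gradient_expectile(y, expectile, tau, weights):
        asym = np.abs(tau - (y < expectile).astype(float))
        return -2.0 * asym * (y - expectile) * weights  # dL/de

    def hessian_expectile(y, expectile, tau, weights):
        asym = np.abs(tau - (y < expectile).astype(float))
        return 2.0 * asym * weights  # d^2L/de^2
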
Example #5
    def __call__(self, pred: np.ndarray,
                 dtrain: lgb.Dataset) -> Tuple[str, float, bool]:

        label = dtrain.get_label()

        weights = dtrain.get_weight()

        if label.shape[0] != pred.shape[0]:
            pred = pred.reshape((label.shape[0], -1), order='F')
            label = label.astype(np.int32)

        pred = self.bw_func(pred)

        # for weighted case
        try:
            val = self.metric_func(label, pred, sample_weight=weights)
        except TypeError:
            val = self.metric_func(label, pred)

        # TODO: what if grouped case

        return 'Opt metric', val, self.greater_is_better
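
The try/except TypeError block lets this wrapper work both with metrics that accept a sample_weight keyword and with metrics that do not. A small standalone illustration of that fallback, assuming scikit-learn is installed; the data values and metric choice are purely illustrative.

    import numpy as np
    from sklearn.metrics import roc_auc_score

    def safe_metric(metric_func, label, pred, weights):
        # Mirror the wrapper: try the weighted call first, fall back to
        # the unweighted call if the metric has no sample_weight argument.
        try:
            return metric_func(label, pred, sample_weight=weights)
        except TypeError:
            return metric_func(label, pred)

    label = np.array([0, 1, 1, 0])
    pred = np.array([0.1, 0.8, 0.6, 0.4])
    weights = np.array([1.0, 2.0, 1.0, 1.0])
    print(safe_metric(roc_auc_score, label, pred, weights))  # 1.0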