Example #1
        def kernel_scatter(size, shift, shuffled, scanblocksum, localscan,
                           shuffled_sorted, indices, indices_sorted,
                           store_indices):
            tid = hsa.get_local_id(0)
            blkid = hsa.get_group_id(0)
            gid = hsa.get_global_id(0)

            if gid < size:
                curdata = uintp(shuffled[blkid, tid])
                data_radix = uintp((curdata >> uintp(shift)) &
                                   uintp(RADIX_MINUS_1))
                pos = scanblocksum[data_radix, blkid] + localscan[blkid, tid]
                shuffled_sorted[pos] = curdata

                if store_indices:
                    indices_sorted[pos] = indices[gid]
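
The heart of kernel_scatter is the position arithmetic: the final index of each value is scanblocksum[data_radix, blkid] (the global start of that digit's chunk for that block) plus localscan[blkid, tid] (the value's rank among same-digit values inside its block). Below is a minimal sequential sketch of that arithmetic, assuming the 4-way radix implied by four_way_scan and the size-4 sm_blocksum in the companion kernel; it is an illustration, not the GPU code.

RADIX_BITS = 2
RADIX = 1 << RADIX_BITS          # 4-way radix, as assumed above
RADIX_MINUS_1 = RADIX - 1

def scatter_block(shuffled_block, scanblocksum, localscan_block, blkid, shift, out):
    # shuffled_block: one block of values, already grouped by digit
    # scanblocksum[digit, blkid]: global start offset of that digit's chunk for the block
    # localscan_block[tid]: rank of the value among same-digit values of the block
    for tid, curdata in enumerate(shuffled_block):
        digit = (int(curdata) >> shift) & RADIX_MINUS_1
        pos = scanblocksum[digit, blkid] + localscan_block[tid]
        out[pos] = curdata
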
Example #2
        def kernel_local_shuffle(data, size, shift, blocksum, localscan,
                                 shuffled, indices, store_indices):
            tid = hsa.get_local_id(0)
            blkid = hsa.get_group_id(0)
            blksz = localscan.shape[1]

            sm_mask = hsa.shared.array(shape=mask_shape, dtype=int32)
            sm_blocksum = hsa.shared.array(shape=4, dtype=int32)
            sm_shuffled = hsa.shared.array(shape=block_size, dtype=uintp)
            sm_indices = hsa.shared.array(shape=block_size, dtype=uintp)
            sm_localscan = hsa.shared.array(shape=block_size, dtype=int32)
            sm_localscan[tid] = -1

            dataid = blkid * blksz + tid
            valid = dataid < size and tid < blksz
            curdata = uintp(data[dataid] if valid else uintp(0))
            processed_data = uintp((curdata >> uintp(shift)) &
                                   uintp(RADIX_MINUS_1))

            chunk_offset, scanval = four_way_scan(processed_data, sm_mask,
                                                  sm_blocksum, blksz, valid)

            if tid < RADIX:
                blocksum[tid, blkid] = sm_blocksum[tid]

            if tid < blksz:
                # Store local scan value
                where = chunk_offset + scanval
                # Store shuffled value and indices
                shuffled[blkid, where] = curdata
                if store_indices and valid:
                    sm_indices[where] = indices[dataid]
                sm_localscan[where] = scanval

            # Cleanup
            hsa.barrier()
            if tid < blksz:
                # shuffled[blkid, tid] = sm_shuffled[tid]
                if store_indices and valid:
                    indices[dataid] = sm_indices[tid]
                localscan[blkid, tid] = sm_localscan[tid]
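
For reference, the per-block work done here (extract the 2-bit digit, count digits, and give each element its rank inside its digit chunk) can be written sequentially. The sketch below mimics what four_way_scan plus the shuffle produce for a single block; it is an illustration of the data movement, not the parallel GPU implementation.

import numpy as np

RADIX = 4
RADIX_MINUS_1 = RADIX - 1

def local_shuffle_block(block, shift):
    # Digit of each value in this block
    digits = ((block >> shift) & RADIX_MINUS_1).astype(np.intp)
    blocksum = np.bincount(digits, minlength=RADIX)                  # per-digit counts
    chunk_offset = np.concatenate(([0], np.cumsum(blocksum)[:-1]))   # digit start offsets
    shuffled = np.empty_like(block)
    localscan = np.empty(block.shape[0], dtype=np.intp)
    seen = np.zeros(RADIX, dtype=np.intp)
    for tid, d in enumerate(digits):
        where = chunk_offset[d] + seen[d]   # position of this value inside the block
        shuffled[where] = block[tid]
        localscan[where] = seen[d]
        seen[d] += 1
    return shuffled, blocksum, localscan

block = np.array([13, 7, 2, 9, 4, 11], dtype=np.uint32)
shuffled, blocksum, localscan = local_shuffle_block(block, shift=0)
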
Example #3
    def fit(self, X, y, train_indices, valid_indices, sample_weights):
        max_bins = self.n_bins - 1
        random_state = self.random_state
        # TODO: we will get this information from the binner that lives in the forest
        n_samples, n_features = X.shape
        n_bins_per_feature = max_bins * np.ones(n_features)
        n_bins_per_feature = n_bins_per_feature.astype(np.intp)

        # Create the tree object, which is mostly a data container for the nodes
        tree = _TreeRegressor(n_features, random_state)

        # We build a tree context, that contains global information about
        # the data, in particular the way we'll organize data into contiguous
        # node indexes both for training and validation samples
        tree_context = TreeRegressorContext(
            X,
            y,
            sample_weights,
            train_indices,
            valid_indices,
            self.n_bins - 1,
            n_bins_per_feature,
            uintp(self.max_features),
            self.aggregation,
            float32(self.step),
        )

        node_context = NodeRegressorContext(tree_context)
        best_split = SplitRegressor()
        candidate_split = SplitRegressor()
        compute_node_context = compute_node_regressor_context

        grow(
            tree,
            tree_context,
            node_context,
            compute_node_context,
            find_best_split_regressor_along_feature,
            copy_split_regressor,
            best_split,
            candidate_split,
        )
        self._train_indices = train_indices
        self._valid_indices = valid_indices
        self._tree = tree
        self._tree_context = tree_context
        return self
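
fit() expects the caller to prepare the train/validation split; the other snippets' docstrings describe the validation samples as out-of-the-bag samples of a bootstrap. Below is a hedged sketch of how such inputs might be built; the bootstrap scheme and dtypes are assumptions for illustration only.

import numpy as np

rng = np.random.default_rng(0)
n_samples = 100

# Bootstrap: in-bag indices are used for training, the remaining
# (out-of-bag) indices serve as validation samples.
in_bag = rng.integers(0, n_samples, size=n_samples)
train_indices = np.unique(in_bag).astype(np.uintp)
valid_indices = np.setdiff1d(np.arange(n_samples), train_indices).astype(np.uintp)
sample_weights = np.ones(n_samples, dtype=np.float32)
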
Example #4
        else:
            # Otherwise, we resize using the specified capacity
            resize_tree_(tree, capacity)


@jit(
    uintp(
        TreeType,
        intp,
        uintp,
        boolean,
        boolean,
        uintp,
        float32,
        uint8,
        float32,
        uintp,
        uintp,
        float32,
        float32,
        uintp,
        uintp,
        uintp,
        uintp,
        float32,
    ),
    nopython=True,
    nogil=True,
    locals={
        "node_idx": uintp,
        "nodes": node_type[::1],
        "node": node_type
Example #5
def recompute_node_predictions(tree, tree_context, dirichlet):
    """This function recomputes the node predictions and validation loss of nodes.
    This is triggered by a change of the dirichlet parameter.

    Parameters
    ----------
    tree : TreeClassifier
        The tree object which holds nodes and prediction data

    tree_context : TreeClassifierContext
        A tree context which will contain tree-level information that is useful to
        find splits

    dirichlet : float
        The dirichlet parameter

    """
    nodes = tree.nodes
    n_classes = tree.n_classes
    y_pred = np.zeros(n_classes, dtype=np.float32)
    y = tree_context.y
    sample_weights = tree_context.sample_weights
    partition_train = tree_context.partition_train
    partition_valid = tree_context.partition_valid

    # Recompute y_pred with the new dirichlet parameter
    for node_idx, node in enumerate(nodes[: tree.node_count]):
        w_samples_train = 0.0
        train_indices = partition_train[node["start_train"] : node["end_train"]]
        y_pred.fill(0.0)
        for sample in train_indices:
            label = uintp(y[sample])
            sample_weight = sample_weights[sample]
            w_samples_train += sample_weight
            y_pred[label] += sample_weight

        for k in range(n_classes):
            y_pred[k] = (y_pred[k] + dirichlet) / (
                w_samples_train + n_classes * dirichlet
            )

        tree.y_pred[node_idx, :] = y_pred

        # recompute valid_losses
        valid_indices = partition_valid[node["start_valid"] : node["end_valid"]]
        loss_valid = 0.0
        w_samples_valid = 0.0

        for sample in valid_indices:
            sample_weight = sample_weights[sample]
            w_samples_valid += sample_weight
            label = uintp(y[sample])
            # TODO: aggregation loss is hard-coded here. Call a function instead
            #  when implementing other losses
            loss_valid -= sample_weight * log(y_pred[label])

        node["loss_valid"] = loss_valid

    # TODO : compute tree weights anyway ? the program can be duped by switching
    #  aggregation off, changing dirichlet and switching aggregation back on
    if tree_context.aggregation:
        compute_tree_weights(tree.nodes, tree.node_count, tree_context.step)
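
The two formulas used in the loop above can be checked on a tiny example: Dirichlet smoothing turns weighted label counts into strictly positive probabilities that sum to one, and the node's validation loss is the weighted negative log-likelihood of those probabilities. The numbers below are made up for illustration.

import numpy as np
from math import log

dirichlet = 0.5
n_classes = 3

# Weighted label counts of a node's training samples (illustrative values)
counts = np.array([4.0, 1.0, 0.0], dtype=np.float32)
w_samples_train = counts.sum()

# y_pred[k] = (count_k + dirichlet) / (w_samples_train + n_classes * dirichlet)
y_pred = (counts + dirichlet) / (w_samples_train + n_classes * dirichlet)
# -> approximately [0.692, 0.231, 0.077]; sums to 1, no class gets probability 0

# Validation loss: weighted negative log-likelihood on the node's valid samples
valid_labels = [0, 2]
valid_weights = [1.0, 1.0]
loss_valid = -sum(w * log(y_pred[lbl]) for w, lbl in zip(valid_weights, valid_labels))
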
Example #6
import numpy as np
from numba import uintp

from numba.experimental import jitclass
from ._split import find_node_split, split_indices
from ._node import node_type
from ._tree import add_node_tree, resize_tree, TREE_UNDEFINED, TreeClassifierType
from ._tree_context import TreeClassifierContextType
from ._utils import (
    NOPYTHON,
    NOGIL,
    BOUNDSCHECK,
    FASTMATH,
    resize,
    log_sum_2_exp,
    get_type,
)

INITIAL_STACK_SIZE = uintp(10)

eps = np.finfo("float32").eps

record_dtype = np.dtype(
    [
        ("parent", np.intp),
        ("depth", np.uintp),
        ("is_left", np.bool_),
        ("impurity", np.float32),
        ("start_train", np.uintp),
        ("end_train", np.uintp),
        ("start_valid", np.uintp),
        ("end_valid", np.uintp),
    ]
)
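
record_dtype describes one entry of the stack that drives iterative (non-recursive) tree growing: each record remembers the parent, depth, side, impurity, and the train/valid index ranges of a node still to be processed. The sketch below reuses record_dtype from the snippet above to show how such a stack could be used; the grow() loop itself is not part of this snippet, and the sizes and values are illustrative.

import numpy as np

stack = np.empty(16, dtype=record_dtype)   # illustrative capacity
top = 0

# Push the root node record
stack[top]["parent"] = -1                  # no parent yet (TREE_UNDEFINED-like sentinel)
stack[top]["depth"] = 0
stack[top]["is_left"] = False
stack[top]["impurity"] = np.inf
stack[top]["start_train"], stack[top]["end_train"] = 0, 100
stack[top]["start_valid"], stack[top]["end_valid"] = 0, 30
top += 1

while top > 0:
    top -= 1
    record = stack[top]
    # ... compute the node context for record, find the best split,
    #     add the node to the tree, and push the two child records ...
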
Example #7
            # Otherwise, we resize using the specified capacity
            resize_tree_(tree, capacity)


@jit(
    [
        uintp(
            TreeClassifierType,
            intp,
            uintp,
            boolean,
            boolean,
            uintp,
            float32,
            uint8,
            float32,
            uintp,
            uintp,
            float32,
            float32,
            uintp,
            uintp,
            uintp,
            uintp,
            float32,
        ),
        uintp(
            TreeRegressorType,
            intp,
            uintp,
            boolean,
            boolean,
Example #8
def compute_node_classifier_context(tree_context, node_context, start_train,
                                    end_train, start_valid, end_valid):
    """Computes the node context from the data and from the tree context for
    classification. Computations are saved in the passed node_context.

    Parameters
    ----------
    tree_context : TreeContext
        The tree context

    node_context : NodeClassifierContext
        The node context that this function will compute

    start_train : int
        Index of the first training sample in the node. We have that
        partition_train[start_train:end_train] contains the indexes of the node's
        training samples

    end_train : int
        End-index of the slice containing the node's training samples indexes

    start_valid : int
        Index of the first validation (out-of-the-bag) sample in the node. We have
        that partition_valid[start_valid:end_valid] contains the indexes of the
        node's validation samples

    end_valid : int
        End-index of the slice containing the node's validation samples indexes
    """
    n_samples_train_in_bins = node_context.n_samples_train_in_bins
    n_samples_valid_in_bins = node_context.n_samples_valid_in_bins
    w_samples_train_in_bins = node_context.w_samples_train_in_bins
    w_samples_valid_in_bins = node_context.w_samples_valid_in_bins
    non_empty_bins_train = node_context.non_empty_bins_train
    non_empty_bins_train_count = node_context.non_empty_bins_train_count
    non_empty_bins_valid = node_context.non_empty_bins_valid
    non_empty_bins_valid_count = node_context.non_empty_bins_valid_count
    y_sum = node_context.y_sum
    y_pred = node_context.y_pred

    # If necessary, sample the features
    if node_context.sample_features:
        sample_without_replacement(node_context.features_pool,
                                   node_context.features_sampled)

    features = node_context.features_sampled
    n_samples_train_in_bins.fill(0)
    n_samples_valid_in_bins.fill(0)
    w_samples_train_in_bins.fill(0.0)
    w_samples_valid_in_bins.fill(0.0)
    non_empty_bins_train.fill(0)
    non_empty_bins_train_count.fill(0)
    non_empty_bins_valid.fill(0)
    non_empty_bins_valid_count.fill(0)
    y_sum.fill(0.0)
    y_pred.fill(0.0)

    # Get information from the tree context
    features_bitarray = tree_context.features_bitarray
    n_values_in_words = features_bitarray.n_values_in_words
    offsets = features_bitarray.offsets
    bitarray = features_bitarray.bitarray
    n_bits = features_bitarray.n_bits
    bitmasks = features_bitarray.bitmasks

    y = tree_context.y
    sample_weights = tree_context.sample_weights
    partition_train = tree_context.partition_train
    partition_valid = tree_context.partition_valid
    n_classes = tree_context.n_classes
    dirichlet = tree_context.dirichlet
    aggregation = tree_context.aggregation

    # The indices of the training samples contained in the node
    train_indices = partition_train[start_train:end_train]
    valid_indices = partition_valid[start_valid:end_valid]

    # Weighted number of training and validation samples
    w_samples_train = 0.0
    w_samples_valid = 0.0

    # A counter for the features
    f = 0
    # The validation loss
    loss_valid = 0.0

    # TODO: unrolling the for loop could be faster
    # For-loop on features first and then samples (the bitarray is F-major)
    for j in features:
        n_values_in_word = n_values_in_words[j]
        bitarray_feature = bitarray[offsets[j]:offsets[j + 1]]
        n_bits_feature = n_bits[j]
        bitmask = bitmasks[j]
        for i in train_indices:
            # This gives bin = X[i, j]
            bin = get_value_from_column(i, bitarray_feature, bitmask,
                                        n_values_in_word, n_bits_feature)
            label = uintp(y[i])
            sample_weight = sample_weights[i]
            if f == 0:
                w_samples_train += sample_weight
                y_pred[label] += sample_weight

            if n_samples_train_in_bins[f, bin] == 0:
                # It's the first time we find a train sample for this (feature, bin)
                # We save the bin number at index non_empty_bins_train_count[f]
                non_empty_bins_train[f, non_empty_bins_train_count[f]] = bin
                # We increase the count of non-empty bins for this feature
                non_empty_bins_train_count[f] += 1

            # One more sample in this bin for the current feature
            n_samples_train_in_bins[f, bin] += 1
            w_samples_train_in_bins[f, bin] += sample_weight
            # One more sample in this bin for the current feature with this label
            y_sum[f, bin, label] += sample_weight

        # TODO: we should put this outside so that we can change the dirichlet
        #  parameter without re-growing the tree
        # The prediction is given by the formula
        #   y_k = (n_k + dirichlet) / (n_samples + dirichlet * n_classes)
        # where n_k is the number of samples with label class k
        if f == 0:
            for k in range(n_classes):
                y_pred[k] = (y_pred[k] + dirichlet) / (w_samples_train +
                                                       n_classes * dirichlet)

        # Compute sample counts about validation samples
        if aggregation:
            for i in valid_indices:
                bin = get_value_from_column(i, bitarray_feature, bitmask,
                                            n_values_in_word, n_bits_feature)
                sample_weight = sample_weights[i]
                if f == 0:
                    w_samples_valid += sample_weight
                    label = uintp(y[i])
                    # TODO: aggregation loss is hard-coded here. Call a function instead
                    #  when implementing other losses
                    loss_valid -= sample_weight * log(y_pred[label])

                if n_samples_valid_in_bins[f, bin] == 0.0:
                    # It's the first time we find a valid sample for this (feature, bin)
                    # We save the bin number at index non_empty_bins_valid_count[f]
                    non_empty_bins_valid[f,
                                         non_empty_bins_valid_count[f]] = bin
                    # We increase the count of non-empty bins for this feature
                    non_empty_bins_valid_count[f] += 1

                n_samples_valid_in_bins[f, bin] += 1
                w_samples_valid_in_bins[f, bin] += sample_weight

        f += 1

    # Save remaining things in the node context
    node_context.n_samples_train = end_train - start_train
    node_context.n_samples_valid = end_valid - start_valid
    node_context.loss_valid = loss_valid
    node_context.w_samples_train = w_samples_train
    node_context.w_samples_valid = w_samples_valid
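
The inner loops rely on get_value_from_column to decode the binned value X[i, j] from a packed bitarray in which each word stores n_values_in_words[j] values of n_bits[j] bits. The exact packing is not shown in this snippet; the decoder below is only a sketch of one plausible layout (values packed from the least significant bits of each word upward), not the library's actual implementation.

import numpy as np

def get_value_from_column_sketch(i, bitarray_feature, bitmask,
                                 n_values_in_word, n_bits):
    # Assumed layout: value i sits in word i // n_values_in_word, at slot
    # i % n_values_in_word, packed LSB-first with n_bits bits per value.
    word = np.uint64(bitarray_feature[i // n_values_in_word])
    slot = np.uint64(i % n_values_in_word)
    return (word >> (slot * np.uint64(n_bits))) & np.uint64(bitmask)
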
Example #9

@jit(
    [
        uintp(
            TreeClassifierType,
            intp,
            uintp,
            boolean,
            boolean,
            uintp,
            float32,
            uint64,
            float32,
            uintp,
            uintp,
            float32,
            float32,
            uintp,
            uintp,
            uintp,
            uintp,
            float32,
            boolean,
            optional(uint64[::1]),
            uint64,
        ),
        uintp(
            TreeRegressorType,
            intp,
            uintp,
Example #10
def compute_node_classifier_context(tree_context, node_context, start_train,
                                    end_train, start_valid, end_valid):
    """Computes the node context from the data and from the tree context for
    classification. Computations are saved in the passed node_context.

    Parameters
    ----------
    tree_context : TreeContext
        The tree context

    node_context : NodeClassifierContext
        The node context that this function will compute

    start_train : int
        Index of the first training sample in the node. We have that
        partition_train[start_train:end_train] contains the indexes of the node's
        training samples

    end_train : int
        End-index of the slice containing the node's training samples indexes

    start_valid : int
        Index of the first validation (out-of-the-bag) sample in the node. We have
        that partition_valid[start_valid:end_valid] contains the indexes of the
        node's validation samples

    end_valid : int
        End-index of the slice containing the node's validation samples indexes
    """
    # Initialize the things from the node context
    w_samples_train_in_bins = node_context.w_samples_train_in_bins
    w_samples_valid_in_bins = node_context.w_samples_valid_in_bins
    y_sum = node_context.y_sum
    y_pred = node_context.y_pred

    # If necessary, sample the features
    if node_context.sample_features:
        sample_without_replacement(node_context.features_pool,
                                   node_context.features_sampled)

    features = node_context.features_sampled
    w_samples_train_in_bins.fill(0.0)
    w_samples_valid_in_bins.fill(0.0)
    y_sum.fill(0.0)
    y_pred.fill(0.0)

    # Get information from the tree context
    X = tree_context.X
    y = tree_context.y
    sample_weights = tree_context.sample_weights
    partition_train = tree_context.partition_train
    partition_valid = tree_context.partition_valid
    n_classes = tree_context.n_classes
    dirichlet = tree_context.dirichlet

    # The indices of the training samples contained in the node
    train_indices = partition_train[start_train:end_train]
    valid_indices = partition_valid[start_valid:end_valid]

    # Weighted number of training and validation samples
    w_samples_train = 0.0
    w_samples_valid = 0.0

    # A counter for the features
    f = 0
    # The validation loss
    loss_valid = 0.0

    # TODO: unrolling the for loop could be faster
    # For-loop on features first and then samples (X is F-major)

    for feature in features:
        # Compute statistics about training samples
        for sample in train_indices:
            bin = X[sample, feature]
            label = uintp(y[sample])
            sample_weight = sample_weights[sample]
            if f == 0:
                w_samples_train += sample_weight
                y_pred[label] += sample_weight
            # One more sample in this bin for the current feature
            w_samples_train_in_bins[f, bin] += sample_weight
            # One more sample in this bin for the current feature with this label
            y_sum[f, bin, label] += sample_weight

        # TODO: we should put this outside so that we can change the dirichlet parameter
        # without rebuilding the tree
        # The prediction is given by the formula
        #   y_k = (n_k + dirichlet) / (n_samples + dirichlet * n_classes)
        # where n_k is the number of samples with label class k
        if f == 0:
            for k in range(n_classes):
                y_pred[k] = (y_pred[k] + dirichlet) / (w_samples_train +
                                                       n_classes * dirichlet)

        # Compute sample counts about validation samples
        for sample in valid_indices:
            bin = X[sample, feature]
            sample_weight = sample_weights[sample]
            if f == 0:
                w_samples_valid += sample_weight
                label = uintp(y[sample])
                # TODO: aggregation loss is hard-coded here. Call a function instead
                #  when implementing other losses
                loss_valid -= sample_weight * log(y_pred[label])

            w_samples_valid_in_bins[f, bin] += sample_weight

        f += 1

    # Save remaining things in the node context
    node_context.n_samples_train = end_train - start_train
    node_context.n_samples_valid = end_valid - start_valid
    node_context.loss_valid = loss_valid
    node_context.w_samples_train = w_samples_train
    node_context.w_samples_valid = w_samples_valid