'''
newtrain = train_df.applymap(encode_units)
print(newtrain)
newtrain = newtrain.dropna()
predictors1 = train_df.columns.drop(['Product_ID', 'User_ID', 'Marital_Status', 'Stay_In_Current_City_Years'])
frequent_itemsets = apriori(newtrain[predictors1], min_support=0.07, use_colnames=True)
rules = association_rules(frequent_itemsets, metric="lift", min_threshold=1)
print(rules)
print(rules[(rules['lift'] > 1.0) & (rules['confidence'] > 0.73)])
'''
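
# The commented-out apriori block above assumes an encode_units helper and the
# mlxtend imports, neither of which appears in this snippet. Below is a minimal
# sketch of the usual pattern (0/1 basket encoding for mlxtend's apriori); the
# original definition is an assumption:
from mlxtend.frequent_patterns import apriori, association_rules

def encode_units(x):
    # mlxtend's apriori expects a 0/1 (one-hot) matrix, so map any
    # positive quantity to 1 and everything else to 0.
    return 1 if x >= 1 else 0
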
X = train_df[predictors].loc[:2000]
y = train_df[target].loc[:2000]
clf = tree.DecisionTreeRegressor()
clf = clf.fit(X,y)
print(clf)

# Each row of decision_path(X) is a 0/1 indicator of the nodes that sample visits.
pd.DataFrame(clf.decision_path(X).toarray()).head(5)
pd.concat([X.reset_index(drop=True), pd.DataFrame(clf.decision_path(X).toarray())], axis=1).head(5)

print("\n\n", find_node(tree_=clf.tree_, current_node=0, search_node=13, features=X.columns.tolist()), "\n\n")
print(dataset[(dataset['Purchase'] >= 10000)])

dTree3 = DecisionTreeRegressor(max_depth = 6)
commonfit(dTree3, train_df, test_df, predictors, target, IDcol, 'DT-new.csv')

Xrules = pd.concat([X.reset_index(drop=True), pd.DataFrame(dTree3.decision_path(X).toarray()).iloc[:, 1:]], axis=1)


tree_model = DecisionTreeRegressor()
tree_model.fit(Xrules, y)

commonfit(tree_model, train_df, test_df, predictors, target, IDcol, 'DT-new.csv')
# The loop header and the opening of the DecisionTreeRegressor(...) call were
# truncated upstream; they are reconstructed here, and the swept range of
# min_samples_leaf values is an assumption.
error = []
r2 = []
leaf_values = range(1, 11)
for i in leaf_values:
    tree = DecisionTreeRegressor(min_samples_leaf=i,
                                 max_leaf_nodes=70,
                                 min_samples_split=3,
                                 splitter='best',
                                 criterion='mse')
    tree.fit(X_train, y_train)  # build a decision tree regressor from the training set
    print('min samples leaf = ', i)
    predictions = tree.predict(X_test)
    err1 = np.sum(abs(y_test - predictions)**2) / len(y_test)
    error.append(err1)
    r = r2_score(y_test, predictions)
    r2.append(r)
    print('Mean squared error: ', err1)
    print('R SQUARED: ', r)
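
# A quick visual summary of the sweep above; assumes matplotlib is available
# and that `leaf_values` and `error` were filled by the loop.
import matplotlib.pyplot as plt
plt.plot(list(leaf_values), error, marker='o', label='test MSE')
plt.xlabel('min_samples_leaf')
plt.ylabel('test MSE')
plt.legend()
plt.show()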

print(tree.decision_path(X_test))
df = pd.DataFrame({'Actual': y_test, 'Predicted': np.round(predictions)})
print(df)

err = np.sum(abs(y_test - predictions)) / len(y_test)
print('Mean absolute error for test =', err)

err1 = np.sum(abs(y_test - predictions)**2) / len(y_test)
print('Mean square error for test =', err1)

sse = np.sum(abs(y_test - predictions)**2)  # sum of squared errors, not yet averaged
print('Sum of squared errors =', sse)

rmse = np.sqrt(sse / len(y_test))
print('RMSE (root mean squared error) =', rmse)
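
# Cross-check of the hand-rolled metrics with scikit-learn's built-ins
# (a sanity-check sketch, assuming y_test and predictions from above).
from sklearn.metrics import mean_absolute_error, mean_squared_error
print('MAE  =', mean_absolute_error(y_test, predictions))
print('MSE  =', mean_squared_error(y_test, predictions))
print('RMSE =', np.sqrt(mean_squared_error(y_test, predictions)))
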
def print_tree(estimator: DecisionTreeRegressor, X_test: DataFrame, scaler,
               feature_labels=None):
    n_nodes = estimator.tree_.node_count
    children_left = estimator.tree_.children_left
    children_right = estimator.tree_.children_right
    feature = estimator.tree_.feature
    threshold = estimator.tree_.threshold

    # The tree structure can be traversed to compute various properties such
    # as the depth of each node and whether or not it is a leaf.
    node_depth = np.zeros(shape=n_nodes, dtype=np.int64)
    is_leaves = np.zeros(shape=n_nodes, dtype=bool)
    stack = [(0, -1)]  # seed is the root node id and its parent depth
    while len(stack) > 0:
        node_id, parent_depth = stack.pop()
        node_depth[node_id] = parent_depth + 1

        # If we have a test node
        if (children_left[node_id] != children_right[node_id]):
            stack.append((children_left[node_id], parent_depth + 1))
            stack.append((children_right[node_id], parent_depth + 1))
        else:
            is_leaves[node_id] = True

    # print("The binary tree structure has %s nodes and has "
    #       "the following tree structure (top 3):"
    #       % n_nodes)
    # for i in range(min(3, n_nodes)):
    #     if is_leaves[i]:
    #         print("%snode=%s leaf node." % (node_depth[i] * "\t", i))
    #     else:
    #         print("%snode=%s test node: go to node %s if X[:, %s] <= %s else to "
    #               "node %s."
    #               % (node_depth[i] * "\t",
    #                  i,
    #                  children_left[i],
    #                  feature[i],
    #                  threshold[i],
    #                  children_right[i],
    #                  ))
    # print()

    # First, retrieve the decision path of each sample. The decision_path
    # method returns a node indicator matrix: a non-zero element at
    # position (i, j) indicates that sample i passes through node j.

    vals = scaler.transform(X_test)
    node_indicator = estimator.decision_path(vals)
    if feature_labels is None:
        feature_labels = X_test.columns.tolist()

    # Similarly, we can retrieve the leaf id reached by each sample.

    # leave_id = estimator.apply(X_test)

    # Now it's possible to get the tests that were used to predict a sample
    # or a group of samples. First, a single sample:

    sample_id = 0
    node_index = node_indicator.indices[
        node_indicator.indptr[sample_id]:node_indicator.indptr[sample_id + 1]]

    print('Rules used to predict sample %s: ' % sample_id)
    for node_id in node_index:
        # if leave_id[sample_id] != node_id:
        #     continue

        if (vals[sample_id, feature[node_id]] <= threshold[node_id]):
            threshold_sign = "<="
        else:
            threshold_sign = ">"

        print("decision id node %s : (%s: %s %s %s), original val: %s" %
              (node_id, feature_labels[feature[node_id]],
               vals[sample_id, feature[node_id]], threshold_sign,
               threshold[node_id], X_test.iloc[sample_id, feature[node_id]]))

    # For a group of samples, we have the following common node.
    sample_ids = [0, 1]
    common_nodes = (node_indicator.toarray()[sample_ids].sum(
        axis=0) == len(sample_ids))

    common_node_id = np.arange(n_nodes)[common_nodes]

    print("\nThe following samples %s share nodes %s in the tree" %
          (sample_ids, common_node_id))
    print("They cover %s %% of all nodes." %
          (100 * len(common_node_id) / n_nodes, ))
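
# A minimal usage sketch for print_tree above; the synthetic data, the
# StandardScaler, and the shallow tree are illustrative assumptions.
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeRegressor

X_demo = pd.DataFrame(np.random.rand(100, 3), columns=['f0', 'f1', 'f2'])
y_demo = 2 * X_demo['f0'] + X_demo['f1']
demo_scaler = StandardScaler().fit(X_demo)
demo_tree = DecisionTreeRegressor(max_depth=3).fit(demo_scaler.transform(X_demo), y_demo)
print_tree(demo_tree, X_demo, demo_scaler, feature_labels=None)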
Example #4
class GroupPCADecisionTreeRegressor(BaseEstimator, RegressorMixin):
    """PCA on random groups of features, followed by a decision tree.

    See: GroupPCA and DecisionTreeRegressor.
    """

    def __init__(
        self,
        criterion="mse",
        splitter="best",
        max_depth=None,
        min_samples_split=2,
        min_samples_leaf=1,
        min_weight_fraction_leaf=0.0,
        max_features=None,
        random_state=None,
        max_leaf_nodes=None,
        min_impurity_decrease=0.0,
        min_impurity_split=None,
        presort=False,
        pca_bootstrap=False,
        pca_max_nb_groups=0.25,
        pca_max_group_size=0.05,
    ):

        self.criterion = criterion
        self.splitter = splitter
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.random_state = random_state
        self.max_leaf_nodes = max_leaf_nodes
        self.min_impurity_decrease = min_impurity_decrease
        self.min_impurity_split = min_impurity_split
        self.presort = presort

        self.pca_bootstrap = pca_bootstrap
        self.pca_max_nb_groups = pca_max_nb_groups
        self.pca_max_group_size = pca_max_group_size

        self._tree = None
        self._group_pca = None

    def fit(self, X, y, sample_weight=None, check_input=True, X_idx_sorted=None):

        self.n_features_ = X.shape[1]

        # 1) create GroupPCA
        self._group_pca = GroupPCA(
            random_state=self.random_state,
            bootstrap=self.pca_bootstrap,
            max_nb_groups=self.pca_max_nb_groups,
            max_group_size=self.pca_max_group_size,
        )
        # 2) Create Tree
        self._tree = DecisionTreeRegressor(
            criterion=self.criterion,
            splitter=self.splitter,
            max_depth=self.max_depth,
            min_samples_split=self.min_samples_split,
            min_samples_leaf=self.min_samples_leaf,
            min_weight_fraction_leaf=self.min_weight_fraction_leaf,
            max_features=self.max_features,
            max_leaf_nodes=self.max_leaf_nodes,
            random_state=self.random_state,
            min_impurity_decrease=self.min_impurity_decrease,
            min_impurity_split=self.min_impurity_split,
            presort=self.presort,
        )

        # 3) Apply group PCA
        Xpca = self._group_pca.fit_transform(X, y)

        # 4) fit the tree on the transformed features (X_idx_sorted is not
        #    forwarded, since the PCA transform changes the feature space)
        self._tree.fit(Xpca, y, sample_weight=sample_weight, check_input=check_input, X_idx_sorted=None)

        return self

    def predict(self, X, check_input=True):

        if self._tree is None:
            raise NotFittedError("You should fit the model first")

        Xpca = self._group_pca.transform(X)
        return self._tree.predict(Xpca, check_input=check_input)

    def apply(self, X, check_input=True):

        if self._tree is None:
            raise NotFittedError("You should fit the model first")

        Xpca = self._group_pca.transform(X)

        return self._tree.apply(Xpca, check_input=check_input)

    def decision_path(self, X, check_input=True):
        Xpca = self._group_pca.transform(X)

        return self._tree.decision_path(Xpca, check_input=check_input)

    @property
    def tree_(self):
        return self._tree.tree_

    def _validate_X_predict(self, X, check_input):
        """Validate X whenever one tries to predict, apply, predict_proba"""
        if check_input:
            X = check_array(X, dtype=DTYPE, accept_sparse="csr")
            if issparse(X) and (X.indices.dtype != np.intc or X.indptr.dtype != np.intc):
                raise ValueError("No support for np.int64 index based " "sparse matrices")

        n_features = X.shape[1]
        if self.n_features_ != n_features:
            raise ValueError(
                "Number of features of the model must "
                "match the input. Model n_features is %s and "
                "input n_features is %s " % (self.n_features_, n_features)
            )

        return X
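
# A minimal usage sketch; assumes GroupPCA is importable from the same codebase
# and an older scikit-learn release that still accepts criterion='mse'/presort.
import numpy as np

X_demo = np.random.rand(200, 10)
y_demo = X_demo[:, 0] + 0.1 * np.random.rand(200)
gp_tree = GroupPCADecisionTreeRegressor(max_depth=4, random_state=0)
gp_tree.fit(X_demo, y_demo)
print(gp_tree.predict(X_demo[:5]))
print(gp_tree.decision_path(X_demo[:5]).shape)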
class SingleTreeCateInterpreter(_SingleTreeInterpreter):
    """
    An interpreter for the effect estimated by a CATE estimator

    Parameters
    ----------
    include_model_uncertainty : bool, optional, default False
        Whether to include confidence interval information when building a
        simplified model of the cate model. If set to True, then
        cate estimator needs to support the `const_marginal_ate_inference` method.

    uncertainty_level : double, optional, default .1
        The uncertainty level for the confidence intervals to be constructed
        and used in the simplified model creation. If value=alpha
        then a multitask decision tree will be built such that all samples
        in a leaf have similar target prediction but also similar alpha
        confidence intervals.

    uncertainty_only_on_leaves : bool, optional, default True
        Whether uncertainty information should be displayed only on leaf nodes.
        If False, then interpretation can be slightly slower, especially for cate
        models that have a computationally expensive inference method.

    splitter : string, optional, default "best"
        The strategy used to choose the split at each node. Supported
        strategies are "best" to choose the best split and "random" to choose
        the best random split.

    max_depth : int or None, optional, default None
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.

    min_samples_split : int, float, optional, default 2
        The minimum number of samples required to split an internal node:

        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a fraction and
          `ceil(min_samples_split * n_samples)` are the minimum
          number of samples for each split.

    min_samples_leaf : int, float, optional, default 1
        The minimum number of samples required to be at a leaf node.
        A split point at any depth will only be considered if it leaves at
        least ``min_samples_leaf`` training samples in each of the left and
        right branches.  This may have the effect of smoothing the model,
        especially in regression.

        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a fraction and
          `ceil(min_samples_leaf * n_samples)` are the minimum
          number of samples for each node.

    min_weight_fraction_leaf : float, optional, default 0.
        The minimum weighted fraction of the sum total of weights (of all
        the input samples) required to be at a leaf node. Samples have
        equal weight when sample_weight is not provided.

    max_features : int, float or {"auto", "sqrt", "log2"}, default=None
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a fraction and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=n_features`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    random_state : int, RandomState instance or None, optional, default None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    max_leaf_nodes : int or None, optional, default None
        Grow a tree with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.

    min_impurity_decrease : float, optional, default 0.
        A node will be split if this split induces a decrease of the impurity
        greater than or equal to this value.

        The weighted impurity decrease equation is the following::

            N_t / N * (impurity - N_t_R / N_t * right_impurity
                                - N_t_L / N_t * left_impurity)

        where ``N`` is the total number of samples, ``N_t`` is the number of
        samples at the current node, ``N_t_L`` is the number of samples in the
        left child, and ``N_t_R`` is the number of samples in the right child.
        ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
        if ``sample_weight`` is passed.
    """

    def __init__(self, *,
                 include_model_uncertainty=False,
                 uncertainty_level=.1,
                 uncertainty_only_on_leaves=True,
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features=None,
                 random_state=None,
                 max_leaf_nodes=None,
                 min_impurity_decrease=0.):
        self.include_uncertainty = include_model_uncertainty
        self.uncertainty_level = uncertainty_level
        self.uncertainty_only_on_leaves = uncertainty_only_on_leaves
        self.criterion = "mse"
        self.splitter = splitter
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.random_state = random_state
        self.max_leaf_nodes = max_leaf_nodes
        self.min_impurity_decrease = min_impurity_decrease

    def interpret(self, cate_estimator, X):
        """
        Interpret the heterogeneity of a CATE estimator when applied to a set of features

        Parameters
        ----------
        cate_estimator : :class:`.LinearCateEstimator`
            The fitted estimator to interpret

        X : array-like
            The features against which to interpret the estimator;
            must be compatible shape-wise with the features used to fit
            the estimator

        Returns
        -------
        self: object instance
        """
        self.tree_model_ = DecisionTreeRegressor(criterion=self.criterion,
                                                 splitter=self.splitter,
                                                 max_depth=self.max_depth,
                                                 min_samples_split=self.min_samples_split,
                                                 min_samples_leaf=self.min_samples_leaf,
                                                 min_weight_fraction_leaf=self.min_weight_fraction_leaf,
                                                 max_features=self.max_features,
                                                 random_state=self.random_state,
                                                 max_leaf_nodes=self.max_leaf_nodes,
                                                 min_impurity_decrease=self.min_impurity_decrease)
        y_pred = cate_estimator.const_marginal_effect(X)

        self.tree_model_.fit(X, y_pred.reshape((y_pred.shape[0], -1)))
        paths = self.tree_model_.decision_path(X)
        node_dict = {}
        for node_id in range(paths.shape[1]):
            mask = paths.getcol(node_id).toarray().flatten().astype(bool)
            Xsub = X[mask]
            if (self.include_uncertainty and
                    ((not self.uncertainty_only_on_leaves) or (self.tree_model_.tree_.children_left[node_id] < 0))):
                res = cate_estimator.const_marginal_ate_inference(Xsub)
                node_dict[node_id] = {'mean': res.mean_point,
                                      'std': res.std_point,
                                      'ci': res.conf_int_mean(alpha=self.uncertainty_level)}
            else:
                cate_node = y_pred[mask]
                node_dict[node_id] = {'mean': np.mean(cate_node, axis=0),
                                      'std': np.std(cate_node, axis=0)}
        self.node_dict_ = node_dict
        return self

    def _make_dot_exporter(self, *, out_file, feature_names, treatment_names, max_depth, filled,
                           leaves_parallel, rotate, rounded,
                           special_characters, precision):
        return _CateTreeDOTExporter(self.include_uncertainty, self.uncertainty_level,
                                    out_file=out_file, feature_names=feature_names,
                                    treatment_names=treatment_names,
                                    max_depth=max_depth,
                                    filled=filled,
                                    leaves_parallel=leaves_parallel, rotate=rotate, rounded=rounded,
                                    special_characters=special_characters, precision=precision)

    def _make_mpl_exporter(self, *, title, feature_names, treatment_names, max_depth,
                           filled,
                           rounded, precision, fontsize):
        return _CateTreeMPLExporter(self.include_uncertainty, self.uncertainty_level,
                                    title=title, feature_names=feature_names,
                                    treatment_names=treatment_names,
                                    max_depth=max_depth,
                                    filled=filled,
                                    rounded=rounded,
                                    precision=precision, fontsize=fontsize)
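
# A minimal usage sketch; LinearDML from econml is one illustrative CATE
# estimator (any estimator exposing const_marginal_effect would do), and the
# synthetic data are assumptions.
import numpy as np
from econml.dml import LinearDML

X = np.random.rand(500, 4)
T = np.random.binomial(1, 0.5, size=500)
y = T * X[:, 0] + 0.1 * np.random.normal(size=500)

est = LinearDML(discrete_treatment=True, random_state=0)
est.fit(y, T, X=X)

intrp = SingleTreeCateInterpreter(max_depth=2)
intrp.interpret(est, X)
print(intrp.node_dict_)  # per-node mean/std of the estimated treatment effects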
Example #6
            # (fragment: the head of ModelTree() was truncated upstream)
            meanSquare = (N_left*meanSquareLeft + N_right*meanSquareRight) / N
            linearReg = linearReg[0]
            meanSquare = np.sqrt(meanSquare[0])
            if linearReg < linearRegBest:
                linearRegBest = linearReg
                meanSquareBest = meanSquare
                print(leftModel)
                model = [leftModel, rightModel]
                print(model)
    return model, meanSquareBest

modelTree, meanSquare = ModelTree()
print(modelTree, " ", meanSquare)
print("Time taken to build the model: ", datetime.now() - startTime)

node_indicator = regressionTree.decision_path(X_test)

leave_id = regressionTree.apply(X_test)

sample_id = 0
node_index = node_indicator.indices[node_indicator.indptr[sample_id]:
                                    node_indicator.indptr[sample_id + 1]]

for i in range(n_nodes):
    if is_leaves[i]:
        print("%snode=%s leaf node." % (node_depth[i] * "\t", i))
    else:
        print("%snode=%s test node: go to node %s if X[:, %s] <= %s else to "
              "node %s."
              % (node_depth[i] * "\t",
                 i,
                 children_left[i],
                 feature[i],
                 threshold[i],
                 children_right[i],
                 ))