Code example #1
	def convert_to_BST_n(self, nu):
		# Convert the first `nu` nodes of the sorted linked list into a
		# height-balanced BST. The list cursor is kept on self.head so
		# that advancing it inside a recursive call is visible to the
		# enclosing calls; rebinding a local `head` parameter would not
		# propagate back to the caller.
		if nu <= 0:
			return None
		left = self.convert_to_BST_n(nu // 2)   # left half first, in list order
		node = Tree(self.head.contents)         # current list node becomes the root
		self.head = self.head.next              # consume one list node
		node.left = left
		node.right = self.convert_to_BST_n(nu - nu // 2 - 1)
		return node
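Since the excerpt defines neither the linked-list node nor the Tree class, here is a self-contained sketch of how this converter might be driven; the ListNode, Tree, and Converter names are assumptions for illustration only.

# Hypothetical supporting classes; the original example does not show them.
class ListNode:
    def __init__(self, contents, next=None):
        self.contents = contents
        self.next = next

class Tree:
    def __init__(self, contents):
        self.contents = contents
        self.left = None
        self.right = None

class Converter:
    def __init__(self, head):
        self.head = head  # shared list cursor, consumed in order

    def convert_to_BST_n(self, nu):
        if nu <= 0:
            return None
        left = self.convert_to_BST_n(nu // 2)
        node = Tree(self.head.contents)
        self.head = self.head.next
        node.left = left
        node.right = self.convert_to_BST_n(nu - nu // 2 - 1)
        return node

# Build the list 1 -> 2 -> 3 -> 4 -> 5 and convert all 5 nodes.
head = None
for v in (5, 4, 3, 2, 1):
    head = ListNode(v, head)
root = Converter(head).convert_to_BST_n(5)
print(root.contents)  # 3 (the middle element becomes the root)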
Code example #2
File: bst_insertion.py  Project: aishs8/algorithms
def insert_in_bst(root, element):
    if root is None:
        root = Tree(element)
    elif root.data > element:
        # move left
        if root.left is None:
            root.left = Tree(element)
        else:
            insert_in_bst(root.left, element)
    elif root.data < element:
        # move right
        if root.right is None:
            root.right = Tree(element)
        else:
            insert_in_bst(root.right, element)
    # equal elements fall through: duplicates are silently ignored
    return root
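A small driver for insert_in_bst, with a hypothetical Tree node (the real Tree class from aishs8/algorithms is not shown in the excerpt) and an in-order traversal to confirm the BST property:

class Tree:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None

def inorder(node):
    # In-order traversal of a BST yields its keys in sorted order.
    if node is None:
        return []
    return inorder(node.left) + [node.data] + inorder(node.right)

root = None
for v in (7, 3, 9, 1, 5):
    root = insert_in_bst(root, v)
print(inorder(root))  # [1, 3, 5, 7, 9]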
Code example #3
File: bst_insertion.py  Project: aishs8/algorithms
def insert_in_bst_iterative(root, element):
    temp = root  # remember the original root so it can be returned
    if root is None:
        return Tree(element)
    while root is not None:
        if root.data > element:
            if root.left:
                root = root.left
            else:
                root.left = Tree(element)
                break
        elif root.data < element:
            if root.right:
                root = root.right
            else:
                root.right = Tree(element)
                break
        else:
            # element already present; without this branch the loop
            # would never terminate on duplicates
            break
    return temp
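The iterative variant can be exercised the same way; this self-contained sketch (Tree and inorder are again assumed helpers) also shows that a duplicate element is ignored rather than looping forever:

class Tree:
    def __init__(self, data):
        self.data, self.left, self.right = data, None, None

def inorder(n):
    return inorder(n.left) + [n.data] + inorder(n.right) if n else []

root = None
for v in (7, 3, 9, 1, 5, 5):   # note the duplicate 5
    root = insert_in_bst_iterative(root, v)
print(inorder(root))  # [1, 3, 5, 7, 9] -- the duplicate was ignored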
Code example #4
	def convert_to_bst(self, start, end):
		# Recursively build a height-balanced BST from the sorted
		# slice self.array[start..end] (inclusive bounds).
		self.count += 1  # count recursive calls
		if start > end:
			return None
		mid = (start + end) // 2
		rootNode = Tree(self.array[mid])  # middle element becomes the root
		# The recursive calls already return Tree nodes, so attach them
		# directly instead of wrapping them in Tree(...) again.
		rootNode.left = self.convert_to_bst(start, mid - 1)
		rootNode.right = self.convert_to_bst(mid + 1, end)
		return rootNode
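A minimal host class for this method, since the excerpt shows neither the class it belongs to nor Tree; all names below are assumptions for illustration:

class Tree:
    def __init__(self, data):
        self.data, self.left, self.right = data, None, None

class SortedArrayToBST:
    def __init__(self, array):
        self.array = array
        self.count = 0

    def convert_to_bst(self, start, end):
        self.count += 1
        if start > end:
            return None
        mid = (start + end) // 2
        root = Tree(self.array[mid])
        root.left = self.convert_to_bst(start, mid - 1)
        root.right = self.convert_to_bst(mid + 1, end)
        return root

arr = [1, 2, 3, 4, 5, 6, 7]
conv = SortedArrayToBST(arr)
root = conv.convert_to_bst(0, len(arr) - 1)
print(root.data)    # 4 -- the middle element
print(conv.count)   # 15 calls for 7 elements (2n + 1)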
Code example #5
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator)):
    """Base class for decision trees.
       Warning: This class should not be used directly.
       Use derived classes instead.
       """
    @abstractmethod
    def __init__(self,
                 criterion,
                 splitter,
                 max_depth,
                 min_samples_split,
                 min_samples_leaf,
                 min_weight_fraction_leaf,
                 max_features,
                 max_leaf_nodes,
                 random_state,
                 min_impurity_decrease,
                 min_impurity_split,
                 class_weight=None,
                 presort=False):
        self.criterion = criterion
        self.splitter = splitter
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.random_state = random_state
        self.max_leaf_nodes = max_leaf_nodes
        self.min_impurity_decrease = min_impurity_decrease
        self.min_impurity_split = min_impurity_split
        self.class_weight = class_weight
        self.presort = presort

    def fit(self,
            X,
            y,
            sample_weight=None,
            check_input=True,
            X_idx_sorted=None):
        n_samples, self.n_features_ = X.shape
        y = np.atleast_1d(y)
        expanded_class_weight = None

        # This simplified version supports a single output and skips the
        # class-encoding step of the full implementation:
        # is_classification = is_classifier(self)
        # if is_classification:
        #     y = np.copy(y)
        #     y_encoded = np.zeros(y.shape, dtype=np.int)
        #     classes_k , y_encoded = np.unique( y , return_inverse=True)
        #     y = y_encoded
        # else:
        self.classes_ = [None]
        self.n_classes_ = [1]
        self.n_outputs_ = 1  # referenced below but never set in __init__
        # Build tree

        criterion = self.criterion
        splitter = self.splitter
        # The builder expects an absolute minimum leaf weight; derive it
        # from the user-facing fraction (assuming unit sample weights).
        self.min_weight_leaf = self.min_weight_fraction_leaf * n_samples

        self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)

        builder = DepthFirstTreeBuilder(splitter, self.min_samples_split,
                                        self.min_samples_leaf,
                                        self.min_weight_leaf, self.max_depth,
                                        self.min_impurity_decrease,
                                        self.min_impurity_split)

        builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)

        return self

    def predict(self, X, check_input=True):
        proba = self.tree_.predict(X)
        return proba[:, 0]

    def apply(self, X, check_input=True):
        return self.tree_.apply(X)

    @property
    def feature_importances_(self):
        """Return the feature importances.
        The importance of a feature is computed as the (normalized) total
        reduction of the criterion brought by that feature.
        It is also known as the Gini importance.
        Returns
        -------
        feature_importances_ : array, shape = [n_features]
        """

        return self.tree_.compute_feature_importances()
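This class mirrors a stripped-down version of scikit-learn's BaseDecisionTree. For context, the same fit / predict / feature_importances_ surface is reachable through scikit-learn's public estimators; a minimal usage sketch against the real library (not part of this project's code):

from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
clf = DecisionTreeClassifier(max_depth=3, random_state=0).fit(X, y)

print(clf.predict(X[:5]))        # class labels for the first five samples
print(clf.feature_importances_)  # normalized criterion reduction per feature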
Code example #6
def __init__(self, canvas: ICanvas):
    self._tree: Tree = Tree("Manager", canvas)
    self._canvas: ICanvas = canvas
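The excerpt does not define ICanvas; a hypothetical Protocol shape for it might look like the sketch below. The draw_line method is invented purely for illustration and is not taken from the project.

from typing import Protocol

class ICanvas(Protocol):
    # Hypothetical drawing surface that the tree renders onto.
    def draw_line(self, x1: float, y1: float, x2: float, y2: float) -> None: ...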
Code example #7
if getattr(y_train, "dtype", None) != DOUBLE or not y_train.flags.contiguous:
    y_train = np.ascontiguousarray(y_train, dtype=DOUBLE)

max_depth = (np.iinfo(np.int32).max if max_depth is None else max_depth)
max_leaf_nodes = (-1 if max_leaf_nodes is None else max_leaf_nodes)

max_features = max(1, int(np.sqrt(n_features_)))  # "sqrt" feature-subsetting heuristic

# Train the tree

criterion = CRITERIA_CLF[criterion](n_outputs, n_classes_)
SPLITTERS = DENSE_SPLITTERS
splitter = SPLITTERS[splitter](criterion, max_features, min_samples_leaf,
                               min_weight_leaf, random_state)

tree_ = Tree(n_features_, n_classes_, n_outputs)

builder = DepthFirstTreeBuilder(splitter, min_samples_split, min_samples_leaf,
                                min_weight_leaf, max_depth,
                                min_impurity_decrease, min_impurity_split)

builder.build(tree_, X_train, y_train)

# Unwrap the single-output lists; `classes` comes from a label-encoding
# step that is not shown in this excerpt.
classes_ = classes[0]
n_classes_ = n_classes_[0]

# Prune tree

n_classes_ = np.atleast_1d(n_classes_)
pruned_tree = Tree(n_features_, n_classes_, n_outputs)
_build_pruned_tree_ccp(pruned_tree, tree_, ccp_alpha)
tree_ = pruned_tree
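The _build_pruned_tree_ccp call is the internal counterpart of scikit-learn's public cost-complexity pruning interface. A sketch of the same build-then-prune workflow through that public API (standard scikit-learn; the dataset choice is arbitrary):

from sklearn.datasets import load_breast_cancer
from sklearn.tree import DecisionTreeClassifier

X, y = load_breast_cancer(return_X_y=True)

# Enumerate the effective alphas along the pruning path...
path = DecisionTreeClassifier(random_state=0).cost_complexity_pruning_path(X, y)

# ...then fit a tree pruned at one of them (the midpoint, arbitrarily).
alpha = path.ccp_alphas[len(path.ccp_alphas) // 2]
pruned = DecisionTreeClassifier(random_state=0, ccp_alpha=alpha).fit(X, y)
print(pruned.tree_.node_count)  # far fewer nodes than the unpruned tree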