Example #1
    def _init_data_container(self, X: np.ndarray, y: np.ndarray):
        """Fills a pyrfr default data container, s.t. the forest knows
        categoricals and bounds for continous data

        Parameters
        ----------
        X : np.ndarray [n_samples, n_features]
            Input data points
        y : np.ndarray [n_samples, ]
            Corresponding target values

        Returns
        -------
        data : regression.default_data_container
            The filled data container that pyrfr can interpret
        """
        # retrieve the types and the bounds from the ConfigSpace
        data = regression.default_data_container(X.shape[1])

        for i, (mn, mx) in enumerate(self.bounds):
            if np.isnan(mx):
                data.set_type_of_feature(i, mn)
            else:
                data.set_bounds_of_feature(i, mn, mx)

        for row_X, row_y in zip(X, y):
            data.add_data_point(row_X, row_y)
        return data
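A minimal sketch (not taken from the class above) of how a bounds list of this shape can be built from a ConfigSpace, following the encoding used elsewhere on this page: categorical hyperparameters become (number of choices, NaN) and continuous ones become (lower, upper). The import paths and hyperparameter names are illustrative assumptions.

import numpy as np
from ConfigSpace import ConfigurationSpace
from ConfigSpace.hyperparameters import (CategoricalHyperparameter,
                                         UniformFloatHyperparameter)

cs = ConfigurationSpace()
cs.add_hyperparameter(UniformFloatHyperparameter("learning_rate", 1e-4, 1e-1))
cs.add_hyperparameter(CategoricalHyperparameter("optimizer", ["adam", "sgd"]))

bounds = []
for hp in cs.get_hyperparameters():
    if isinstance(hp, CategoricalHyperparameter):
        bounds.append((len(hp.choices), np.nan))   # categorical: (num choices, NaN)
    else:
        bounds.append((hp.lower, hp.upper))        # continuous: (lower, upper)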
Example #2
    def train(self, X, y, **kwargs):
        """
        Trains the random forest on X and y.

        Parameters
        ----------
        X: np.ndarray (N, D)
            Input data points. The dimensionality of X is (N, D),
            with N as the number of points and D is the number of features.
        y: np.ndarray (N,)
            The corresponding target values.
        """

        self.X = X
        self.y = y

        if self.n_points_per_tree == 0:
            self.rf.options.num_data_points_per_tree = X.shape[0]

        data = reg.default_data_container(self.X.shape[1])

        for row_X, row_y in zip(X, y):
            data.add_data_point(row_X, row_y)

        self.rf.fit(data, self.reg_rng)
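After fit, predictions are made point by point. Below is a hedged sketch of a matching predict helper; it assumes pyrfr's binary_rss_forest exposes predict_mean_var(x) returning a (mean, variance) pair, as the SMAC/RoBO wrappers around pyrfr use it.

import numpy as np

def predict(rf, X_test):
    # hedged sketch: predict_mean_var(x) -> (mean, variance) is assumed
    means = np.zeros(X_test.shape[0])
    variances = np.zeros(X_test.shape[0])
    for i, x in enumerate(X_test):
        m, v = rf.predict_mean_var(x)
        means[i] = m
        variances[i] = v
    return means, variances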
Example #3
    def __init_data_container(self, X: np.ndarray, y: np.ndarray):
        """
        The biggest difference to SMAC3's EPM: we fit the forest on a transformation and predict the untransformed result.
        Fills a pyrfr default data container so that the forest knows the
        categoricals and bounds for continuous data

        Parameters
        ----------
        X : np.ndarray [n_samples, n_features]
            Input data points
        y : np.ndarray [n_samples, ]
            Corresponding target values

        Returns
        -------
        data : regression.default_data_container
            The filled data container that pyrfr can interpret
        """
        # retrieve the types and the bounds from the ConfigSpace
        data = regression.default_data_container(X.shape[1])
        if self.logged_y:
            y = y.reshape((-1, 1))
            y = np.hstack((y, np.power(10, y)))

        for i, (mn, mx) in enumerate(self.bounds):
            if np.isnan(mx):
                data.set_type_of_feature(i, mn)
            else:
                data.set_bounds_of_feature(i, mn, mx)

        for row_X, row_y in zip(X, y):
            data.add_data_point(row_X, row_y)
        return data
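For the logged_y branch, y is expanded into a two-column response holding the log-space value and its back-transformed counterpart. A small worked example of just that transformation (pure numpy, independent of pyrfr):

import numpy as np

# assume these are log10-transformed costs
y = np.array([0.0, 1.0, 2.0])
y = y.reshape((-1, 1))
y = np.hstack((y, np.power(10, y)))
# y is now [[0., 1.], [1., 10.], [2., 100.]]: log-space value next to 10**value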
Example #4
    def train(self, X, y, **kwargs):
        """
        Trains the random forest on X and y.

        Parameters
        ----------
        X: np.ndarray (N, D)
            Input data points. The dimensionality of X is (N, D),
            with N as the number of points and D is the number of features.
        y: np.ndarray (N,)
            The corresponding target values.
        """

        self.X = X
        self.y = y

        if self.n_points_per_tree == 0:
            self.rf.options.num_data_points_per_tree = X.shape[0]

        data = reg.default_data_container(self.X.shape[1])

        for row_X, row_y in zip(X, y):
            data.add_data_point(row_X, row_y)

        self.rf.fit(data, self.reg_rng)
Example #5
	def setUp(self):
		data_set_prefix = '${CMAKE_SOURCE_DIR}/test_data_sets/'
		self.data = reg.default_data_container(64)
		self.data.import_csv_files(data_set_prefix+'features13.csv', data_set_prefix+'responses13.csv')
		
		self.rng = reg.default_random_engine(1)
		self.forest_constructor = reg.qr_forest
Example #6
    def setUp(self):
        data_set_prefix = '${CMAKE_SOURCE_DIR}/test_data_sets/'
        self.data = reg.default_data_container(3)
        self.data.import_csv_files(
            data_set_prefix + 'online_lda_features.csv',
            data_set_prefix + 'online_lda_responses.csv')

        self.rng = reg.default_random_engine(1)
        self.forest_constructor = reg.fanova_forest
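These setUp fixtures are normally followed by test methods that configure and fit the forest on the imported container. A hedged sketch of such a test body, using only options and calls that appear in the other examples on this page; the concrete numbers are placeholders:

    def test_fit(self):
        # hedged sketch; option names follow the other examples on this page
        forest = self.forest_constructor()
        forest.options.num_trees = 16
        forest.options.do_bootstrapping = True
        forest.options.num_data_points_per_tree = 200
        forest.fit(self.data, self.rng)
        self.assertEqual(forest.options.num_trees, 16)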
Example #7
        def init_data(X, y, bounds):
            data = reg.default_data_container(len(X[0]))

            for i, (mn, mx) in enumerate(bounds):
                if math.isnan(mx):
                    data.set_type_of_feature(i, mn)
                else:
                    data.set_bounds_of_feature(i, mn, mx)

            for row_X, row_y in zip(X, y):
                data.add_data_point(row_X, row_y)
            return data
Example #8
    def __init__(self,
                 X_init: np.ndarray,
                 Y_init: np.ndarray,
                 num_trees: int = 30,
                 do_bootstrapping: bool = True,
                 n_points_per_tree: int = 0,
                 seed: int = None) -> None:
        """
        Interface to random forests for Bayesian optimization based on the pyrfr package, which, thanks to its
        random splitting, gives better uncertainty estimates than the sklearn random forest.

        Dependencies:
            AutoML rfr (https://github.com/automl/random_forest_run)

        :param X_init: Initial input data points to train the model
        :param Y_init: Initial target values
        :param num_trees: Specifies the number of trees to build the random forest
        :param do_bootstrapping: Defines whether bootstrapping is used for the individual trees
        :param n_points_per_tree: Specifies the number of points for each individual tree (0 means no restriction)
        :param seed: Used to seed the random number generator for the random forest (None means random seed)
        """
        super().__init__()

        # Set random number generator for the random forest
        if seed is None:
            seed = np.random.randint(10000)
        self.reg_rng = reg.default_random_engine(seed)

        self.n_points_per_tree = n_points_per_tree

        self.rf = reg.binary_rss_forest()
        self.rf.options.num_trees = num_trees

        self.rf.options.do_bootstrapping = do_bootstrapping

        self.rf.options.num_data_points_per_tree = n_points_per_tree

        self._X = X_init
        self._Y = Y_init

        if self.n_points_per_tree == 0:
            self.rf.options.num_data_points_per_tree = X_init.shape[0]

        data = reg.default_data_container(self._X.shape[1])

        for row_X, row_y in zip(X_init, Y_init):
            data.add_data_point(row_X, row_y)

        self.rf.fit(data, self.reg_rng)
Example #9
    def update_data(self, X: np.ndarray, Y: np.ndarray) -> None:
        """
        Updates model with new data points.

        :param X: new points
        :param Y: function values at new points X
        """
        self._X = np.append(self._X, X, axis=0)
        self._Y = np.append(self._Y, Y, axis=0)

        data = reg.default_data_container(self._X.shape[1])

        for row_X, row_y in zip(self._X, self._Y):
            data.add_data_point(row_X, row_y)

        self.rf.fit(data, self.reg_rng)
Example #10
	def setUp(self):
		data_set_prefix = '${CMAKE_SOURCE_DIR}/test_data_sets/'
		self.data = reg.default_data_container(64)
		self.data.import_csv_files(data_set_prefix+'features13.csv', data_set_prefix+'responses13.csv')
		

		self.forest = reg.binary_rss_forest()
		self.forest.options.num_trees = 64
		self.forest.options.do_bootstrapping = True
		self.forest.options.num_data_points_per_tree = 200

		self.assertEqual(self.forest.options.num_trees, 64)
		self.assertTrue (self.forest.options.do_bootstrapping)
		self.assertEqual(self.forest.options.num_data_points_per_tree, 200)

		self.rng = reg.default_random_engine(1)
Example #11
    def __init__(self,
                 X,
                 Y,
                 config_space=None,
                 n_trees=16,
                 seed=None,
                 bootstrapping=True,
                 points_per_tree=None,
                 max_features=None,
                 min_samples_split=0,
                 min_samples_leaf=0,
                 max_depth=64,
                 cutoffs=(-np.inf, np.inf)):
        """
        Calculate and provide midpoints and sizes from the forest's 
        split values in order to get the marginals
        
        Parameters
        ------------
        X: matrix with the features
        
        Y: vector with the response values
        
        config_space : ConfigSpace instantiation
        
        n_trees: number of trees in the forest to be fit
        
        seed: seed for the forests randomness
        
        bootstrapping: whether or not to bootstrap the data for each tree
        
        points_per_tree: number of points used for each tree 
                        (only subsampling if bootstrapping is false)
        
        max_features: number of features to be used at each split, default is 70%
        
        min_samples_split: minimum number of samples required to attempt to split 
        
        min_samples_leaf: minimum number of samples required in a leaf
        
        max_depth: maximal depth of each tree in the forest
        """

        pcs = [(np.nan, np.nan)] * X.shape[1]

        # if no ConfigSpace is specified, let's build one with all continuous variables
        if (config_space is None):
            # if no info is given, use min and max values of each variable as bounds
            config_space = ConfigSpace.ConfigurationSpace()
            for i, (mn,
                    mx) in enumerate(zip(np.min(X, axis=0), np.max(X,
                                                                   axis=0))):
                config_space.add_hyperparameter(
                    UniformFloatHyperparameter("x_%03i" % i, mn, mx))

        self.percentiles = np.percentile(Y, range(0, 100))
        self.cs = config_space
        self.cs_params = self.cs.get_hyperparameters()
        self.n_dims = len(self.cs_params)
        self.n_trees = n_trees
        self._dict = False

        # at this point we have a valid ConfigSpace object
        # check if param number is correct etc:
        if X.shape[1] != len(self.cs_params):
            raise RuntimeError(
                'Number of parameters in ConfigSpace object does not match input X'
            )
        for i in range(len(self.cs_params)):
            if not isinstance(self.cs_params[i], (CategoricalHyperparameter)):
                if (np.max(X[:, i]) > self.cs_params[i].upper) or \
                        (np.min(X[:, i]) < self.cs_params[i].lower):
                    raise RuntimeError(
                        'Some sample values from X are not in the given interval'
                    )
            else:
                unique_vals = set(X[:, i])
                if len(unique_vals) > len(self.cs_params[i].choices):
                    raise RuntimeError(
                        'There are some categoricals missing in the ConfigSpace specification'
                    )

        # initialize all types as 0
        types = np.zeros(len(self.cs_params), dtype=np.uint)
        # retrieve the types and the bounds from the ConfigSpace
        # TODO: Test if that actually works
        for i, hp in enumerate(self.cs_params):
            if isinstance(hp, CategoricalHyperparameter):
                types[i] = len(hp.choices)
                pcs[i] = (len(hp.choices), np.nan)
            else:
                pcs[i] = (hp.lower, hp.upper)

        # set forest options
        forest = reg.fanova_forest()
        forest.options.num_trees = n_trees
        forest.options.do_bootstrapping = bootstrapping
        forest.options.num_data_points_per_tree = X.shape[
            0] if points_per_tree is None else points_per_tree
        forest.options.tree_opts.max_features = (
            X.shape[1] * 7) // 10 if max_features is None else max_features

        forest.options.tree_opts.min_samples_to_split = min_samples_split
        forest.options.tree_opts.min_samples_in_leaf = min_samples_leaf
        forest.options.tree_opts.max_depth = max_depth
        forest.options.tree_opts.epsilon_purity = 1e-8

        # create data container and provide all the necessary information
        if seed is None:
            rng = reg.default_random_engine(np.random.randint(2**31 - 1))
        else:
            rng = reg.default_random_engine(seed)
        data = reg.default_data_container(X.shape[1])

        for i, (mn, mx) in enumerate(pcs):
            if (np.isnan(mx)):
                data.set_type_of_feature(i, mn)
            else:
                data.set_bounds_of_feature(i, mn, mx)

        for i in range(len(Y)):
            try:
                data.add_data_point(X[i].tolist(), Y[i])
            except:
                print("failed to process datapoint:", X[i].tolist())
                raise

        forest.fit(data, rng)

        self.the_forest = forest

        # initialize a dictionary with parameter dims
        self.variance_dict = dict()

        # getting split values
        forest_split_values = self.the_forest.all_split_values()

        # all midpoints and interval sizes treewise for the whole forest
        self.all_midpoints = []
        self.all_sizes = []

        #compute midpoints and interval sizes for variables in each tree
        for tree_split_values in forest_split_values:
            sizes = []
            midpoints = []
            for i, split_vals in enumerate(tree_split_values):
                if np.isnan(pcs[i][1]):  # categorical parameter
                    # check if the tree actually splits on this parameter
                    if len(split_vals) > 0:
                        midpoints.append(split_vals)
                        sizes.append(np.ones(len(split_vals)))
                    # if not, simply append 0 as the value with the number
                    # of categories as the size, that way this parameter will
                    # get 0 importance from this tree.
                    else:
                        midpoints.append((0, ))
                        sizes.append((pcs[i][0], ))
                else:
                    # add bounds to split values
                    sv = np.array([pcs[i][0]] + list(split_vals) + [pcs[i][1]])
                    # compute midpoints and sizes
                    midpoints.append((1 / 2) * (sv[1:] + sv[:-1]))
                    sizes.append(sv[1:] - sv[:-1])

            self.all_midpoints.append(midpoints)
            self.all_sizes.append(sizes)

        # capital V in the paper
        self.trees_total_variances = []
        # dict of lists where the keys are tuples of the dimensions
        # and the value list contains \hat{f}_U for the individual trees
        # reset all the variance fractions computed
        self.trees_variance_fractions = {}
        self.V_U_total = {}
        self.V_U_individual = {}

        self.cutoffs = cutoffs
        self.set_cutoffs(cutoffs)
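Once constructed, the object is typically queried for parameter importances. A hedged usage sketch, assuming this __init__ belongs to fanova's fANOVA class (quantify_importance is that library's method; adapt if the surrounding wrapper differs):

import numpy as np
from fanova import fANOVA   # assumed origin of the constructor above

X = np.random.rand(200, 3)
Y = np.random.rand(200)
f = fANOVA(X, Y, n_trees=16, seed=42)
print(f.quantify_importance((0,)))      # marginal importance of parameter 0
print(f.quantify_importance((0, 1)))    # joint importance of parameters 0 and 1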
Example #12
import pyrfr.regression as reg
data = reg.default_data_container(64)
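A minimal continuation of those two lines, restricted to container calls that appear in the other examples (set_bounds_of_feature, set_type_of_feature, add_data_point, retrieve_data_point); the concrete bounds and category count are placeholders:

import numpy as np

for i in range(63):
    data.set_bounds_of_feature(i, 0.0, 1.0)    # features 0..62: continuous in [0, 1]
data.set_type_of_feature(63, 4)                # feature 63: categorical with 4 levels

x = np.random.rand(64)
x[63] = 2.0                                    # category index, stored as a float
data.add_data_point(x.tolist(), 0.5)           # response value 0.5
print(data.retrieve_data_point(0))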
Example #13
    def __init__(self, X, Y, config_space=None,
                 n_trees=16, seed=None, bootstrapping=True,
                 points_per_tree=None, max_features=None,
                 min_samples_split=0, min_samples_leaf=0,
                 max_depth=64, cutoffs=(-np.inf, np.inf)):

        """
        Calculate and provide midpoints and sizes from the forest's 
        split values in order to get the marginals
        
        Parameters
        ------------
        X: matrix with the features, either a np.array or a pd.DataFrame (numerically encoded)
        
        Y: vector with the response values (numerically encoded)
        
        config_space : ConfigSpace instantiation
        
        n_trees: number of trees in the forest to be fit
        
        seed: seed for the forests randomness
        
        bootstrapping: whether or not to bootstrap the data for each tree
        
        points_per_tree: number of points used for each tree 
                        (only subsampling if bootstrapping is false)
        
        max_features: number of features to be used at each split, default is 70%
        
        min_samples_split: minimum number of samples required to attempt to split 
        
        min_samples_leaf: minimum number of samples required in a leaf
        
        max_depth: maximal depth of each tree in the forest
        
        cutoffs: tuple of (lower, upper), all values outside this range will be
                 mapped to either the lower or the upper bound. (See:
                 "Generalized Functional ANOVA Diagnostics for High Dimensional
                 Functions of Dependent Variables" by Hooker.)
        """
        logging.basicConfig(level=logging.INFO)
        self.logger = logging.getLogger(self.__module__ + '.' + self.__class__.__name__)

        pcs = [(np.nan, np.nan)] * X.shape[1]

        # Convert pd.DataFrame to np.array
        if isinstance(X, pd.DataFrame):
            self.logger.debug("Detected pandas dataframes, converting to floats...")
            if config_space is not None:
                # Check if column names match parameter names
                bad_input = set(X.columns) - set(config_space.get_hyperparameter_names())
                if len(bad_input) != 0:
                    raise ValueError("Could not identify parameters %s from pandas dataframes" % str(bad_input))
                # Reorder dataframe if necessary
                X = X[config_space.get_hyperparameter_names()]
            X = X.to_numpy()
        elif config_space is not None:
            # There is a config_space but no way to check if the np.array'ed data in X is in the correct order...
            self.logger.warning("Note that fANOVA expects data to be ordered like the return of ConfigSpace's "
                                "'get_hyperparameters'-method. We recommend to use labeled pandas dataframes to "
                                "avoid any problems.")

        # if no ConfigSpace is specified, let's build one with all continuous variables
        if config_space is None:
            # if no info is given, use min and max values of each variable as bounds
            config_space = ConfigSpace.ConfigurationSpace()
            for i, (mn, mx) in enumerate(zip(np.min(X, axis=0), np.max(X, axis=0))):
                config_space.add_hyperparameter(UniformFloatHyperparameter("x_%03i" % i, mn, mx))

        self.percentiles = np.percentile(Y, range(0, 100))
        self.cs = config_space
        self.cs_params = self.cs.get_hyperparameters()
        self.n_dims = len(self.cs_params)
        self.n_trees = n_trees
        self._dict = False

        # at this point we have a valid ConfigSpace object
        # check if param number is correct etc:
        if X.shape[1] != len(self.cs_params):
            raise RuntimeError('Number of parameters in ConfigSpace object does not match input X')
        for i in range(len(self.cs_params)):
            if isinstance(self.cs_params[i], NumericalHyperparameter):
                if (np.max(X[:, i]) > self.cs_params[i].upper) or \
                        (np.min(X[:, i]) < self.cs_params[i].lower):
                    raise RuntimeError('Some sample values from X are not in the given interval')
            elif isinstance(self.cs_params[i], CategoricalHyperparameter):
                unique_vals = set(X[:, i])
                if len(unique_vals) > len(self.cs_params[i].choices):
                    raise RuntimeError("There are some categoricals missing in the ConfigSpace specification for "
                                       "hyperparameter %s:" % self.cs_params[i].name)
            elif isinstance(self.cs_params[i], OrdinalHyperparameter):
                unique_vals = set(X[:, i])
                if len(unique_vals) > len(self.cs_params[i].sequence):
                    raise RuntimeError("There are some sequence-options missing in the ConfigSpace specification for "
                                       "hyperparameter %s:" % self.cs_params[i].name)
            elif isinstance(self.cs_params[i], Constant):
                # oddly, UnParametrizedHyperparameter and Constant are not supported.
                # raise TypeError('Unsupported Hyperparameter: %s' % type(self.cs_params[i]))
                pass
                # unique_vals = set(X[:, i])
                # if len(unique_vals) > 1:
                #     raise RuntimeError('Got multiple values for Unparameterized (Constant) hyperparameter')
            else:
                raise TypeError('Unsupported Hyperparameter: %s' % type(self.cs_params[i]))

        if not np.issubdtype(X.dtype, np.float64):
            logging.warning('low level library expects X argument to be float')
        if not np.issubdtype(Y.dtype, np.float64):
            logging.warning('low level library expects Y argument to be float')

        # initialize all types as 0
        types = np.zeros(len(self.cs_params), dtype=np.uint)
        # retrieve the types and the bounds from the ConfigSpace 
        # TODO: Test if that actually works
        for i, hp in enumerate(self.cs_params):
            if isinstance(hp, CategoricalHyperparameter):
                types[i] = len(hp.choices)
                pcs[i] = (len(hp.choices), np.nan)
            elif isinstance(hp, OrdinalHyperparameter):
                types[i] = len(hp.sequence)
                pcs[i] = (len(hp.sequence), np.nan)
            elif isinstance(self.cs_params[i], NumericalHyperparameter):
                pcs[i] = (hp.lower, hp.upper)
            elif isinstance(self.cs_params[i], Constant):
                types[i] = 1
                pcs[i] = (1, np.nan)
            else:
                raise TypeError('Unsupported Hyperparameter: %s' % type(hp))

        # set forest options
        forest = reg.fanova_forest()
        forest.options.num_trees = n_trees
        forest.options.do_bootstrapping = bootstrapping
        forest.options.num_data_points_per_tree = X.shape[0] if points_per_tree is None else points_per_tree
        forest.options.tree_opts.max_features = (X.shape[1] * 7) // 10 if max_features is None else max_features

        forest.options.tree_opts.min_samples_to_split = min_samples_split
        forest.options.tree_opts.min_samples_in_leaf = min_samples_leaf
        forest.options.tree_opts.max_depth = max_depth
        forest.options.tree_opts.epsilon_purity = 1e-8

        # create data container and provide all the necessary information
        if seed is None:
            rng = reg.default_random_engine(np.random.randint(2 ** 31 - 1))
        else:
            rng = reg.default_random_engine(seed)
        data = reg.default_data_container(X.shape[1])

        for i, (mn, mx) in enumerate(pcs):
            if np.isnan(mx):
                data.set_type_of_feature(i, mn)
            else:
                data.set_bounds_of_feature(i, mn, mx)

        for i in range(len(Y)):
            self.logger.debug("process datapoint: %s", str(X[i].tolist()))
            data.add_data_point(X[i].tolist(), Y[i])

        forest.fit(data, rng)

        self.the_forest = forest

        # initialize a dictionary with parameter dims
        self.variance_dict = dict()

        # getting split values
        forest_split_values = self.the_forest.all_split_values()

        # all midpoints and interval sizes treewise for the whole forest
        self.all_midpoints = []
        self.all_sizes = []

        # compute midpoints and interval sizes for variables in each tree
        for tree_split_values in forest_split_values:
            sizes = []
            midpoints = []
            for i, split_vals in enumerate(tree_split_values):
                if np.isnan(pcs[i][1]):  # categorical parameter
                    # check if the tree actually splits on this parameter
                    if len(split_vals) > 0:
                        midpoints.append(split_vals)
                        sizes.append(np.ones(len(split_vals)))
                    # if not, simply append 0 as the value with the number of categories as the size, that way this
                    # parameter will get 0 importance from this tree.
                    else:
                        midpoints.append((0,))
                        sizes.append((pcs[i][0],))
                else:
                    # add bounds to split values
                    sv = np.array([pcs[i][0]] + list(split_vals) + [pcs[i][1]])
                    # compute midpoints and sizes
                    midpoints.append((1 / 2) * (sv[1:] + sv[:-1]))
                    sizes.append(sv[1:] - sv[:-1])

            self.all_midpoints.append(midpoints)
            self.all_sizes.append(sizes)

        # capital V in the paper
        self.trees_total_variances = []
        # dict of lists where the keys are tuples of the dimensions
        # and the value list contains \hat{f}_U for the individual trees
        # reset all the variance fractions computed
        self.trees_variance_fractions = {}
        self.V_U_total = {}
        self.V_U_individual = {}

        self.cutoffs = cutoffs
        self.set_cutoffs(cutoffs)
Example #14
	def test_data_container(self):
		data = reg.default_data_container(10)

		data.add_data_point([1]*10, 2)
		data.retrieve_data_point(0)
Example #15
    def __init__(self, X, Y, config_space=None,
                 n_trees=16, seed=None, bootstrapping=True,
                 points_per_tree=None, max_features=None,
                 min_samples_split=0, min_samples_leaf=0,
                 max_depth=64, cutoffs=(-np.inf, np.inf)):

        """
        Calculate and provide midpoints and sizes from the forest's 
        split values in order to get the marginals
        
        Parameters
        ------------
        X: matrix with the features (numerically encoded)
        
        Y: vector with the response values (numerically encoded)
        
        config_space : ConfigSpace instantiation
        
        n_trees: number of trees in the forest to be fit
        
        seed: seed for the forests randomness
        
        bootstrapping: whether or not to bootstrap the data for each tree
        
        points_per_tree: number of points used for each tree 
                        (only subsampling if bootstrapping is false)
        
        max_features: number of features to be used at each split, default is 70%
        
        min_samples_split: minimum number of samples required to attempt to split 
        
        min_samples_leaf: minimum number of samples required in a leaf
        
        max_depth: maximal depth of each tree in the forest
        
        cutoffs: tuple of (lower, upper), all values outside this range will be
                 mapped to either the lower or the upper bound. (See:
                 "Generalized Functional ANOVA Diagnostics for High Dimensional
                 Functions of Dependent Variables" by Hooker.)
        """
        logging.basicConfig(level=logging.INFO)
        self.logger = logging.getLogger(self.__module__ + '.' + self.__class__.__name__)

        pcs = [(np.nan, np.nan)] * X.shape[1]

        # if no ConfigSpace is specified, let's build one with all continuous variables
        if config_space is None:
            # if no info is given, use min and max values of each variable as bounds
            config_space = ConfigSpace.ConfigurationSpace()
            for i, (mn, mx) in enumerate(zip(np.min(X, axis=0), np.max(X, axis=0))):
                config_space.add_hyperparameter(UniformFloatHyperparameter("x_%03i" % i, mn, mx))

        self.percentiles = np.percentile(Y, range(0, 100))
        self.cs = config_space
        self.cs_params = self.cs.get_hyperparameters()
        self.n_dims = len(self.cs_params)
        self.n_trees = n_trees
        self._dict = False

        # at this point we have a valid ConfigSpace object
        # check if param number is correct etc:
        if X.shape[1] != len(self.cs_params):
            raise RuntimeError('Number of parameters in ConfigSpace object does not match input X')
        for i in range(len(self.cs_params)):
            if isinstance(self.cs_params[i], NumericalHyperparameter):
                if (np.max(X[:, i]) > self.cs_params[i].upper) or \
                        (np.min(X[:, i]) < self.cs_params[i].lower):
                    raise RuntimeError('Some sample values from X are not in the given interval')
            elif isinstance(self.cs_params[i], CategoricalHyperparameter):
                unique_vals = set(X[:, i])
                if len(unique_vals) > len(self.cs_params[i].choices):
                    raise RuntimeError('There are some categoricals missing in the ConfigSpace specification for hyperparameter %s:' % self.cs_params[i].name)
            elif isinstance(self.cs_params[i], Constant):
                # oddly, UnParametrizedHyperparameter and Constant are not supported.
                # raise TypeError('Unsupported Hyperparameter: %s' % type(self.cs_params[i]))
                pass
                # unique_vals = set(X[:, i])
                # if len(unique_vals) > 1:
                #     raise RuntimeError('Got multiple values for Unparameterized (Constant) hyperparameter')
            else:
                raise TypeError('Unsupported Hyperparameter: %s' % type(self.cs_params[i]))
        
        if not np.issubdtype(X.dtype, np.float64):
            logging.warning('low level library expects X argument to be float')
        if not np.issubdtype(Y.dtype, np.float64):
            logging.warning('low level library expects Y argument to be float')

        # initialize all types as 0
        types = np.zeros(len(self.cs_params), dtype=np.uint)
        # retrieve the types and the bounds from the ConfigSpace 
        # TODO: Test if that actually works
        for i, hp in enumerate(self.cs_params):
            if isinstance(hp, CategoricalHyperparameter):
                types[i] = len(hp.choices)
                pcs[i] = (len(hp.choices), np.nan)
            elif isinstance(self.cs_params[i], NumericalHyperparameter):
                pcs[i] = (hp.lower, hp.upper)
            elif isinstance(self.cs_params[i], (Constant)):
                # raise TypeError('Unsupported Hyperparameter: %s' % type(hp))
                types[i] = 1
                pcs[i] = (1, np.nan)
            else:
                raise TypeError('Unsupported Hyperparameter: %s' % type(hp))

        # set forest options
        forest = reg.fanova_forest()
        forest.options.num_trees = n_trees
        forest.options.do_bootstrapping = bootstrapping
        forest.options.num_data_points_per_tree = X.shape[0] if points_per_tree is None else points_per_tree
        forest.options.tree_opts.max_features = (X.shape[1]*7)//10 if max_features is None else max_features

        forest.options.tree_opts.min_samples_to_split = min_samples_split
        forest.options.tree_opts.min_samples_in_leaf = min_samples_leaf
        forest.options.tree_opts.max_depth = max_depth
        forest.options.tree_opts.epsilon_purity = 1e-8

        # create data container and provide all the necessary information
        if seed is None:
            rng = reg.default_random_engine(np.random.randint(2**31 - 1))
        else:
            rng = reg.default_random_engine(seed)
        data = reg.default_data_container(X.shape[1])

        for i, (mn, mx) in enumerate(pcs):
            if np.isnan(mx):
                data.set_type_of_feature(i, mn)
            else:
                data.set_bounds_of_feature(i, mn, mx)

        for i in range(len(Y)):
            try:
                data.add_data_point(X[i].tolist(), Y[i])
            except:
                self.logger.warning("failed to process datapoint: %s", str(X[i].tolist()))
                raise
        
        forest.fit(data, rng)

        self.the_forest = forest

        # initialize a dictionary with parameter dims
        self.variance_dict = dict()    


        # getting split values
        forest_split_values = self.the_forest.all_split_values()
        
        
        # all midpoints and interval sizes treewise for the whole forest
        self.all_midpoints = []
        self.all_sizes = []
        
        
        #compute midpoints and interval sizes for variables in each tree
        for tree_split_values in forest_split_values:
            sizes = []
            midpoints = []
            for i, split_vals in enumerate(tree_split_values):
                if np.isnan(pcs[i][1]):  # categorical parameter
                    # check if the tree actually splits on this parameter
                    if len(split_vals) > 0:
                        midpoints.append(split_vals)
                        sizes.append(np.ones(len(split_vals)))
                    # if not, simply append 0 as the value with the number
                    # of categories as the size, that way this parameter will
                    # get 0 importance from this tree.
                    else:
                        midpoints.append((0,))
                        sizes.append((pcs[i][0],))
                else:
                    # add bounds to split values
                    sv = np.array([pcs[i][0]] + list(split_vals) + [pcs[i][1]])
                    # compute midpoints and sizes
                    midpoints.append((1 / 2) * (sv[1:] + sv[:-1]))
                    sizes.append(sv[1:] - sv[:-1])

            self.all_midpoints.append(midpoints)
            self.all_sizes.append(sizes)
            

        # capital V in the paper
        self.trees_total_variances = []
        # dict of lists where the keys are tuples of the dimensions
        # and the value list contains \hat{f}_U for the individual trees
        # reset all the variance fractions computed
        self.trees_variance_fractions = {}
        self.V_U_total = {}
        self.V_U_individual = {}

        self.cutoffs = cutoffs
        self.set_cutoffs(cutoffs)