Example #1
def save_to(self, filename, overwrite=False):
    """
    Set file to save results
    :param filename: string path of the checkpoint file
    :param overwrite: bool, if True clear any existing results
    """
    self.output_file = CheckpointFile(filename, keys=['board', 'dataset', 'clf_description'])
    if overwrite:
        self.output_file.clear()
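
A short usage sketch for this method, under the assumption that it belongs to an object like the GridSearch shown in Example #8 (the checkpoint keys match); the variable name and filename are illustrative.

# hypothetical usage: persist results keyed by (board, dataset, clf_description)
search.save_to('grid_search_results.csv')

# start from a clean file, discarding earlier checkpoints
search.save_to('grid_search_results.csv', overwrite=True)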
Example #2
    def save_to(self, filename, existing):
        """
        Save results to a file
        :param filename: string
        :param existing: string what to do if a benchmark already exists. One of {skip, overwrite}
        """
        assert existing in ['skip', 'overwrite'], \
            'existing MUST be one of {skip, overwrite}'

        self.checkpoints = CheckpointFile(filename,
                                          keys=['fqbn', 'dataset', 'clf'])
        self.existing = existing
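
A hedged sketch of calling this method on a Suite-like object (Example #6 defines the same method); 'skip' resumes from existing checkpoints, 'overwrite' recomputes them.

# hypothetical usage: resume a previous run, skipping checkpointed combinations
suite.save_to('suite_results.csv', existing='skip')

# or recompute and replace every stored checkpoint
suite.save_to('suite_results.csv', existing='overwrite')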
Example #3
def __init__(self):
    """Init"""
    self.classifiers = []
    self.hidden_columns = []
    self.output_file = CheckpointFile(None, keys=['board', 'dataset', 'clf'])
    self.all_columns = [
        'board', 'dataset', 'clf', 'n_features', 'flash', 'raw_flash',
        'flash_percent', 'flash_score', 'memory', 'raw_memory',
        'memory_percent', 'memory_score', 'offline_accuracy',
        'online_accuracy', 'inference_time'
    ]
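
A brief sketch of how these defaults are typically adjusted, assuming the BenchmarkEndToEnd class shown in full in Example #7; the hidden column names come from all_columns above.

# hypothetical usage: hide the raw/score columns from the report DataFrame
bench = BenchmarkEndToEnd()
bench.hide('raw_flash', 'raw_memory', 'flash_score', 'memory_score')
bench.save_to('end_to_end.csv')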
Example #4
def __init__(self, dataset):
    """
    :param dataset: tuple, either (name, X, y) or (X, y)
    """
    assert isinstance(dataset, tuple), 'Dataset MUST be a tuple, either (name, X, y) or (X, y)'
    assert len(dataset) == 2 or len(dataset) == 3, 'Dataset MUST be a tuple, either (name, X, y) or (X, y)'
    self.dataset = dataset if len(dataset) == 3 else ('Unknown dataset', dataset[0], dataset[1])
    self.classifiers = []
    self.constraints = {
        'offline': [],
        'resources': [],
        'runtime': []
    }
    self.features = set([])
    self.output_file = CheckpointFile(None, keys=['board', 'dataset', 'clf_description'])
    self.candidates = []
    self.runs = 0
    self.Result = namedtuple('GridSearchResult', 'clf clf_description accuracy min_accuracy max_accuracy flash flash_percent memory memory_percent inference_time passes')
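
A construction sketch matching the assertions above (this is the same __init__ as GridSearch in Example #8): the dataset is either (name, X, y) or (X, y), in which case the name defaults to 'Unknown dataset'. The toy data is illustrative.

import numpy as np

X = np.random.rand(100, 4)
y = np.random.randint(0, 2, size=100)

search = GridSearch(('toy dataset', X, y))   # named dataset
search = GridSearch((X, y))                  # stored as ('Unknown dataset', X, y)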
Example #5
    def benchmark(self,
                  port=None,
                  project=None,
                  inference_time=False,
                  save_to=None,
                  exists_ok=True,
                  exists_overwrite=False,
                  cv=3,
                  before_upload=None,
                  after_upload=None):
        """

        """
        if inference_time:
            assert port is not None or (
                project is not None and project.board is not None
                and project.board.port is not None), 'You MUST set a port'

        save_to = CheckpointFile(save_to, keys=['board', 'dataset', 'clf'])

        if save_to.key_exists(self.key) and exists_ok:
            return

        if save_to.key_exists(self.key) and not exists_overwrite:
            raise BoardBenchmarkAlreadyExists(self.key)

        if project is None:
            project = Project()

        if inference_time and port:
            project.board.set_port(port)

        # benchmark offline accuracy
        X = self.dataset.X
        y = self.dataset.y
        # pick up to 5 evenly spaced samples for the on-device benchmarks
        idx = np.arange(len(X))[::(len(X) // 5)][:5]
        X_test = X[idx]
        y_test = y[idx]
        cross_results = cross_validate(self.classifier.generator(X, y),
                                       X,
                                       y,
                                       cv=cv,
                                       return_estimator=True)
        offline_accuracy = cross_results['test_score'].mean()
        clf = cross_results['estimator'][0]

        benchmark = {
            'board': self.board.name,
            'dataset': self.dataset.name,
            'clf': self.classifier.name,
            'fqbn': '',
            'cpu_speed': self.board.cpu_speed,
            'cpu_family': self.board.cpu_family,
            'n_samples': X.shape[0],
            'n_features': X.shape[1],
            'offline_accuracy': offline_accuracy,
            'inference_time': 0
        }

        with project.tmp_project() as tmp:
            tmp.board.set_model(self.board)

            benchmark['fqbn'] = tmp.board.fqbn
            cache_key = (self.board.name, self.dataset.name)

            if cache_key not in BoardBenchmark._cache:
                BoardBenchmark._cache[cache_key] = self.get_baseline(
                    tmp, X_test)

            baseline = BoardBenchmark._cache.get(cache_key)

            sketch = jinja('metrics/Resources.jinja', {'X': X_test})
            ported = port_clf(clf, classname='Classifier')
            tmp.files.add('%s.ino' % tmp.name, contents=sketch, exists_ok=True)
            tmp.files.add('Classifier.h', contents=ported, exists_ok=True)

            resources = self._parse_resources(tmp)
            resources['flash_increment'] = resources['flash'] - baseline['flash']
            resources['memory_increment'] = resources['memory'] - baseline['memory']
            resources['flash_increment_percent'] = (
                float(resources['flash_increment']) / resources['flash_max']
                if resources['flash_max'] > 0 else 0)
            resources['memory_increment_percent'] = (
                float(resources['memory_increment']) / resources['memory_max']
                if resources['memory_max'] > 0 else 0)
            benchmark.update(resources)

            if inference_time:
                sketch = jinja('metrics/Runtime.jinja', {
                    'X': X_test,
                    'y': y_test
                })
                ported = port_clf(clf, classname='Classifier')

                tmp.files.add(tmp.ino_name, contents=sketch, exists_ok=True)
                tmp.files.add('Classifier.h', contents=ported, exists_ok=True)
                if callable(before_upload):
                    before_upload()
                tmp.upload(success_message='')
                if callable(after_upload):
                    after_upload(tmp)
                benchmark.update(self._parse_inference_time(tmp))

        save_to.set(self.key, benchmark)

        return benchmark
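
A hedged usage sketch for this method. The BoardBenchmark constructor is not shown above, so how the instance is built from a board, dataset and classifier is assumed; the port, filename and callback are illustrative.

# assumed: board_benchmark is a BoardBenchmark built elsewhere
result = board_benchmark.benchmark(
    port='/dev/ttyUSB0',             # illustrative; required when inference_time=True
    inference_time=True,
    save_to='board_benchmarks.csv',  # checkpoint keyed by (board, dataset, clf)
    exists_ok=True,                  # return early if this key already exists
    cv=5,
    after_upload=lambda tmp: print('uploaded to', tmp.board.fqbn))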
Example #6
class Suite:
    """
    Run a suite of benchmarks
    """
    def __init__(self, project):
        """
        Constructor
        :param project:
        """
        self.project = project
        self.datasets = []
        self.classifiers = []
        self.x_train = None
        self.x_valid = None
        self.x_test = None
        self.y_train = None
        self.y_valid = None
        self.y_test = None
        self.validation_size = 0
        self.shuffle = True
        self.checkpoints = None
        self.existing = None

    def set_datasets(self, datasets, validation_size=0, shuffle=True):
        """
        Set datasets for the suite
        :param datasets: list|Dataset
        :param validation_size: float percent of samples to use as validation
        :param shuffle: bool if dataset should be shuffled before splitting
        """
        self.datasets = datasets if isinstance(datasets, list) else [datasets]
        self.validation_size = validation_size
        self.shuffle = shuffle

        for i, dataset in enumerate(self.datasets):
            assert isinstance(dataset, Dataset), \
                'datasets[%d] MUST be an instance of Dataset' % i

        assert isinstance(self.validation_size, float), \
            'validation_size MUST be a float'

    def set_classifiers(self, classifiers):
        """
        Set classifiers for the suite
        :param classifiers: list|Classifier
        """
        self.classifiers = classifiers if isinstance(classifiers, list) else [classifiers]

        for i, classifier in enumerate(self.classifiers):
            assert isinstance(classifier, Classifier), \
                'classifiers[%d] MUST be an instance of Classifier: instance of %s given' % (
                    i, type(classifier).__name__)

    def save_to(self, filename, existing):
        """
        Save results to a file
        :param filename: string
        :param existing: string what to do if a benchmark already exists. One of {skip, overwrite}
        """
        assert existing in ['skip', 'overwrite'], \
            'existing MUST be one of {skip, overwrite}'

        self.checkpoints = CheckpointFile(filename,
                                          keys=['fqbn', 'dataset', 'clf'])
        self.existing = existing

    def run(self, samples_size=10, cross_validate=3, time=False):
        """
        Run benchmark suite
        :param samples_size: int how many samples to use for resources benchmark. 0 means no resource benchmark
        :param cross_validate: int folds for cross validation accuracy estimate. 0 means no accuracy
        :param time: bool True to benchmark onboard inference time
        """
        num_datasets = len(self.datasets)
        num_classifiers = len(self.classifiers)
        results = []

        # load existing results
        if self.checkpoints is not None and self.existing == 'skip' and os.path.isfile(
                self.checkpoints.filename):
            with open(self.checkpoints.filename) as file:
                results = list(csv.DictReader(file))

        for i, dataset in enumerate(self.datasets):
            self.project.logger.info('[%d/%d] Benchmarking dataset %s' %
                                     (i + 1, num_datasets, dataset.name))

            # benchmark baseline for the classifier
            Benchmarker.baseline(project=self.project,
                                 dataset=dataset,
                                 samples_size=samples_size)
            self.project.logger.info('Benchmarked baseline')

            for j, clf in enumerate(self.classifiers):
                self.project.logger.info('[%d/%d] Benchmarking classifier %s' %
                                         (j + 1, num_classifiers, clf.name))

                key = (self.project.board.fqbn, dataset.name, clf.name)

                if self.checkpoints is not None and self.checkpoints.key_exists(
                        key) and self.existing == 'skip':
                    self.project.logger.info('A checkpoint exists, skipping')
                    continue

                # benchmark accuracy
                accuracy = clf.cross_val_score(
                    dataset,
                    num_folds=cross_validate,
                    validation_size=self.validation_size
                ) if cross_validate > 1 else 0

                # train classifier
                clf.fit(dataset.X,
                        dataset.y,
                        validation_size=self.validation_size)

                result = {
                    'board': self.project.board.name,
                    'fqbn': self.project.board.fqbn,
                    'dataset': dataset.name,
                    'clf': clf.name,
                    'n_features': dataset.num_features,
                    'accuracy': accuracy
                }

                # benchmark resources
                if samples_size > 0:
                    benchmarker = Benchmarker(project=self.project,
                                              dataset=dataset,
                                              clf=clf)
                    resources = benchmarker.get_resources(
                        samples_size=samples_size)
                    result.update(resources)

                # benchmark onboard inference time
                if time:
                    benchmarker = Benchmarker(project=self.project,
                                              dataset=dataset,
                                              clf=clf)
                    inference_time = benchmarker.get_inference_time(
                        samples_size=samples_size)
                    result.update(inference_time=inference_time)

                # update results
                results.append(result)

                if self.checkpoints is not None:
                    self.checkpoints.set(key, result)

        return results
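
A usage sketch for the Suite workflow above. The project object is assumed to be an already-configured Project, and the Dataset and Classifier constructor signatures shown here are hypothetical (only their use as wrappers is implied by set_datasets and set_classifiers).

from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier

X, y = load_iris(return_X_y=True)

suite = Suite(project)  # assumed: project is a configured Project
suite.set_datasets(Dataset('Iris', X, y), validation_size=0.2)  # hypothetical Dataset constructor
suite.set_classifiers([
    Classifier('Decision Tree', DecisionTreeClassifier(max_depth=10)),    # hypothetical Classifier constructor
    Classifier('Random Forest', RandomForestClassifier(n_estimators=50)),
])
suite.save_to('suite_results.csv', existing='skip')
results = suite.run(samples_size=10, cross_validate=3, time=True)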
Example #7
class BenchmarkEndToEnd:
    """Run a moltitude of runtime benchmarks"""
    def __init__(self):
        """Init"""
        self.classifiers = []
        self.hidden_columns = []
        self.output_file = CheckpointFile(None,
                                          keys=['board', 'dataset', 'clf'])
        self.all_columns = [
            'board', 'dataset', 'clf', 'n_features', 'flash', 'raw_flash',
            'flash_percent', 'flash_score', 'memory', 'raw_memory',
            'memory_percent', 'memory_score', 'offline_accuracy',
            'online_accuracy', 'inference_time'
        ]

    @property
    def result(self):
        """
        Return first result
        :return:
        """
        return None if self.df.empty else self.df.loc[0]

    @property
    def columns(self):
        """
        Get columns for DataFrame
        :return:
        """
        return [
            column for column in self.all_columns
            if column not in self.hidden_columns
        ]

    @property
    def summary_columns(self):
        """
        Get important columns for DataFrame
        :return:
        """
        return [
            'board', 'dataset', 'clf', 'flash', 'memory', 'offline_accuracy',
            'online_accuracy', 'inference_time'
        ]

    @property
    def df(self):
        """
        Get results as pandas.DataFrame
        :return:
        """
        return pd.DataFrame(self.output_file.df, columns=self.columns)

    @property
    def sorted_df(self):
        """
        Get df sorted by board, dataset, classifier
        :return:
        """
        return self.df.sort_values(by=['board', 'dataset', 'clf'])

    @property
    def plot(self):
        """
        Get plotter utility
        :return:
        """
        return BenchmarkPlotter(self.df)

    def save_to(self, filename, overwrite=False):
        """
        Set file to save results
        :param filename: string path of the checkpoint file
        :param overwrite: bool, if True clear any existing results
        """
        self.output_file = CheckpointFile(filename,
                                          keys=['board', 'dataset', 'clf'])
        if overwrite:
            self.output_file.clear()

    def set_precision(self, digits):
        """
        Set pandas display precision
        :param digits: int number of decimal digits to display
        :return:
        """
        pd.set_option('display.precision', digits)

    def hide(self, *args):
        """
        Hide columns from DataFrame
        :param args:
        :return:
        """
        self.hidden_columns += args

    def benchmark(self,
                  project,
                  datasets,
                  classifiers,
                  boards=None,
                  accuracy=True,
                  runtime=False,
                  offline_test_size=0.3,
                  cross_val=3,
                  online_test_size=20,
                  repeat=5,
                  port=None,
                  upload_options={},
                  random_state=0):
        """
        Run benchmark on the combinations of boards x datasets x classifiers
        :param project:
        :param boards:
        :param datasets:
        :param classifiers:
        :param accuracy:
        :param runtime:
        :param offline_test_size:
        :param cross_val:
        :param online_test_size:
        :param repeat:
        :param port:
        :param random_state:
        :return:
        """

        if boards is None:
            assert project.board.model is not None and len(project.board.model.fqbn) > 0, \
                'You MUST specify at least one board'
            boards = [project.board.model.fqbn]

        if port is None:
            # set 'auto' port if runtime is active
            if runtime and project.board.port is None:
                project.board.set_port('auto')
        else:
            project.board.set_port(port)

        n_run = 0
        n_combos = len(self.to_list(boards)) * len(
            self.to_list(datasets)) * len(self.to_list(classifiers))

        for board_name in self.to_list(boards):
            # set board
            project.board.set_model(board_name)
            board_name = project.board.name

            # if benchmarking runtime, we need the board to be connected
            # so make sure the user has done the physical setup
            if runtime:
                input('Benchmarking board %s: press Enter to continue...' %
                      board_name)

            # get the resources needed for the empty sketch
            baseline_resources = Resources(project).baseline()

            for dataset_name, (X, y) in self.to_list(datasets):
                for clf_name, clf in self.to_list(classifiers):
                    n_run += 1
                    project.logger.info('[%d/%d] Benchmarking %s x %s x %s',
                                        n_run, n_combos, board_name,
                                        dataset_name, clf_name)

                    if self.output_file.key_exists(
                        (board_name, dataset_name, clf_name)):
                        existing = self.output_file.get(
                            (board_name, dataset_name, clf_name))
                        # skip if we have all the data for the combination
                        if not runtime or float(existing.inference_time) > 0:
                            project.logger.debug(
                                'A checkpoint exists, skipping')
                            continue

                    # if clf is a lambda function, call with X, y arguments
                    if callable(clf):
                        clf_clone = clf(X, y)
                    else:
                        # make a copy of the original classifier
                        clf_clone = clone(clf)

                    # benchmark classifier accuracy (off-line)
                    if accuracy:
                        if cross_val:
                            cross_results = cross_validate(
                                clf_clone,
                                X,
                                y,
                                cv=cross_val,
                                return_estimator=True)
                            offline_accuracy = cross_results[
                                'test_score'].mean()
                            # keep first classifier
                            clf_clone = cross_results['estimator'][0]
                        else:
                            X_train, X_test, y_train, y_test = train_test_split(
                                X,
                                y,
                                test_size=offline_test_size,
                                random_state=random_state)
                            offline_accuracy = clf_clone.fit(
                                X_train, y_train).score(X_test, y_test)
                    else:
                        offline_accuracy = 0
                        clf_clone.fit(X, y)

                    try:
                        resources_benchmark = Resources(project).benchmark(
                            clf_clone, x=X[0])
                    except NotFittedError:
                        project.logger.error(
                            'Classifier not fitted, cannot benchmark')
                        continue
                    except ArduinoCliCommandError:
                        project.logger.error('Arduino CLI reported an error')
                        continue
                    except Exception as err:
                        project.logger.error('Generic error: %s', err)
                        continue

                    # benchmark on-line inference time and accuracy
                    if runtime:
                        try:
                            X_train, X_test, y_train, y_test = train_test_split(
                                X,
                                y,
                                test_size=online_test_size,
                                random_state=random_state)
                            runtime_benchmark = Runtime(project).benchmark(
                                clf_clone,
                                X_test,
                                y_test,
                                repeat=repeat,
                                upload_options=upload_options)
                            project.logger.info(
                                'Benchmarked runtime inference')
                        except BadBoardResponseError as e:
                            project.logger.error(e)
                            runtime_benchmark = Runtime.empty()
                    else:
                        runtime_benchmark = Runtime.empty()

                    self.classifiers.append(clf_clone)
                    self.add_result(board=board_name,
                                    dataset=dataset_name,
                                    clf=clf_name,
                                    shape=X.shape,
                                    offline_accuracy=offline_accuracy,
                                    resources=resources_benchmark,
                                    runtime=runtime_benchmark,
                                    baseline=baseline_resources)

                    sleep(2)

        return self

    def add_result(self, board, dataset, clf, shape, offline_accuracy,
                   resources, runtime, baseline):
        """
        Add result to list
        :param board:
        :param dataset:
        :param clf:
        :param shape:
        :param offline_accuracy:
        :param resources:
        :param runtime:
        :param baseline:
        :return:
        """
        raw_flash = resources['flash']
        raw_memory = resources['memory']

        if baseline:
            resources['flash'] -= baseline['flash']
            resources['memory'] -= baseline['memory']

        result = {
            'board': board,
            'dataset': dataset,
            'clf': clf,
            'n_features': shape[1],
            'flash': resources['flash'],
            'memory': resources['memory'],
            'raw_flash': raw_flash,
            'raw_memory': raw_memory,
            'flash_percent': resources['flash_percent'],
            'memory_percent': resources['memory_percent'],
            'flash_score': offline_accuracy * (1 - resources['flash_percent']),
            'memory_score': offline_accuracy * (1 - resources['memory_percent']),
            'offline_accuracy': offline_accuracy,
            'online_accuracy': runtime['online_accuracy'],
            'inference_time': runtime['inference_time']
        }
        self.output_file.set((board, dataset, clf), result)

    def to_list(self, x):
        """
        Convert argument to list, if not already
        :param x:
        :return:
        """
        return x if isinstance(x, list) else [x]
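
A sketch of driving benchmark() above, following the (name, (X, y)) dataset tuples and (name, clf) classifier tuples implied by the loops in the method; the project object and the board FQBN are assumptions.

from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier

X, y = load_iris(return_X_y=True)

bench = BenchmarkEndToEnd()
bench.save_to('end_to_end.csv')
bench.benchmark(
    project=project,                      # assumed: configured Project
    boards=['arduino:samd:mkrwifi1010'],  # illustrative FQBN
    datasets=[('Iris', (X, y))],
    classifiers=[
        ('Decision Tree', DecisionTreeClassifier(max_depth=10)),
        # a callable is invoked with (X, y) to build the classifier
        ('Random Forest', lambda X, y: RandomForestClassifier(n_estimators=20)),
    ],
    runtime=True,
    port='auto')

print(bench.sorted_df[bench.summary_columns])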
Example #8
class GridSearch:
    """
    Perform grid search on supported classifiers
    """
    DEFAULT_HYPERPARAMETERS = {
        'DecisionTreeClassifier': [{
            'max_depth': [5, 10, 20, None],
            'min_samples_leaf': [1, 5, 10],
            'max_features': [0.5, 0.75, "sqrt", None]
        }],
        'RandomForestClassifier': [{
            'n_estimators': [10, 50, 100, 200],
            'max_depth': [5, 10, 20, None],
            'min_samples_leaf': [1, 5, 10],
            'max_features': [0.5, 0.75, "sqrt", None]
        }],
        'XGBClassifier': [{
            'n_estimators': [10, 50, 100, 200],
            'max_depth': [5, 10, 20, None],
            'eta': [0.1, 0.3, 0.7],
            'gamma': [0, 1, 10]
        }],
        'GaussianNB': [{
            'var_smoothing': [1e-5, 1e-7, 1e-9]
        }],
        'LogisticRegression': [{
            'penalty': ['l1', 'l2'],
            'C': [0.01, 0.1, 1],
            'max_iter': [1e3, 1e4, 1e5]
        }],
        'SVC': [{
            'kernel': ['linear'],
            'C': [0.01, 0.1, 1],
            'max_iter': [1000, -1]
        }, {
            'kernel': ['poly'],
            'degree': [2, 3],
            'gamma': [0.001, 0.1, 1, 'auto'],
            'C': [0.01, 0.1, 1],
            'max_iter': [1000, -1]
        }, {
            'kernel': ['rbf'],
            'gamma': [0.001, 0.1, 1, 'auto'],
            'C': [0.01, 0.1, 1],
            'max_iter': [1000, -1]
        }]
    }

    def __init__(self, dataset):
        """
        :param dataset: tuple, either (name, X, y) or (X, y)
        """
        assert isinstance(dataset, tuple), 'Dataset MUST be a tuple, either (name, X, y) or (X, y)'
        assert len(dataset) == 2 or len(dataset) == 3, 'Dataset MUST be a tuple, either (name, X, y) or (X, y)'
        self.dataset = dataset if len(dataset) == 3 else ('Unknown dataset', dataset[0], dataset[1])
        self.classifiers = []
        self.constraints = {
            'offline': [],
            'resources': [],
            'runtime': []
        }
        self.features = set([])
        self.output_file = CheckpointFile(None, keys=['board', 'dataset', 'clf_description'])
        self.candidates = []
        self.runs = 0
        self.Result = namedtuple('GridSearchResult', 'clf clf_description accuracy min_accuracy max_accuracy flash flash_percent memory memory_percent inference_time passes')

    @property
    def df(self):
        """
        Get candidates as pandas.DataFrame
        :return: pandas.DataFrame
        """
        if self.output_file.filename is not None:
            return self.output_file.df

        df = pd.DataFrame(self.candidates)
        if 'clf' in df.columns:
            df = df.drop(columns=['clf'])
        return df

    @property
    def df_passes(self):
        """
        Get candidates that pass the constraints
        """
        return self.df[self.df.passes]

    def add_classifier(self, clf, only=None, merge=None):
        """
        Add classifier to list of candidates
        :param only: search ONLY these params
        :param merge: search ALSO these params, plus the defaults
        """
        if only is not None:
            search_params = only if isinstance(only, list) else [only]
        else:
            search_params = None
            defaults = GridSearch.DEFAULT_HYPERPARAMETERS

            # merge defaults with user supplied
            for clf_type, params in defaults.items():
                if self._check_type(clf, clf_type):
                    # copy the defaults so the class-level dict is not mutated in place
                    search_params = list(params)
                    if merge is not None:
                        search_params += merge if isinstance(merge, list) else [merge]
                    break

            assert search_params is not None, 'Cannot find default search params for %s, you MUST set only=' % type(clf)

        self.classifiers.append((clf, search_params))

    def save_to(self, filename, overwrite=False):
        """
        Set file to save results
        :param filename:
        :param overwrite:
        """
        self.output_file = CheckpointFile(filename, keys=['board', 'dataset', 'clf_description'])
        if overwrite:
            self.output_file.clear()

    def min_accuracy(self, acc):
        """
        Add constraint on min accuracy
        :param acc:
        """
        self.add_offline_constraint(lambda result: result.accuracy >= acc)

    def max_flash(self, flash):
        """
        Add constraint on flash size
        :param flash:
        """
        self.add_resources_constraint(lambda result: result.flash <= flash)

    def max_flash_percent(self, percent):
        """
        Add constraint on flash size
        :param percent:
        """
        self.add_resources_constraint(lambda result: result.flash_percent <= percent)

    def max_memory(self, memory):
        """
        Add constraint on memory size
        :param memory:
        """
        self.add_resources_constraint(lambda result: result.memory <= memory)

    def max_memory_percent(self, percent):
        """
        Add constraint on memory size
        :param percent:
        """
        self.add_resources_constraint(lambda result: result.memory_percent <= percent)

    def max_inference_time(self, micros):
        """
        Add constraint on inference time
        :param micros:
        """
        self.add_runtime_constraint(lambda result: result.inference_time <= micros)

    def add_offline_constraint(self, constraint):
        """
        Add constraint to offline result
        :param constraint:
        """
        self._add_constraint('offline', constraint)

    def add_resources_constraint(self, constraint):
        """
        Add constraint to resources result
        :param constraint:
        """
        self._add_constraint('resources', constraint)

    def add_runtime_constraint(self, constraint):
        """
        Add constraint to runtime result
        :param constraint:
        """
        self._add_constraint('runtime', constraint)

    def search(self, project, cv=3):
        """
        Perform search
        :param project:
        :param cv: cross validation splits
        """
        dataset_name, X, y = self.dataset
        board_name = project.board.fqbn
        
        for base_clf, search_bags in self.classifiers:
            project.logger.debug('Tuning %s', type(base_clf).__name__)
            # naive implementation of grid search
            for search_params in search_bags:
                for combo in product(*search_params.values()):
                    current_params = {k: v for k, v in zip(search_params.keys(), combo)}
                    params_string = ', '.join(['%s=%s' % (k, str(v)) for k, v in current_params.items()])
                    clf_description = '%s (%s)' % (type(base_clf).__name__, params_string)
                    project.logger.debug('Benchmarking %s', clf_description)

                    if self.output_file.key_exists((board_name, dataset_name, clf_description)):
                        project.logger.debug('A checkpoint exists, skipping')
                        continue

                    self.runs += 1

                    clf = clone(base_clf)
                    clf.set_params(**current_params)
                    crossval = cross_validate(estimator=clf, X=X, y=y, cv=cv, return_estimator=True)
                    best_idx = np.argmax(crossval['test_score'])
                    result = self.Result(
                        clf=crossval['estimator'][best_idx],
                        clf_description=clf_description,
                        accuracy=crossval['test_score'].mean(),
                        min_accuracy=crossval['test_score'].min(),
                        max_accuracy=crossval['test_score'].max(),
                        flash=0,
                        flash_percent=0,
                        memory=0,
                        memory_percent=0,
                        inference_time=0,
                        passes=True)

                    passes = True

                    # apply offline constraints
                    for constraint in self.constraints['offline']:
                        if not constraint(result):
                            project.logger.debug('%s didn\'t pass the offline constraints', clf_description)
                            passes = False
                            break
                    if not passes:
                        self._checkpoint(board_name, dataset_name, clf_description, result, False)
                        continue

                    # apply resources constraints
                    if len(self.constraints['resources']):
                        resources = Resources(project).benchmark(result.clf, x=X[0])
                        result = result._replace(
                            flash=resources['flash'],
                            flash_percent=resources['flash_percent'],
                            memory=resources['memory'],
                            memory_percent=resources['memory_percent']
                        )
                        for constraint in self.constraints['resources']:
                            if not constraint(result):
                                project.logger.debug('%s didn\'t pass the resources constraints', clf_description)
                                passes = False
                                break
                    if not passes:
                        self._checkpoint(board_name, dataset_name, clf_description, result, False)
                        continue

                    # apply runtime constraints
                    if len(self.constraints['runtime']):
                        runtime = Runtime(project).benchmark(result.clf, X[:3], y[:3], repeat=10, compile=False)
                        result = result._replace(
                            inference_time=runtime['inference_time']
                        )
                        for constraint in self.constraints['runtime']:
                            if not constraint(result):
                                project.logger.debug('%s didn\'t pass the runtime constraints', clf_description)
                                passes = False
                                break
                    if not passes:
                        self._checkpoint(board_name, dataset_name, clf_description, result, False)
                        continue

                    # all constraints passed, add to candidates
                    project.logger.debug('%s passed all constraints, added to the list of candidates', clf_description)
                    self._checkpoint(board_name, dataset_name, clf_description, result, True)

    def _check_type(self, clf, *classes):
        """
        Check if clf is an instance (or subclass) of any of the given class names
        :return: bool
        """
        for klass in classes:
            if type(clf).__name__ == klass:
                return True
            for T in getmro(type(clf)):
                if T.__name__ == klass:
                    return True
        return False

    def _add_constraint(self, env, constraint):
        """
        Add constraint to results to be considered
        :param env: when the constraint should run (offline, resources, runtime)
        :param constraint:
        """
        assert callable(constraint), 'constraint MUST be a function'
        self.constraints[env].append(constraint)

    def _checkpoint(self, board_name, dataset_name, clf_name, result, passes):
        """
        Save checkpoint for search
        :param board_name:
        :param dataset_name:
        :param clf_name:
        :param result:
        :param passes:
        """
        result = result._replace(passes=passes)
        result = {**result._asdict(), **{
            'board': board_name,
            'dataset': dataset_name
        }}
        del result['clf']
        self.output_file.set((board_name, dataset_name, clf_name), result)
        self.candidates.append(result)
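
A sketch tying the GridSearch pieces together; the project object is assumed to be a configured Project, and the thresholds are illustrative.

from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)

search = GridSearch(('Iris', X, y))
search.add_classifier(DecisionTreeClassifier())  # uses the default hyperparameter grid
search.save_to('grid_search.csv')
search.min_accuracy(0.9)                         # offline constraint
search.max_flash_percent(0.5)                    # resources constraint (triggers compilation)
search.search(project, cv=3)                     # assumed: project is a configured Project

# candidates that satisfied every constraint
print(search.df_passes)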