Example #1
    def save_to(self, filename, overwrite=False):
        """
        Set the file where results will be saved
        :param filename: path of the checkpoint file
        :param overwrite: if True, clear any results already stored in the file
        """
        self.output_file = CheckpointFile(filename, keys=['board', 'dataset', 'clf_description'])
        if overwrite:
            self.output_file.clear()
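
A minimal usage sketch, assuming an already-constructed host object named search that exposes this method; the file name is made up:

    # Point the search at a checkpoint file; overwrite=True wipes
    # results left over from a previous run before starting.
    search.save_to('results.json', overwrite=True)

    # The default (overwrite=False) keeps existing checkpoints, so
    # previously benchmarked combinations can be skipped later.
    search.save_to('results.json')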
Example #2
    def save_to(self, filename, existing):
        """
        Save results to a file
        :param filename: string, path of the checkpoint file
        :param existing: string, what to do if a benchmark already exists. One of {skip, overwrite}
        """
        assert existing in ['skip', 'overwrite'], 'existing MUST be one of {skip, overwrite}'

        self.checkpoints = CheckpointFile(filename,
                                          keys=['fqbn', 'dataset', 'clf'])
        self.existing = existing
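
A usage sketch for this variant, assuming a hypothetical host object named runner; the existing argument is validated against {skip, overwrite}:

    # Resume an interrupted run: benchmarks already in the file are skipped.
    runner.save_to('benchmarks.json', existing='skip')

    # Any other value trips the assertion:
    # runner.save_to('benchmarks.json', existing='replace')  # AssertionError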
Example #3
    def __init__(self):
        """Initialize with no classifiers and the default set of result columns"""
        self.classifiers = []
        self.hidden_columns = []
        self.output_file = CheckpointFile(None,
                                          keys=['board', 'dataset', 'clf'])
        self.all_columns = [
            'board', 'dataset', 'clf', 'n_features', 'flash', 'raw_flash',
            'flash_percent', 'flash_score', 'memory', 'raw_memory',
            'memory_percent', 'memory_score', 'offline_accuracy',
            'online_accuracy', 'inference_time'
        ]
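
An illustrative sketch of how hidden_columns could interact with all_columns when rendering results; the visible_columns helper is invented here and is not part of the original code:

    def visible_columns(self):
        # columns actually displayed: everything in all_columns
        # that has not been pushed into hidden_columns
        return [c for c in self.all_columns if c not in self.hidden_columns]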
Example #4
    def __init__(self, dataset):
        """
        :param dataset: tuple, either (name, X, y) or (X, y)
        """
        assert isinstance(dataset, tuple), 'Dataset MUST be a tuple, either (name, X, y) or (X, y)'
        assert len(dataset) in (2, 3), 'Dataset MUST be a tuple, either (name, X, y) or (X, y)'
        # normalize the 2-tuple form to (name, X, y) with a placeholder name
        self.dataset = dataset if len(dataset) == 3 else ('Unknown dataset', dataset[0], dataset[1])
        self.classifiers = []
        self.constraints = {
            'offline': [],
            'resources': [],
            'runtime': []
        }
        self.features = set()
        self.output_file = CheckpointFile(None, keys=['board', 'dataset', 'clf_description'])
        self.candidates = []
        self.runs = 0
        # namedtuple comes from the collections module
        self.Result = namedtuple('GridSearchResult', 'clf clf_description accuracy min_accuracy max_accuracy flash flash_percent memory memory_percent inference_time passes')
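
A construction sketch; the class name GridSearch is an assumption inferred from the GridSearchResult namedtuple, and the toy data is made up:

    import numpy as np

    X = np.random.rand(150, 4)             # toy feature matrix
    y = np.random.randint(0, 3, size=150)  # toy labels

    named = GridSearch(('toy-dataset', X, y))  # explicit dataset name
    anon = GridSearch((X, y))                  # name falls back to 'Unknown dataset'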
Example #5
    def benchmark(self,
                  port=None,
                  project=None,
                  inference_time=False,
                  save_to=None,
                  exists_ok=True,
                  exists_overwrite=False,
                  cv=3,
                  before_upload=None,
                  after_upload=None):
        """

        """
        if inference_time:
            assert port is not None or (
                project is not None and project.board is not None
                and project.board.port is not None), 'You MUST set a port'

        save_to = CheckpointFile(save_to, keys=['board', 'dataset', 'clf'])

        if save_to.key_exists(self.key) and exists_ok:
            # this combination has already been benchmarked: skip it
            return

        if save_to.key_exists(self.key) and not exists_overwrite:
            raise BoardBenchmarkAlreadyExists(self.key)

        if project is None:
            project = Project()

        if inference_time and port:
            project.board.set_port(port)

        # benchmark offline accuracy
        X = self.dataset.X
        y = self.dataset.y
        # pick 5 evenly spaced samples to embed in the on-device sketches
        idx = np.arange(len(X))[::(len(X) // 5)][:5]
        X_test = X[idx]
        y_test = y[idx]
        cross_results = cross_validate(self.classifier.generator(X, y),
                                       X,
                                       y,
                                       cv=cv,
                                       return_estimator=True)
        offline_accuracy = cross_results['test_score'].mean()
        clf = cross_results['estimator'][0]

        benchmark = {
            'board': self.board.name,
            'dataset': self.dataset.name,
            'clf': self.classifier.name,
            'fqbn': '',
            'cpu_speed': self.board.cpu_speed,
            'cpu_family': self.board.cpu_family,
            'n_samples': X.shape[0],
            'n_features': X.shape[1],
            'offline_accuracy': offline_accuracy,
            'inference_time': 0
        }

        with project.tmp_project() as tmp:
            tmp.board.set_model(self.board)

            benchmark['fqbn'] = tmp.board.fqbn
            cache_key = (self.board.name, self.dataset.name)

            if cache_key not in BoardBenchmark._cache:
                BoardBenchmark._cache[cache_key] = self.get_baseline(
                    tmp, X_test)

            baseline = BoardBenchmark._cache.get(cache_key)

            sketch = jinja('metrics/Resources.jinja', {'X': X_test})
            ported = port_clf(clf, classname='Classifier')
            tmp.files.add('%s.ino' % tmp.name, contents=sketch, exists_ok=True)
            tmp.files.add('Classifier.h', contents=ported, exists_ok=True)

            resources = self._parse_resources(tmp)
            resources['flash_increment'] = resources['flash'] - baseline['flash']
            resources['memory_increment'] = resources['memory'] - baseline['memory']
            resources['flash_increment_percent'] = (
                float(resources['flash_increment']) / resources['flash_max']
                if resources['flash_max'] > 0 else 0)
            resources['memory_increment_percent'] = (
                float(resources['memory_increment']) / resources['memory_max']
                if resources['memory_max'] > 0 else 0)
            benchmark.update(resources)

            if inference_time:
                sketch = jinja('metrics/Runtime.jinja', {
                    'X': X_test,
                    'y': y_test
                })
                ported = port_clf(clf, classname='Classifier')

                tmp.files.add(tmp.ino_name, contents=sketch, exists_ok=True)
                tmp.files.add('Classifier.h', contents=ported, exists_ok=True)
                if callable(before_upload):
                    before_upload()
                tmp.upload(success_message='')
                if callable(after_upload):
                    after_upload(tmp)
                benchmark.update(self._parse_inference_time(tmp))

        save_to.set(self.key, benchmark)

        return benchmark
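
An end-to-end sketch; bench stands for a hypothetical BoardBenchmark instance, and only the keyword arguments of benchmark() come from the code above:

    import time

    # Measure flash/memory usage and on-device inference time,
    # resuming from results already stored in benchmarks.json.
    result = bench.benchmark(
        port='/dev/ttyUSB0',   # required because inference_time=True
        inference_time=True,
        save_to='benchmarks.json',
        exists_ok=True,        # silently skip already-benchmarked keys
        cv=5,
        after_upload=lambda project: time.sleep(2),  # e.g. let the board reboot
    )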