Exemplo n.º 1
0
def log_metrics(expdir: str, filename: str, metrics, test_acc: float, args,
                nsds: Nasbench101Dataset, model_id: int) -> None:
    """Log final results to stdout and persist them under ``expdir``.

    Writes three artifacts named after ``filename``:
      - ``<filename>.tsv``: one summary row (accuracies, hyperparams, env info)
      - ``<filename>_metrics.yaml``: the full per-epoch ``metrics`` history
      - ``<filename>_nasbench101.yaml``: the NAS-Bench-101 record for ``model_id``

    Args:
        expdir: experiment output directory.
        filename: base name (no extension) for the files written here.
        metrics: per-epoch metrics; last entry must expose 'val_top1'/'train_top1'.
        test_acc: test accuracy measured by this run.
        args: parsed CLI args (epochs, batch sizes, names, seed, ...).
        nsds: NAS-Bench-101 dataset used to look up the reference test accuracy.
        model_id: index of the model within ``nsds``.
    """
    # BUG FIX: the original f-strings contained no placeholders, so `filename`
    # was silently ignored and every call overwrote the same constant-named
    # files; interpolate `filename` as clearly intended.
    print(f'filename: {filename}', f'test_acc: {test_acc}',
          f'nasbenc101_test_acc: {nsds.get_test_acc(model_id)}', metrics[-1])
    results = [
        ('test_acc', test_acc),
        ('nasbenc101_test_acc', nsds.get_test_acc(model_id)),
        ('val_acc', metrics[-1]['val_top1']),
        ('epochs', args.epochs),
        ('train_batch_size', args.train_batch_size),
        ('test_batch_size', args.test_batch_size),
        ('model_name', args.model_name),
        ('exp_name', args.experiment_name),
        ('exp_desc', args.experiment_description),
        ('seed', args.seed),
        ('devices', utils.cuda_device_names()),
        ('half', args.half),
        ('cutout', args.cutout),
        ('train_acc', metrics[-1]['train_top1']),
        ('loader_workers', args.loader_workers),
        # NOTE: stored as a unix timestamp string, not a formatted date
        ('date', str(time.time())),
    ]
    utils.append_csv_file(os.path.join(expdir, f'{filename}.tsv'), results)
    with open(os.path.join(expdir, f'{filename}_metrics.yaml'), 'w') as f:
        yaml.dump(metrics, f)
    with open(os.path.join(expdir, f'{filename}_nasbench101.yaml'), 'w') as f:
        yaml.dump(nsds[model_id], f)
Exemplo n.º 2
0
    def _save_trained(self, reductions:int, cells:int, nodes:int,
                      search_iter:int,
                      metrics_stats:MetricsStats)->None:
        """Save the model's metric info into per-model YAML files and append
        a summary row to the shared pareto TSV (``self._parito_filepath``).

        Args:
            reductions: number of reduction cells in the model.
            cells: number of cells.
            nodes: number of nodes per cell.
            search_iter: search iteration this model belongs to.
            metrics_stats: combined training metrics and model statistics.
        """
        # construct path where we will save; the metrics_dir template is
        # filled from this call's local variables (reductions, cells, ...)
        subdir = utils.full_path(self.metrics_dir.format(**vars()), create=True)

        # save full metrics info (the always-truthy `if filepath:` guards from
        # the original are dropped: os.path.join of non-empty parts is never '')
        metrics_stats_filepath = os.path.join(subdir, 'metrics_stats.yaml')
        with open(metrics_stats_filepath, 'w') as f:
            yaml.dump(metrics_stats, f)

        # save just the training metrics separately
        metrics_filepath = os.path.join(subdir, 'metrics.yaml')
        with open(metrics_filepath, 'w') as f:
            yaml.dump(metrics_stats.train_metrics, f)

        logger.info({'metrics_stats_filepath': metrics_stats_filepath,
                     'metrics_filepath': metrics_filepath})

        # append key info in root pareto data
        if self._parito_filepath:
            train_top1 = val_top1 = train_epoch = val_epoch = math.nan
            # extract metrics
            if metrics_stats.train_metrics:
                best_metrics = metrics_stats.train_metrics.run_metrics.best_epoch()
                train_top1 = best_metrics[0].top1.avg
                train_epoch = best_metrics[0].index
                # BUG FIX: the original indexed best_metrics[1] before the
                # len(best_metrics) > 1 check (which sat in a dead ternary
                # inside the branch), raising IndexError when no validation
                # entry exists; guard the length first.
                if len(best_metrics) > 1 and best_metrics[1]:
                    val_top1 = best_metrics[1].top1.avg
                    val_epoch = best_metrics[1].index

            # extract model stats; fall back to NaN when absent
            if metrics_stats.model_stats:
                flops = metrics_stats.model_stats.Flops
                parameters = metrics_stats.model_stats.parameters
                inference_memory = metrics_stats.model_stats.inference_memory
                inference_duration = metrics_stats.model_stats.duration
            else:
                flops = parameters = inference_memory = inference_duration = math.nan

            utils.append_csv_file(self._parito_filepath, [
                ('reductions', reductions),
                ('cells', cells),
                ('nodes', nodes),
                ('search_iter', search_iter),
                ('train_top1', train_top1),
                ('train_epoch', train_epoch),
                ('val_top1', val_top1),
                ('val_epoch', val_epoch),
                ('flops', flops),
                ('params', parameters),
                ('inference_memory', inference_memory),
                ('inference_duration', inference_duration)
                ])
Exemplo n.º 3
0
    def save_trained(self, conf_search: Config, reductions: int, cells: int,
                     nodes: int, model_metrics: ModelMetrics) -> None:
        """Save the model's stats and metric info into per-model YAML files
        and append a summary row to the shared TSV (``self._summary_filepath``).

        Args:
            conf_search: search config; must provide 'metrics_dir' template.
            reductions: number of reduction cells in the model.
            cells: number of cells.
            nodes: number of nodes per cell.
            model_metrics: trained model together with its training metrics.
        """
        metrics_dir = conf_search['metrics_dir']

        # construct path where we will save; the template is filled from this
        # call's local variables (reductions, cells, nodes, ...)
        subdir = utils.full_path(metrics_dir.format(**vars()), create=True)

        model_stats = nas_utils.get_model_stats(model_metrics.model)

        # save model_stats in its own file (the always-truthy `if filepath:`
        # guards from the original are dropped: os.path.join of non-empty
        # parts is never '')
        model_stats_filepath = os.path.join(subdir, 'model_stats.yaml')
        with open(model_stats_filepath, 'w') as f:
            yaml.dump(model_stats, f)

        # save just metrics separately for convenience
        metrics_filepath = os.path.join(subdir, 'metrics.yaml')
        with open(metrics_filepath, 'w') as f:
            yaml.dump(model_stats.metrics, f)

        logger.info({
            'model_stats_filepath': model_stats_filepath,
            'metrics_filepath': metrics_filepath
        })

        # append key info in root pareto data
        if self._summary_filepath:
            train_top1 = val_top1 = train_epoch = val_epoch = math.nan
            # extract metrics
            if model_metrics.metrics:
                best_metrics = model_metrics.metrics.run_metrics.best_epoch()
                train_top1 = best_metrics[0].top1.avg
                train_epoch = best_metrics[0].index
                # BUG FIX: the original indexed best_metrics[1] before the
                # len(best_metrics) > 1 check (which sat in a dead ternary
                # inside the branch), raising IndexError when no validation
                # entry exists; guard the length first.
                if len(best_metrics) > 1 and best_metrics[1]:
                    val_top1 = best_metrics[1].top1.avg
                    val_epoch = best_metrics[1].index

            # extract model stats
            flops = model_stats.Flops
            parameters = model_stats.parameters
            inference_memory = model_stats.inference_memory
            inference_duration = model_stats.duration

            utils.append_csv_file(self._summary_filepath,
                                  [('reductions', reductions),
                                   ('cells', cells), ('nodes', nodes),
                                   ('train_top1', train_top1),
                                   ('train_epoch', train_epoch),
                                   ('val_top1', val_top1),
                                   ('val_epoch', val_epoch), ('flops', flops),
                                   ('params', parameters),
                                   ('inference_memory', inference_memory),
                                   ('inference_duration', inference_duration)])