Example #1
    def run_opt(cls, hparams: Namespace, logger: Logger, checkpoint_dir: str, value_space: ValueSpace,
                constraints: list, objectives: list) -> AbstractAlgorithm:
        """ run the optimization, return all evaluated candidates """
        crossover = {
            'single': SinglePointCrossover,
            'mixed': MixedCrossover,
        }[cls._parsed_argument('crossover', hparams)](value_space)

        nsga2 = NSGA2(
            value_space=value_space,
            crossover=crossover,
            logger=logger,
            save_file='%s/%s.pickle' % (checkpoint_dir, NSGA2.__name__),
            max_iterations=cls._parsed_argument('iterations', hparams),
            population_size=cls._parsed_argument('pop_size', hparams),
            population_core_size=cls._parsed_argument('pop_core', hparams),
            num_tourney_participants=cls._parsed_argument('num_tourney', hparams),
            mutation_probability=cls._parsed_argument('prob_mutations', hparams),
            constraints=constraints,
            objectives=objectives)

        # load, search, return
        log_headline(logger, 'Starting %s' % cls.__name__)
        nsga2.search(load=True, log_each_iteration=True)
        return nsga2
Example #2
    def __init__(self, args: Namespace, *args_, **kwargs):
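        """ set up one method and one trainer, copy the data meta file from s1_path, and prepare the pymoo HPO """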
        super().__init__(args, *args_, **kwargs)

        # args
        self.s1_path = replace_standard_paths(
            self._parsed_argument('s1_path', args))
        self.reset_bn = self._parsed_argument('reset_bn', args)

        # files
        self.tmp_load_path = '%s/checkpoints/checkpoint.tmp.pt' % self.save_dir
        os.makedirs(os.path.dirname(self.tmp_load_path), exist_ok=True)
        shutil.copyfile('%s/data.meta.pt' % self.s1_path,
                        '%s/data.meta.pt' % self.save_dir)

        # one method, one trainer... could be executed in parallel in the future?
        log_headline(self.logger, 'setting up...')
        self.add_method()
        self.add_trainer(method=self.get_method(),
                         save_dir=self.save_dir,
                         num_devices=-1)
        self.log_detailed()
        self.get_method().get_network().set_forward_strategy(False)

        # algorithms
        estimator_kwargs = dict(trainer=self.trainer[0],
                                load_path=self.tmp_load_path)
        self.algorithm, self.estimators, self.termination = PymooHPOUtils.prepare(
            self, self.logger, estimator_kwargs, args)
Example #3
    def _run(self):
        """ execute the task """
        log_headline(self.logger, "Hosting the PBT server")
        self.logger.info('{:<15} {:<}'.format('URI', str(self._pyro_uri)))
        self.logger.info('{:<15} {:<}'.format('URI file', self._communication_file))
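        # serve requests until the loop condition says to stop, then remove the URI file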
        self._daemon.requestLoop(self._loop_condition)
        os.remove(self._communication_file)
Example #4
    def _run(self):
        """ execute the task """
        log_headline(self.logger, "Training")
        self.trainer[0].train_until_max_epoch()

        # save configs
        log_headline(self.logger, "Saving config")
        self.get_method().save_configs("%s/network/" %
                                       self.checkpoint_dir(self.save_dir))
Example #5
    def log_all(cls, logger: Logger):
        """
        log all registered items
        :param logger: where to log to
        """
        log_headline(logger, 'Registered classes')
        log_in_columns(logger, [(dct.text, str(dct.names())) for dct in RegisterDict.all],
                       add_bullets=True)
Example #6
    def _on_barrier_all_results(self):
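        """ all clients reported results for this epoch: save the current state, then query the selector for the next client responses """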
        # let one client figure out client instructions
        log_headline(self.logger, 'Synchronizing', target_len=80)
        self._next_callback_epoch = self._epoch + 1
        torch.save(self._log_dicts, self._meta_path(self.save_dir))
        self.selector.save(self.save_dir)
        self._responses.clear()
        self._responses = self.selector.select(self._epoch, self._log_dicts[self._epoch])
        log_headline(self.logger, 'Waiting for results', target_len=80)
Example #7
    def load(self, checkpoint_dir: str = None) -> 'AbstractTask':
        """ load the task state from the given (or default) checkpoint dir """
        log_headline(self.logger, 'Loading')
        checkpoint_dir = self.checkpoint_dir(checkpoint_dir)
        try:
            if not self._load(checkpoint_dir):
                self.logger.info('Did not load, maybe nothing to do: %s' % checkpoint_dir)
        except Exception as e:
            self.logger.error('Failed loading from checkpoint dir: "%s"' % checkpoint_dir, exc_info=e)
        return self
Example #8
    def __init__(self, args: Namespace, *args_, **kwargs):
        super().__init__(args, *args_, **kwargs)

        # single method, single trainer
        log_headline(self.logger, 'setting up...')
        self.add_method()
        self.add_trainer(method=self.get_method(),
                         save_dir=self.save_dir,
                         num_devices=-1)
        self.log_detailed()
Example #9
def explore(mini: MiniNASTabularBenchmark, logger: Logger = None, n=-1, sort_by='acc1', maximize=True):
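    """ log the top n benchmark entries (all if n <= 0), sorted by the given metric """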
    if logger is None:
        logger = LoggerManager().get_logger()
    log_headline(logger, "highest acc1 topologies (%s, %s, %s)"
                 % (mini.get_name(), mini.get_default_data_set(), mini.get_default_result_type()))
    rows = [("%s rank" % sort_by, "acc1", "loss", "params", "flops", "latency", "tuple")]
    for i, r in enumerate(mini.get_all_sorted([sort_by], [maximize])):
        if i >= n > 0:
            break
        rows.append((i, r.get_acc1(), r.get_loss(), r.get_params(), r.get_flops(), r.get_latency(), r.arch_tuple))
    log_in_columns(logger, rows)
Example #10
    def log_detailed(self):
        # log the trainer(s), then each method in detail, then the strategy manager
        log_headline(self.logger, 'Trainer, Method, Data, ...')
        rows = [('Trainer', '')]
        for i, trainer in enumerate(self.trainer):
            rows.append((' (%d)' % i, trainer.str()))
        log_in_columns(self.logger, rows)

        for i, method in enumerate(self.methods):
            log_headline(self.logger, "Method %d/%d" % (i+1, len(self.methods)), target_len=80)
            method.log_detailed(self.logger)

        StrategyManager().log_detailed(self.logger)
Example #11
    def prepare(cls: AbstractTask.__class__, logger: Logger, estimator_kwargs: dict, args: Namespace, index=None) \
            -> (Algorithm, [], Termination):
        """
        :param cls:
        :param logger:
        :param estimator_kwargs:
        :param args: global namespace
        :param index: index of the task
        :return: algorithm class, estimators, termination
        """

        # pymoo hpo algorithm
        cls_algorithm = cls._parsed_meta_argument(
            Register.hpo_pymoo_algorithms,
            'cls_hpo_pymoo_algorithm',
            args,
            index=index)
        assert issubclass(cls_algorithm, AbstractPymooAlgorithm), \
            'Method must have class methods to optimize the architecture'
        algorithm = cls_algorithm.from_args(args)

        # estimators
        log_headline(logger, 'adding network estimators')
        estimators = []
        for i, e in enumerate(
                cls._parsed_meta_arguments(Register.hpo_estimators,
                                           'cls_hpo_estimators',
                                           args,
                                           index=index)):
            estimator = e(args=args, index=i, **estimator_kwargs)
            estimators.append(estimator)
            logger.info(estimator.str())

        # termination
        log_headline(logger, 'adding algorithm termination')
        cls_terminator = cls._parsed_meta_argument(
            Register.hpo_pymoo_terminators,
            'cls_hpo_pymoo_termination',
            args,
            index=index)
        assert issubclass(cls_terminator, AbstractPymooTermination),\
            "termination must be a subclass of %s" % AbstractPymooTermination.__name__
        termination = cls_terminator.from_args(args=args, index=None)
        logger.info(cls_terminator().str())

        return algorithm, estimators, termination
Example #12
    def run_opt(cls, hparams: Namespace, logger: Logger, checkpoint_dir: str, value_space: ValueSpace,
                constraints: list, objectives: list, num_eval=None, strategy_name=None) -> AbstractAlgorithm:
        """ run the optimization, return all evaluated candidates """

        randomly = RandomlyEval(
            value_space=value_space,
            logger=logger,
            save_file='%s/%s.pickle' % (checkpoint_dir, RandomlyEval.__name__),
            constraints=constraints,
            objectives=objectives,
            num_eval=num_eval if num_eval is not None else cls._parsed_argument('num_eval', hparams),
            strategy_name=strategy_name)

        # load, search, return
        log_headline(logger, 'Starting %s' % cls.__name__)
        randomly.search(load=True, log_each_iteration=True)
        return randomly
Example #13
    def _on_barrier_first_use(self):
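        """ first use of the barrier: let the selector create initial mutations for every client and log them """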
        # let one client figure out client instructions
        log_headline(self.logger, 'Setting up initial mutations', target_len=80)
        self._first_use = False
        self._responses = {}
        lines = []
        for client_id, (log_dict, response) in \
                self.selector.first_use(self._log_dicts[self._epoch]).items():
            self._responses[client_id] = response
            self._log_dicts[self._epoch][client_id].update(log_dict)
            lines.append(["[{:<3}]".format(client_id), "Mutated values", str(log_dict)])
        log_in_columns(self.logger, lines)
        log_headline(self.logger, 'Waiting for results', target_len=80)
Example #14
    def explore(mini: MiniNATSBenchTabularBenchmark):
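        """ log a few specific entries of the given benchmark, then the top architectures by acc1, with and without skip """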
        logger = LoggerManager().get_logger()

        # some stats of specific results
        logger.info(
            mini.get_by_arch_tuple((4, 3, 2, 1, 0, 2)).get_info_str('cifar10'))
        logger.info("")
        mini.get_by_arch_tuple((1, 2, 1, 2, 3, 4)).print(logger.info)
        logger.info("")
        mini.get_by_index(1554).print(logger.info)
        logger.info("")
        mini.get_by_arch_str(
            '|avg_pool_3x3~0|+|nor_conv_1x1~0|skip_connect~1|+|nor_conv_1x1~0|skip_connect~1|skip_connect~2|'
        ).print(logger.info)
        logger.info("")

        # best results by acc1
        rows = [("acc1", "params", "arch tuple", "arch str")]
        log_headline(
            logger, "highest acc1 topologies (%s, %s, %s)" %
            (mini.get_name(), mini.get_default_data_set(),
             mini.get_default_result_type()))
        for i, r in enumerate(mini.get_all_sorted(['acc1'], [True])):
            rows.append(
                (r.get_acc1(), r.get_params(), str(r.arch_tuple), r.arch_str))
            if i > 8:
                break
        log_in_columns(logger, rows)
        logger.info("")

        # best results by acc1, without skip connections
        rows = [("acc1", "arch tuple", "arch str")]
        c = 0
        log_headline(
            logger, "highest acc1 topologies without skip (%s, %s, %s)" %
            (mini.get_name(), mini.get_default_data_set(),
             mini.get_default_result_type()))
        for i, r in enumerate(mini.get_all_sorted(['acc1'], [True])):
            if 1 not in r.arch_tuple:
                rows.append((r.get_acc1(), str(r.arch_tuple), r.arch_str))
                c += 1
            if c > 9:
                break
        log_in_columns(logger, rows)
Example #15
    def _run(self, save=False):
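        """ evaluate all benchmark architectures (plus random ones, up to measure_min) and save the results as a new mini bench """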
        # value spaces
        values = set()
        sm = StrategyManager()

        # add all evaluated architectures of the benchmarks
        for bs in self.benchmark_sets:
            assert isinstance(bs, MiniNASTabularBenchmark)
            l0, l1 = len(sm.ordered_names(unique=True)), bs.get_value_space().num_choices()
            assert l0 == l1, "Num choices of the network space (%d) and the bench space (%d) must match" % (
                l0, l1)
            for r in bs.get_all():
                values.add(r.arch_tuple)
        if len(values) > 0:
            self.logger.info(
                "Added %d architectures from given benchmark set(s) to the list"
                % len(values))

        # if the space is smaller than desired, add random architectures
        network = self.get_method().get_network()
        assert isinstance(network, SearchUninasNetwork)
        net_space = sm.get_value_space()
        if self.measure_min > len(values):
            self.logger.info("Adding random architectures, have %d/%d" %
                             (len(values), self.measure_min))
            while len(values) < self.measure_min:
                values.add(net_space.random_sample())

        # evaluate the given architectures
        self._architecture_space = SpecificValueSpace(list(values))
        algorithm, population = super()._run(save=save)

        # TODO: add info to the candidates, e.g. from profilers, such as loss/flops/latency/macs

        # create a new bench
        bench = MiniNASSearchTabularBenchmark.make_from_population(
            population, self.get_method())
        log_headline(self.logger, "Created bench file from super-network")
        bench.print_info(self.logger.info)
        bench.save_in_dir(self.save_dir)
        explore(bench, self.logger, n=10)
Example #16
    def run(problem: PymooProblem, algorithm: Algorithm,
            termination: Termination, seed: int, logger: Logger,
            checkpoint_dir: str) -> PymooResultWrapper:
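        """ minimize the given pymoo problem, log the best candidates, and plot the objective values and hypervolume """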
        # run
        log_headline(logger, 'running')
        wrapper = PymooResultWrapper.minimize(
            problem,
            algorithm,
            termination,
            seed=seed,
            pf=problem.pareto_front(use_cache=False),
            save_history=True,
            verbose=True)

        # log, plot
        wrapper.log_best(logger)
        wrapper.plot_all_f(checkpoint_dir)
        wrapper.plot_hv(checkpoint_dir)

        return wrapper
Example #17
def common_s2_prepare_run(logger: Logger, trainer: SimpleTrainer, s1_path: str,
                          tmp_load_path: str, reset_bn: bool,
                          methods: [AbstractMethod]):
    """
    common things done when starting s2
    """
    # reset batch norm, create temporary new weights
    log_headline(logger, "Recovering trained super-net weights")
    trainer[0].load(s1_path)

    if reset_bn:
        logger.info('Resetting batchnorm statistics')
        for method in methods:
            for m in method.get_network().modules():
                if isinstance(m, torch.nn.BatchNorm2d):
                    m.running_mean.zero_()
                    m.running_var.zero_()
                    m.running_var += 1
    trainer[0].save(tmp_load_path)
    methods[0].get_network().set_forward_strategy(False)
Example #18
    def log_detailed(self, logger: Logger):
        def _get_name_rows(s: Union[StrategyManager, AbstractWeightStrategy],
                           prefix: str) -> list:
            order1, order2 = s.ordered_names(unique=False), s.ordered_names(unique=True)
            if len(order1) == len(order2):
                return [
                    ('%s weights in request order (all are unique):' % prefix,
                     str(order1)),
                ]
            return [
                ('%s weights in request order (not unique):' % prefix,
                 str(order1)),
                ('%s weights in request order (unique):' % prefix,
                 str(order2)),
            ]

        txt = "Weight strategies" if len(
            self.strategies) > 1 else "Weight strategy"
        log_headline(logger, txt)
        strategies = self.get_strategies_list()
        if len(strategies) > 1:
            rows = _get_name_rows(self, 'all')
            log_in_columns(logger, rows)
            logger.info("")

        for i, strategy in enumerate(strategies):
            if i > 0:
                logger.info("")

            logger.info(strategy.str())
            rows = [("name", "num choices", "used")]
            for r in strategy.get_requested_weights():
                rows.append(
                    (r.name, r.num_choices(), '%dx' % r.num_requests()))
            logger.info("Weights:")
            log_in_columns(logger, rows, add_bullets=True)

            rows = _get_name_rows(strategy, 'strategy')
            log_in_columns(logger, rows)
Example #19
    def _run(self):
        """ execute the task """
        log_headline(self.logger, "Training")
        self.trainer[0].train_until_max_epoch()

        # try to immediately evaluate the result on the maybe-given bench db
        if self.benchmark_set is not None:
            gene = self.get_method().get_network().get_space_tuple(unique=True,
                                                                   flat=True)
            try:
                result = self.benchmark_set.get_by_arch_tuple(gene)
                assert result is not None, \
                    "Bench exists, but there is no result for gene %s" % str(gene)
                log_headline(self.logger, "Bench results")
                self.benchmark_set.print_info(self.logger.info)
                result.print(self.logger.info, prefix='\t')
                self.benchmark_set.log_result(result, self.get_method().logger)
            except Exception as e:
                self.logger.warning("Can not load a result from the bench db",
                                    exc_info=e)

        # save configs
        log_headline(self.logger, "Saving config(s)")
        self.get_method().save_configs("%s/network/" %
                                       self.checkpoint_dir(self.save_dir))
Example #20
    def __init__(self, args: Namespace, *args_, **kwargs):
        AbstractTask.__init__(self, args, *args_, **kwargs)

        # for architecture weights
        log_headline(self.logger, 'adding Strategy and Data')
        StrategyManager().add_strategy(RandomChoiceStrategy(max_epochs=1))

        # data
        data_set = self._parsed_meta_argument(Register.data_sets,
                                              'cls_data',
                                              args,
                                              index=None).from_args(args,
                                                                    index=None)
        self.batch_size = data_set.get_batch_size(train=False)

        # device handling
        self.devices_handler = self._parsed_meta_argument(Register.devices_managers, 'cls_device', args, index=None)\
            .from_args(self.seed, self.is_deterministic, args, index=None)
        self.mover = self.devices_handler.allocate_devices(num=-1)

        # network
        log_headline(self.logger, 'adding Network')
        self.net = self._parsed_meta_argument(Register.networks,
                                              'cls_network',
                                              args,
                                              index=None).from_args(args)
        self.net.build(s_in=data_set.get_data_shape(),
                       s_out=data_set.get_label_shape())
        self.net = self.mover.move_module(self.net)

        # profiler
        log_headline(self.logger, 'adding Profiler')
        self.profiler = self._parsed_meta_argument(Register.profilers, 'cls_profiler', args, index=None)\
            .from_args(args, index=None, is_test_run=self.is_test_run)
        assert isinstance(self.profiler, AbstractProfiler)
Example #21
    def prepare(cls: AbstractTask.__class__, logger: Logger, estimator_kwargs: dict, args: Namespace, index=None)\
            -> (AbstractHPO, [], []):
        """
        :param cls:
        :param logger:
        :param estimator_kwargs:
        :param args: global namespace
        :param index: index of the task
        :return: hpo class, constraints, objectives
        """

        # hp optimizer
        try:
            hpo = cls._parsed_meta_argument(Register.hpo_self_algorithms,
                                            'cls_hpo_self_algorithm',
                                            args,
                                            index=index)
            assert issubclass(hpo, AbstractHPO), \
                'Method must have class methods to optimize the architecture'
        except Exception:
            hpo = None

        # estimators
        log_headline(logger, 'adding network estimators')
        constraints, objectives = [], []
        for i, e in enumerate(
                cls._parsed_meta_arguments(Register.hpo_estimators,
                                           'cls_hpo_estimators',
                                           args,
                                           index=index)):
            estimator = e(args=args, index=i, **estimator_kwargs)
            if estimator.is_constraint():
                constraints.append(estimator)
            if estimator.is_objective():
                objectives.append(estimator)
            logger.info(estimator.str())
        return hpo, constraints, objectives
Example #22
    def _run(self):
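        """ run the architecture HPO on the (masked) benchmark value space and plot the resulting pareto fronts """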
        file_viz = '%s/%s.pdf' % (self.checkpoint_dir(self.save_dir), self.hpo.__name__)
        space = SelfHPOUtils.mask_architecture_space(
            self.args, self.benchmark_set.get_value_space())
        algorithm = self.hpo.run_opt(hparams=self.args,
                                     logger=self.logger,
                                     checkpoint_dir=self.checkpoint_dir(
                                         self.save_dir),
                                     value_space=space,
                                     constraints=self.constraints,
                                     objectives=self.objectives)
        population = algorithm.get_total_population(sort=True)
        population.plot(self.objectives[0].key,
                        self.objectives[1].key,
                        show=False,
                        save_path=file_viz,
                        num_fronts=-1)

        if self.plot_true_pareto and not self.hpo.is_full_eval():
            log_headline(
                self.logger,
                'Starting a full evaluation to get the true pareto front')
            full = RandomlyEval(
                value_space=space,
                logger=self.logger,
                save_file='%s/%s.pickle' %
                (self.checkpoint_dir(self.save_dir), RandomlyEval.__name__),
                constraints=self.constraints,
                objectives=self.objectives,
                num_eval=-1)
            full.search(load=True)
            population.add_other_pareto_to_plot(full.population,
                                                self.objectives[0].key,
                                                self.objectives[1].key,
                                                show=False,
                                                save_path=file_viz)
        return algorithm, population
Example #23
    def __init__(self, args: Namespace, *args_, **kwargs):
        AbstractNetTask.__init__(self, args, *args_, **kwargs)

        # args
        self.reset_bn = self._parsed_argument('reset_bn', args)
        self.s1_path = replace_standard_paths(
            self._parsed_argument('s1_path', args))

        # files
        self.tmp_load_path = '%s/checkpoint.tmp.pt' % self.save_dir
        os.makedirs(os.path.dirname(self.tmp_load_path), exist_ok=True)
        shutil.copyfile('%s/data.meta.pt' % self.s1_path,
                        '%s/data.meta.pt' % self.save_dir)

        # one method, one trainer... could be executed in parallel in the future?
        log_headline(self.logger, 'setting up...')
        self.add_method()
        self.add_trainer(method=self.get_method(),
                         save_dir=self.save_dir,
                         num_devices=-1)
        self.log_detailed()
        self.get_method().get_network().set_forward_strategy(False)

        # algorithms
        estimator_kwargs = dict(trainer=self.trainer[0],
                                load_path=self.tmp_load_path)
        self.hpo, self.constraints, self.objectives = SelfHPOUtils.prepare(
            self, self.logger, estimator_kwargs, args)

        # arc space
        space = ValueSpace(*[
            DiscreteValues.interval(0, n)
            for n in self.get_method().strategy_manager.get_num_choices(
                unique=True)
        ])
        self._architecture_space = SelfHPOUtils.mask_architecture_space(
            self.args, space)
Example #24
    def finish(self):
        log_headline(self.logger, 'Saving', target_len=80)
        self.selector.cleanup(self._epoch)
        self.selector.log_saves()
        self.selector.plot_results(self.plots_dir, self._log_dicts)
        self.logger.info('-' * 80)
Example #25
    def _initialize_weights(self, net: AbstractModule, logger: logging.Logger):
        assert isinstance(
            net, AbstractUninasNetwork
        ), "This initializer will not work with external networks!"
        search_config = Builder.find_net_config_path(self.path,
                                                     pattern='search')

        checkpoint = CheckpointCallback.load_last_checkpoint(self.path)
        state_dict = checkpoint.get('state_dict')

        # figure out correct weights in super-network checkpoint
        if len(self.gene) > 0:
            log_headline(logger,
                         "tmp network to track used params",
                         target_len=80)
            sm = StrategyManager()
            tmp_s = RandomChoiceStrategy(max_epochs=1, name='__tmp__')
            assert len(sm.get_strategies_list()) == 0, \
                "can not load when there already is a search network"
            sm.add_strategy(tmp_s)
            sm.set_fixed_strategy_name('__tmp__')

            search_net = Builder().load_from_config(search_config)
            assert isinstance(search_net, SearchUninasNetwork)
            s_in, s_out = net.get_shape_in(), net.get_shape_out()
            search_net.build(s_in, s_out[0])
            search_net.set_forward_strategy(False)
            search_net.forward_strategy(fixed_arc=self.gene)
            tracker = search_net.track_used_params(
                s_in.random_tensor(batch_size=2))
            # tracker.print()

            logger.info(' > loading weights of gene %s from checkpoint "%s"' %
                        (str(self.gene), self.path))
            target_dict = net.state_dict()
            target_names = list(target_dict.keys())
            new_dict = {}

            # add all stem and head weights, they are at the front of the dict and have pretty much the same name
            log_columns = [('shape in checkpoint', 'name in checkpoint',
                            'name in network', 'shape in network')]
            for k, v in state_dict.items():
                if '.stem.' in k or '.heads.' in k:
                    tn = target_names.pop(0)
                    ts = target_dict[tn].shape
                    log_columns.append(
                        (str(list(v.shape)), k, tn, str(list(ts))))
                    n = k.replace('net.', '', 1)
                    assert n == tn
                    new_dict[n] = v

            # add all cell weights, can generally not compare names, only shapes
            for i, tracker_cell_entry in enumerate(tracker.get_cells()):
                for entry in tracker_cell_entry.get_pareto_best():
                    tn = target_names.pop(0)
                    ts = target_dict[tn].shape
                    log_columns.append((str(list(entry.shape)), entry.name, tn,
                                        str(list(ts))))
                    assert entry.shape == ts,\
                        'Mismatching shapes for "%s" and "%s", is the gene correct?' % (entry.name, tn)
                    new_dict[tn] = state_dict[entry.name]

            # log matches, load
            log_in_columns(logger, log_columns, add_bullets=True)
            net.load_state_dict(new_dict, strict=self.strict)

            # clean up
            del search_net
            sm.delete_strategy('__tmp__')
            del sm

        # simply load
        else:
            logger.info(' > simply loading state_dict')
            net.load_state_dict(state_dict, strict=self.strict)
Example #26
    def _run(self):
        checkpoint_dir = self.checkpoint_dir(self.save_dir)
        file_plot = '%s/plots/%s-%s/%s/%s_%s.pdf' % (checkpoint_dir, '%d',
                                                     '%d', '%s', '%s', '%s')
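        # the remaining '%d'/'%s' placeholders are filled later, per bench pair, metric, and data set combination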

        # figure out what each bench has
        data_sets = []
        architectures = []
        for bs in self.benchmark_sets:
            assert isinstance(bs, MiniNASTabularBenchmark)
            data_sets.append(set(bs.get_all_datasets()))
            architectures.append(set(bs.get_all_architecture_tuples()))

        # plot all set intersections
        for i0 in range(len(self.benchmark_sets) - 1):
            for i1 in range(i0 + 1, len(self.benchmark_sets)):
                log_headline(self.logger,
                             "correlating i0=%d and i1=%d" % (i0, i1),
                             target_len=80)
                bench0, bench1 = self.benchmark_sets[i0], self.benchmark_sets[i1]

                self.logger.info("bench[%d]: %s" % (i0, bench0.get_name()))
                self.logger.info("bench[%d]: %s" % (i1, bench1.get_name()))

                # intersection of evaluated architectures
                arc0, arc1 = architectures[i0], architectures[i1]
                arc = list(arc0.intersection(arc1))
                self.logger.info(
                    "num architectures: num bench[%d] = %d, num bench[%d] = %d, num intersection = %d"
                    % (i0, len(arc0), i1, len(arc1), len(arc)))
                if len(arc) == 0:
                    self.logger.info(
                        "skipping, can not correlate any architectures")
                    continue

                # intersection of evaluated data sets
                ds0, ds1 = data_sets[i0], data_sets[i1]
                ds, used_ds = list(ds0.intersection(ds1)), []
                if self.same_dataset:
                    used_ds = [(d, d) for d in ds]
                else:
                    for ds0_ in ds0:
                        for ds1_ in ds1:
                            used_ds.append((ds0_, ds1_))
                self.logger.info(
                    "data sets: bench[%d] = %s, bench[%d] = %s, intersection = %s, used combinations = %s"
                    % (i0, ds0, i1, ds1, ds, used_ds))
                if len(used_ds) == 0:
                    self.logger.info(
                        "skipping, can not correlate any data sets")
                    continue

                # get all relevant results
                results0, results1 = [], []
                for arc_ in arc:
                    results0.append(bench0.get_by_arch_tuple(arc_))
                    results1.append(bench1.get_by_arch_tuple(arc_))

                # correlate
                for ds0_, ds1_ in used_ds:
                    for key in MiniResult.get_metric_keys():
                        name = 'all'
                        ds_str = ds0_ if ds0_ == ds1_ else "%s-%s" % (ds0_, ds1_)

                        type0 = self.benchmark_sets[i0].default_result_type
                        type1 = self.benchmark_sets[i1].default_result_type
                        values0 = [
                            r.get(key, data_set=ds0_, result_type=type0)
                            for r in results0
                        ]
                        values1 = [
                            r.get(key, data_set=ds1_, result_type=type1)
                            for r in results1
                        ]

                        values0 = np.nan_to_num(values0, nan=-1)
                        values1 = np.nan_to_num(values1, nan=-1)

                        # generate plot
                        m = self.correlation_cls[0](
                            column_names=('%s %s' % (bench0.get_name(), type0),
                                          '%s %s' %
                                          (bench1.get_name(), type1)),
                            add_lines=False,
                            can_show=False)
                        m.add_data(values0,
                                   values1,
                                   key,
                                   other_metrics=self.correlation_cls,
                                   s=8)
                        m.plot(legend=True,
                               show=False,
                               save_path=file_plot %
                               (i0, i1, name, key, ds_str))

                        # can not log if there is no exp_logger...
                        """
Example #27
from uninas.utils.paths import replace_standard_paths
from uninas.register import Register
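# note: this snippet also relies on LoggerManager, log_headline, get_network, Shape
# and AbstractUninasNetwork from uninas; their imports are not shown here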


def example_export_network(path: str) -> AbstractUninasNetwork:
    """ create a new network and export it, does not require to have onnx installed """
    network = get_network("FairNasC",
                          Shape([3, 224, 224]),
                          Shape([1000]),
                          weights_path=None)
    network = network.cuda()
    network.export_onnx(path, export_params=True)
    return network


try:
    import onnx

    if __name__ == '__main__':
        logger = LoggerManager().get_logger()
        export_path = replace_standard_paths("{path_tmp}/onnx/FairNasC.onnx")
        net1 = example_export_network(export_path)

        log_headline(logger, "onnx graph")
        net2 = onnx.load(export_path)
        onnx.checker.check_model(net2)
        logger.info(onnx.helper.printable_graph(net2.graph))

except ImportError as e:
    Register.missing_import(e)
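
# a follow-up sketch (not part of the original demo): running the exported model
# with onnxruntime; like onnx above, it is treated as an optional dependency
try:
    import numpy as np
    import onnxruntime as ort

    if __name__ == '__main__':
        onnx_path = replace_standard_paths("{path_tmp}/onnx/FairNasC.onnx")
        session = ort.InferenceSession(onnx_path, providers=['CPUExecutionProvider'])
        # the demo network takes [N, 3, 224, 224] images and outputs [N, 1000] logits
        dummy = np.random.randn(1, 3, 224, 224).astype(np.float32)
        logits = session.run(None, {session.get_inputs()[0].name: dummy})[0]
        print(logits.shape)  # (1, 1000)

except ImportError as e:
    Register.missing_import(e)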
Example #28
    def _run(self):
        """ execute the task """
        log_headline(self.logger, "Profiling")
        self.profiler.profile(self.net, self.mover, self.batch_size)
        self.profiler.save(self._profile_file(self.save_dir))