Example 1
def _test_resource_allocation_and_time_limit(num_jobs, num_folds_parallel,
                                             time_limit):
    num_cpus = get_cpu_count()
    num_gpus = get_gpu_count_all()
    time_start = time.time()
    # Build a dummy fold-fitting strategy with the requested fold parallelism
    # and overall time limit.
    fold_fitting_strategy = _construct_dummy_fold_strategy(
        time_limit=time_limit, num_folds_parallel=num_folds_parallel)
    # Schedule the requested number of fold-fit jobs without running them.
    for i in range(num_jobs):
        fold_fitting_strategy.schedule_fold_model_fit(dict())
    # Ask the strategy how it would split resources: per-job resources, the
    # number of sequential batches, and how many jobs run in parallel per batch.
    resources, batches, num_parallel_jobs = fold_fitting_strategy._get_resource_suggestions(
        len(fold_fitting_strategy.jobs))
    time_elapsed = time.time() - time_start
    time_remaining = time_limit - time_elapsed
    time_limit_fold = fold_fitting_strategy._get_fold_time_limit(
        len(fold_fitting_strategy.jobs))
    num_cpus_per_job = resources.get('num_cpus', 0)
    num_gpus_per_job = resources.get('num_gpus', 0)
    assert batches >= 1
    if batches > 1:
        # With multiple batches, the scheduled capacity must cover all jobs,
        # with at most one extra batch of slack.
        assert num_jobs <= num_parallel_jobs * batches <= (num_jobs +
                                                           num_parallel_jobs)
    # Per-job resources times the parallelism must never exceed the machine totals.
    assert num_cpus_per_job * num_parallel_jobs <= num_cpus
    if num_gpus != 0:
        assert num_gpus_per_job * num_parallel_jobs <= num_gpus
    else:
        assert num_gpus_per_job == 0
    # The per-fold time limit should be roughly the remaining time split evenly
    # across batches.
    assert math.isclose(time_limit_fold, (time_remaining / batches),
                        abs_tol=0.5)
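Below is a minimal sketch of how this helper might be driven as an actual test, assuming a pytest-style parametrization; the concrete parameter values are illustrative assumptions and are not taken from the original suite.

import pytest

# Hypothetical driver for the helper above; the values are chosen only to
# cover both the single-batch and the multi-batch scheduling paths.
@pytest.mark.parametrize(
    "num_jobs,num_folds_parallel,time_limit",
    [
        (4, 8, 120),   # fewer jobs than parallel folds -> one batch
        (8, 4, 60),    # more jobs than parallel folds -> multiple batches
    ],
)
def test_resource_allocation_and_time_limit(num_jobs, num_folds_parallel, time_limit):
    _test_resource_allocation_and_time_limit(num_jobs, num_folds_parallel, time_limit)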
Example 2
    def load(cls, path, verbosity=2):
        """Load previously saved predictor.

        Parameters
        ----------
        path : str
            The file path of the saved pickle file. If `path` is a directory, the loader will try to
            load the file `image_predictor.ag` in that directory.
        verbosity : int, default = 2
            Verbosity levels range from 0 to 4 and control how much information is printed.
            Higher levels correspond to more detailed print statements (you can set verbosity = 0 to suppress warnings).
            If using logging, you can alternatively control the amount of information printed via logger.setLevel(L),
            where L ranges from 0 to 50 (note: higher values of L correspond to fewer print statements, the opposite of verbosity levels).

        """
        if os.path.isdir(path):
            path = os.path.join(path, 'image_predictor.ag')
        with open(path, 'rb') as fid:
            gpu_count = get_gpu_count_all()
            if gpu_count > 0:
                obj = pickle.load(fid)
            else:
                # No GPU available: unpickle with a CPU-mapping unpickler so that
                # GPU-backed objects are loaded onto the CPU instead of failing.
                obj = CPU_Unpickler(fid).load()
        obj._verbosity = verbosity
        return obj
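For context, a hedged usage sketch of this classmethod; the `ImagePredictor` class name and the directory path are assumptions for illustration.

# Hypothetical usage; the class name and path are illustrative assumptions.
# `load` accepts either the pickle file itself or a directory containing
# `image_predictor.ag`, and falls back to CPU unpickling on GPU-less machines.
predictor = ImagePredictor.load('saved_models/my_predictor/', verbosity=1)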
Example 3
    def __init__(self, config=None, logger=None):
        super(ObjectDetection, self).__init__()
        self._fit_summary = {}
        self._logger = logger if logger is not None else logging.getLogger(__name__)
        self._logger.setLevel(logging.INFO)
        self._fit_summary = {}
        self._results = {}

        # cpu and gpu setting
        cpu_count = get_cpu_count()
        gpu_count = get_gpu_count_all()

        # default settings
        if not config:
            if gpu_count < 1:
                self._logger.info('No GPU detected/allowed, using most conservative search space.')
                config = LiteConfig()
            else:
                config = DefaultConfig()
            config = config.asdict()
        else:
            if not config.get('dist_ip_addrs', None):
                ngpus_per_trial = config.get('ngpus_per_trial', gpu_count)
                ngpus_per_trial = min(ngpus_per_trial, gpu_count)
                if ngpus_per_trial < 1:
                    self._logger.info('No GPU detected/allowed, using most conservative search space.')
                    default_config = LiteConfig()
                else:
                    default_config = DefaultConfig()
                config = default_config.merge(config, allow_new_key=True).asdict()

        # adjust cpu/gpu resources
        if not config.get('dist_ip_addrs', None):
            nthreads_per_trial = config.get('nthreads_per_trial', cpu_count)
            nthreads_per_trial = min(nthreads_per_trial, cpu_count)
            ngpus_per_trial = config.get('ngpus_per_trial', gpu_count)
            if ngpus_per_trial > gpu_count:
                ngpus_per_trial = gpu_count
                self._logger.warning(
                    "The number of requested GPUs is greater than the number of available GPUs. "
                    "Reducing the number to %d", ngpus_per_trial)
        else:
            raise ValueError('Please specify `nthreads_per_trial` and `ngpus_per_trial` '
                             'given that dist workers are available')

        # fix estimator-transfer relationship
        estimator = config.get('estimator', None)
        transfer = config.get('transfer', None)
        if estimator is not None and transfer is not None:
            if isinstance(estimator, ag.Space):
                estimator = estimator.data
            elif isinstance(estimator, str):
                estimator = [estimator]
            if isinstance(transfer, ag.Space):
                transfer = transfer.data
            elif isinstance(transfer, str):
                transfer = [transfer]

            # Keep only pretrained `transfer` weights whose names contain the
            # requested estimator name (e.g. a 'yolo3' estimator matches
            # 'yolo3_*' transfer models).
            valid_transfer = []
            for e in estimator:
                for t in transfer:
                    if e in t:
                        valid_transfer.append(t)

            if not valid_transfer:
                raise ValueError(f'No matching `transfer` model for {estimator}')
            if len(valid_transfer) == 1:
                config['transfer'] = valid_transfer[0]
            else:
                config['transfer'] = ag.Categorical(*valid_transfer)

        # additional configs
        config['num_workers'] = nthreads_per_trial
        config['gpus'] = [int(i) for i in range(ngpus_per_trial)]
        config['seed'] = config.get('seed', np.random.randint(32,767))
        config['final_fit'] = False
        self._cleanup_disk = config.get('cleanup_disk', True)
        self._config = config

        # scheduler options
        self.search_strategy = config.get('search_strategy', 'random')
        self.search_options = config.get('search_options', {})
        self.scheduler_options = {
            'resource': {'num_cpus': nthreads_per_trial, 'num_gpus': ngpus_per_trial},
            'checkpoint': config.get('checkpoint', 'checkpoint/exp1.ag'),
            'num_trials': config.get('num_trials', 2),
            'time_out': config.get('time_limits', 60 * 60),
            'resume': (len(config.get('resume', '')) > 0),
            'visualizer': config.get('visualizer', 'none'),
            'time_attr': 'epoch',
            'reward_attr': 'map_reward',
            'dist_ip_addrs': config.get('dist_ip_addrs', None),
            'searcher': self.search_strategy,
            'search_options': self.search_options,
            'max_reward': config.get('max_reward', 0.9)}
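A hedged sketch of constructing the task with a user-supplied config; every key below is one that `__init__` reads via `config.get`, while the concrete values and model names are illustrative assumptions.

# Illustrative config (all values are assumptions, not defaults from the source).
custom_config = {
    'nthreads_per_trial': 8,       # capped at the detected CPU count
    'ngpus_per_trial': 1,          # capped at the detected GPU count
    'num_trials': 4,
    'time_limits': 30 * 60,        # consumed as the scheduler's 'time_out'
    'estimator': 'yolo3',
    'transfer': ['yolo3_darknet53_coco', 'ssd_512_resnet50_v1_coco'],
}
task = ObjectDetection(config=custom_config)
# Only transfer models whose names contain the estimator name survive the
# filter above, so 'transfer' collapses to 'yolo3_darknet53_coco' here.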
Example 4
def _get_num_gpus_available():
    return get_gpu_count_all()
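This is only a thin wrapper; a one-line usage sketch follows, where the surrounding decision logic is an assumption for illustration.

# Hypothetical caller: skip GPU-specific setup when no device is visible.
if _get_num_gpus_available() == 0:
    print('No GPU detected; falling back to CPU-only training.')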
Example 5
    def __init__(self, config=None, logger=None, problem_type=None):
        super(ImageClassification, self).__init__()
        if problem_type is None:
            problem_type = MULTICLASS
        self._problem_type = problem_type
        self._fit_summary = {}
        self._logger = logger if logger is not None else logging.getLogger(__name__)
        self._logger.setLevel(logging.INFO)
        self._fit_summary = {}
        self._results = {}


        # cpu and gpu setting
        cpu_count = get_cpu_count()
        gpu_count = get_gpu_count_all()

        # default settings
        if not config:
            if gpu_count < 1:
                self._logger.info('No GPU detected/allowed, using most conservative search space.')
                config = LiteConfig()
            else:
                config = DefaultConfig()
            config = config.asdict()
        else:
            if not config.get('dist_ip_addrs', None):
                ngpus_per_trial = config.get('ngpus_per_trial', gpu_count)
                ngpus_per_trial = min(ngpus_per_trial, gpu_count)
                if ngpus_per_trial < 1:
                    self._logger.info('No GPU detected/allowed, using most conservative search space.')
                    default_config = LiteConfig()
                else:
                    default_config = DefaultConfig()
                config = default_config.merge(config, allow_new_key=True).asdict()

        # adjust cpu/gpu resources
        if not config.get('dist_ip_addrs', None):
            nthreads_per_trial = config.get('nthreads_per_trial', cpu_count)
            nthreads_per_trial = min(nthreads_per_trial, cpu_count)
            ngpus_per_trial = config.get('ngpus_per_trial', gpu_count)
            if ngpus_per_trial > gpu_count:
                ngpus_per_trial = gpu_count
                self._logger.warning(
                    "The number of requested GPUs is greater than the number of available GPUs. "
                    "Reducing the number to %d", ngpus_per_trial)
        else:
            raise ValueError('Please specify `nthreads_per_trial` and `ngpus_per_trial` '
                             'given that dist workers are available')


        # additional configs
        config['num_workers'] = nthreads_per_trial
        config['gpus'] = [int(i) for i in range(ngpus_per_trial)]
        config['seed'] = config.get('seed', np.random.randint(32,767))
        config['final_fit'] = False
        self._cleanup_disk = config.get('cleanup_disk', True)
        self._config = config

        # scheduler options
        self.scheduler = config.get('scheduler', 'local')
        self.search_strategy = config.get('search_strategy', 'random')
        self.search_options = config.get('search_options', {})
        self.scheduler_options = {
            'resource': {'num_cpus': nthreads_per_trial, 'num_gpus': ngpus_per_trial},
            'checkpoint': config.get('checkpoint', 'checkpoint/exp1.ag'),
            'num_trials': config.get('num_trials', 2),
            'time_out': config.get('time_limits', 60 * 60),
            'resume': (len(config.get('resume', '')) > 0),
            'visualizer': config.get('visualizer', 'none'),
            'time_attr': 'epoch',
            'reward_attr': 'acc_reward',
            'dist_ip_addrs': config.get('dist_ip_addrs', None),
            'searcher': self.search_strategy,
            'search_options': self.search_options,
            'max_reward': config.get('max_reward', 0.95)}
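To complement Example 3, a brief sketch of default construction for the classification task; instantiating with `config=None` follows the GPU-dependent default path above, and the attribute lookups simply echo values set at the end of `__init__`.

# Hypothetical default construction: with config=None, __init__ selects
# LiteConfig (no GPU) or DefaultConfig (GPU present) automatically.
task = ImageClassification(problem_type=MULTICLASS)

# The scheduler options assembled above are then available for inspection,
# e.g. classification reports 'acc_reward' and stops early at reward 0.95.
print(task.scheduler_options['reward_attr'])   # 'acc_reward'
print(task.scheduler_options['max_reward'])    # 0.95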