def __init__(self, fn):
    """Wrap *fn* and build the default scheduler configuration for it."""
    self.fn = fn
    self._config = {}
    # NOTE(review): self._config is empty at this point, so every
    # .get() below returns its fallback value — confirm this is intended.
    num_cpus = get_cpu_count()
    num_gpus = get_gpu_count()
    self._config['scheduler'] = self._config.get('scheduler', 'fifo')
    self._config['scheduler_options'] = {
        'resource': {'num_cpus': num_cpus, 'num_gpus': num_gpus},
        'num_trials': self._config.get('num_trials', 2),
        'time_out': self._config.get('time_limits', 60 * 60),
        'time_attr': 'epoch',
        'reward_attr': 'accuracy',
        'searcher': self._config.get('searcher', 'random'),
        'search_options': self._config.get('search_options', None),
    }
    # hyperband requires early-stopping controls on top of the base options
    if self._config['scheduler'] == 'hyperband':
        self._config['scheduler_options'].update({
            'searcher': 'random',
            'max_t': self._config.get('epochs', 50),
            'grace_period': self._config.get('grace_period', 100),
        })
def test_frcnn_estimator():
    """Train FasterRCNNEstimator for one epoch, then predict/evaluate a holdout."""
    from gluoncv.auto.estimators import FasterRCNNEstimator
    detector = FasterRCNNEstimator({'train': {'epochs': 1},
                                    'gpus': list(range(get_gpu_count()))})
    summary = detector.fit(OBJECT_DETCTION_DATASET)
    assert summary.get('valid_map', 0) > 0
    _, _, holdout = OBJECT_DETCTION_DATASET.random_split()
    _ = detector.predict(holdout)
    _ = detector.evaluate(holdout)
def test_frcnn_estimator():
    """Smoke-test FasterRCNNEstimator training for a single epoch."""
    from gluoncv.auto.estimators import FasterRCNNEstimator
    detector = FasterRCNNEstimator({
        'train': {'epochs': 1},
        'gpus': list(range(get_gpu_count())),
    })
    summary = detector.fit(OBJECT_DETCTION_DATASET)
    assert summary.get('valid_map', 0) > 0
def test_image_classification_estimator():
    """Train ImageClassificationEstimator for one epoch and check accuracy."""
    from gluoncv.auto.estimators import ImageClassificationEstimator
    classifier = ImageClassificationEstimator({
        'train': {'epochs': 1, 'batch_size': 8},
        'gpus': list(range(get_gpu_count())),
    })
    summary = classifier.fit(IMAGE_CLASS_DATASET)
    assert summary.get('valid_acc', 0) > 0
def test_yolo3_estimator():
    """Train YOLOv3Estimator for one epoch and check validation mAP."""
    from gluoncv.auto.estimators import YOLOv3Estimator
    detector = YOLOv3Estimator({
        'train': {'epochs': 1, 'batch_size': 8},
        'gpus': list(range(get_gpu_count())),
    })
    summary = detector.fit(OBJECT_DETCTION_DATASET)
    assert summary.get('valid_map', 0) > 0
def test_ssd_estimator():
    """Train SSDEstimator for one epoch, then run predict and evaluate."""
    from gluoncv.auto.estimators import SSDEstimator
    detector = SSDEstimator({
        'train': {'epochs': 1, 'batch_size': 8},
        'gpus': list(range(get_gpu_count())),
    })
    summary = detector.fit(OBJECT_DETECTION_TRAIN)
    assert summary.get('valid_map', 0) > 0
    _ = detector.predict(OBJECT_DETECTION_TEST)
    _ = detector.evaluate(OBJECT_DETECTION_VAL)
def test_frcnn_estimator():
    """Exercise FasterRCNNEstimator fit/predict/evaluate plus save/load."""
    from gluoncv.auto.estimators import FasterRCNNEstimator
    detector = FasterRCNNEstimator({'train': {'epochs': 1},
                                    'gpus': list(range(get_gpu_count()))})
    train_mini, val_mini, test_mini = OBJECT_DETECTION_TRAIN.random_split(
        val_size=0.3, test_size=0.2)
    summary = detector.fit(train_mini)
    assert summary.get('valid_map', 0) > 0
    _ = detector.predict(test_mini)
    # single-image predictions: a file path and an in-memory PIL image
    detector.predict(OBJECT_DETECTION_TEST.iloc[0]['image'])
    with Image.open(OBJECT_DETECTION_TEST.iloc[0]['image']) as pil_image:
        detector.predict(pil_image)
    _ = detector.evaluate(val_mini)
    # test save/load
    _save_load_test(detector, 'frcnn.pkl')
def test_yolo3_estimator():
    """Train YOLOv3Estimator, then predict/evaluate on a random holdout."""
    from gluoncv.auto.estimators import YOLOv3Estimator
    detector = YOLOv3Estimator({
        'train': {'epochs': 1, 'batch_size': 8},
        'gpus': list(range(get_gpu_count())),
    })
    summary = detector.fit(OBJECT_DETCTION_DATASET)
    assert summary.get('valid_map', 0) > 0
    _, _, holdout = OBJECT_DETCTION_DATASET.random_split()
    _ = detector.predict(holdout)
    _ = detector.evaluate(holdout)
def test_frcnn_estimator():
    """Train FasterRCNNEstimator on a reduced split and validate mAP."""
    from gluoncv.auto.estimators import FasterRCNNEstimator
    detector = FasterRCNNEstimator({
        'train': {'epochs': 1},
        'gpus': list(range(get_gpu_count())),
    })
    train_mini, val_mini, test_mini = OBJECT_DETECTION_TRAIN.random_split(
        val_size=0.3, test_size=0.2)
    summary = detector.fit(train_mini)
    assert summary.get('valid_map', 0) > 0
    _ = detector.predict(test_mini)
    _ = detector.evaluate(val_mini)
def test_ssd_estimator():
    """Full SSDEstimator round-trip: fit, predict, evaluate, save/load parity."""
    from gluoncv.auto.estimators import SSDEstimator
    detector = SSDEstimator({
        'train': {'epochs': 1, 'batch_size': 8},
        'gpus': list(range(get_gpu_count())),
    })
    summary = detector.fit(OBJECT_DETECTION_TRAIN)
    assert summary.get('valid_map', 0) > 0
    _ = detector.predict(OBJECT_DETECTION_TEST)
    # single-image predictions: a file path and an in-memory PIL image
    detector.predict(OBJECT_DETECTION_TEST.iloc[0]['image'])
    with Image.open(OBJECT_DETECTION_TEST.iloc[0]['image']) as pil_image:
        detector.predict(pil_image)
    evaluate_result = detector.evaluate(OBJECT_DETECTION_VAL)
    # test save/load: a reloaded estimator must evaluate identically
    est2 = _save_load_test(detector, 'ssd.pkl')
    evaluate_result2 = est2.evaluate(OBJECT_DETECTION_VAL)
    np.testing.assert_array_equal(
        evaluate_result, evaluate_result2,
        err_msg=f'{evaluate_result} != \n {evaluate_result2}')
def build_config(pretrained, global_pool_type, sync_bn, no_aug, mixup, cutmix,
                 model_ema, model_ema_force_cpu, save_images, pin_mem,
                 use_multi_epochs_loader, amp, apex_amp, native_amp, prefetcher,
                 interpolation, batch_size, hflip, vflip, train_interpolation,
                 num_workers, tta):
    """Assemble a nested resnet50 training configuration from individual flags.

    Returns a dict grouped into 'model', 'dataset', 'train', 'augmentation',
    'model_ema', and 'misc' sections, with epochs pinned to 1 and 'gpus'
    derived from the detected GPU count.
    """
    cfg = {
        'model': {
            'model': 'resnet50',
            'pretrained': pretrained,
            'global_pool_type': global_pool_type,
        },
        'dataset': {'interpolation': interpolation},
        'train': {'batch_size': batch_size, 'sync_bn': sync_bn},
        'augmentation': {
            'no_aug': no_aug,
            'mixup': mixup,
            'cutmix': cutmix,
            'hflip': hflip,
            'vflip': vflip,
            'train_interpolation': train_interpolation,
        },
        'model_ema': {
            'model_ema': model_ema,
            'model_ema_force_cpu': model_ema_force_cpu,
        },
        'misc': {
            'num_workers': num_workers,
            'save_images': save_images,
            'pin_mem': pin_mem,
            'tta': tta,
            'use_multi_epochs_loader': use_multi_epochs_loader,
            'amp': amp,
            'apex_amp': apex_amp,
            'native_amp': native_amp,
            'prefetcher': prefetcher,
        },
    }
    # mixup/cutmix runs override the batch size to a fixed 2
    if cfg['augmentation']['mixup'] or cfg['augmentation']['cutmix']:
        cfg['train']['batch_size'] = 2
    cfg['train']['epochs'] = 1
    cfg['gpus'] = list(range(get_gpu_count()))
    # apex amp cause mem leak: https://github.com/NVIDIA/apex/issues/439
    cfg['misc']['apex_amp'] = False
    return cfg
def test_center_net_estimator():
    """Train CenterNetEstimator for one epoch; predict, evaluate, save/load."""
    from gluoncv.auto.estimators import CenterNetEstimator
    detector = CenterNetEstimator({
        'train': {'epochs': 1, 'batch_size': 8},
        'gpus': list(range(get_gpu_count())),
    })
    summary = detector.fit(OBJECT_DETECTION_TRAIN)
    assert summary.get('valid_map', 0) > 0
    _ = detector.predict(OBJECT_DETECTION_TEST)
    _ = detector.evaluate(OBJECT_DETECTION_VAL)
    # test save/load
    _save_load_test(detector, 'center_net.pkl')
def test_image_classification_estimator():
    """Train ImageClassificationEstimator; predict, evaluate, features, save/load."""
    from gluoncv.auto.estimators import ImageClassificationEstimator
    classifier = ImageClassificationEstimator({
        'train': {'epochs': 1, 'batch_size': 8},
        'gpus': list(range(get_gpu_count())),
    })
    summary = classifier.fit(IMAGE_CLASS_DATASET)
    assert summary.get('valid_acc', 0) > 0
    _ = classifier.predict(IMAGE_CLASS_TEST)
    _ = classifier.evaluate(IMAGE_CLASS_TEST)
    _ = classifier.predict_feature(IMAGE_CLASS_TEST)
    # test save/load
    _save_load_test(classifier, 'imgcls.pkl')
def test_yolo3_estimator():
    """Train YOLOv3Estimator; predict on dataset and single image, then save/load."""
    from gluoncv.auto.estimators import YOLOv3Estimator
    detector = YOLOv3Estimator({
        'train': {'epochs': 1, 'batch_size': 8},
        'gpus': list(range(get_gpu_count())),
    })
    summary = detector.fit(OBJECT_DETECTION_TRAIN)
    assert summary.get('valid_map', 0) > 0
    _ = detector.predict(OBJECT_DETECTION_TEST)
    detector.predict(OBJECT_DETECTION_TEST.iloc[0]['image'])
    _ = detector.evaluate(OBJECT_DETECTION_VAL)
    # test save/load
    _save_load_test(detector, 'yolo3.pkl')
def test_image_classification_estimator():
    """Smoke-test TorchImageClassificationEstimator: fit, predict, features, save/load."""
    classifier = TorchImageClassificationEstimator({
        'img_cls': {'model': 'resnet18'},
        'train': {'epochs': 1},
        'gpus': list(range(get_gpu_count())),
    })
    _ = classifier.fit(IMAGE_CLASS_DATASET)
    # batch and single-image prediction paths
    classifier.predict(IMAGE_CLASS_TEST)
    classifier.predict(IMAGE_CLASS_TEST.iloc[0]['image'])
    classifier.evaluate(IMAGE_CLASS_DATASET)
    classifier.predict_feature(IMAGE_CLASS_TEST)
    classifier.predict_feature(IMAGE_CLASS_TEST.iloc[0]['image'])
    _save_load_test(classifier, 'test.pkl')
def test_image_classification_estimator_custom_net():
    """Fit ImageClassificationEstimator with a user-supplied gluon network."""
    from gluoncv.auto.estimators import ImageClassificationEstimator
    from gluoncv.model_zoo import get_model
    backbone = get_model('resnet18_v1')
    classifier = ImageClassificationEstimator(
        {'train': {'epochs': 1, 'batch_size': 8},
         'gpus': list(range(get_gpu_count()))},
        net=backbone)
    summary = classifier.fit(IMAGE_CLASS_DATASET)
    assert summary.get('valid_acc', 0) > 0
    _ = classifier.predict(IMAGE_CLASS_TEST)
    _ = classifier.evaluate(IMAGE_CLASS_TEST)
    _ = classifier.predict_feature(IMAGE_CLASS_TEST)
def test_image_regression_estimator():
    """Torch estimator in regression mode: fit, predict, evaluate, features, save/load."""
    regressor = TorchImageClassificationEstimator(
        {'img_cls': {'model': 'resnet18'},
         'train': {'epochs': 1},
         'gpus': list(range(get_gpu_count()))},
        problem_type='regression')
    summary = regressor.fit(IMAGE_REGRESS_DATASET)
    assert summary.get('valid_score', 3) < 3
    regressor.predict(IMAGE_REGRESS_TEST)
    regressor.predict(IMAGE_REGRESS_TEST.iloc[0]['image'])
    regressor.evaluate(IMAGE_REGRESS_TEST)
    regressor.predict_feature(IMAGE_REGRESS_TEST)
    regressor.predict_feature(IMAGE_REGRESS_TEST.iloc[0]['image'])
    # test save/load
    _save_load_test(regressor, 'img_regression.pkl')
def test_image_classification_estimator_custom_net():
    """Fit the torch estimator with a user-built timm model (4-class head)."""
    import timm
    import torch.nn as nn
    custom_net = timm.create_model('resnet18')
    custom_net.fc = nn.Linear(512, 4)
    classifier = TorchImageClassificationEstimator(
        {'img_cls': {'model': 'resnet18'},
         'train': {'epochs': 1},
         'gpus': list(range(get_gpu_count()))},
        net=custom_net)
    _ = classifier.fit(IMAGE_CLASS_DATASET)
    classifier.predict(IMAGE_CLASS_TEST)
    classifier.predict(IMAGE_CLASS_TEST.iloc[0]['image'])
    classifier.evaluate(IMAGE_CLASS_DATASET)
    classifier.predict_feature(IMAGE_CLASS_TEST)
    classifier.predict_feature(IMAGE_CLASS_TEST.iloc[0]['image'])
    _save_load_test(classifier, 'test.pkl')
def test_image_classification_estimator_custom_net_optimizer():
    """Fit with a custom net + Adam optimizer, then save/load and re-run inference."""
    from gluoncv.auto.estimators import ImageClassificationEstimator
    from gluoncv.model_zoo import get_model
    from mxnet.optimizer import Adam
    backbone = get_model('resnet18_v1')
    adam = Adam(learning_rate=0.01, wd=1e-3)
    classifier = ImageClassificationEstimator(
        {'train': {'epochs': 1, 'batch_size': 8},
         'gpus': list(range(get_gpu_count()))},
        net=backbone, optimizer=adam)
    summary = classifier.fit(IMAGE_CLASS_DATASET)
    assert summary.get('valid_acc', 0) > 0
    _ = classifier.predict_feature(IMAGE_CLASS_TEST)
    # persist, reload, and make sure the reloaded estimator still works
    classifier.save('test_image_classification.pkl')
    classifier = ImageClassificationEstimator.load('test_image_classification.pkl')
    _ = classifier.predict(IMAGE_CLASS_TEST)
    _ = classifier.evaluate(IMAGE_CLASS_TEST)
    _ = classifier.predict_feature(IMAGE_CLASS_TEST)
def __init__(self, config=None, estimator=None, logger=None):
    """Initialize the ImageClassification task.

    Merges the user ``config`` over library defaults (LiteConfig when no
    GPU is available, DefaultConfig otherwise), clamps requested CPU/GPU
    resources to what the machine actually has, scales the batch size by
    the number of GPUs, and precomputes ``self.scheduler_options`` for
    the trial scheduler.

    Parameters
    ----------
    config : dict, optional
        User configuration; when falsy, pure defaults are used.
    estimator : optional
        NOTE(review): accepted but never used in this constructor —
        confirm whether it should be stored or removed.
    logger : logging.Logger, optional
        Logger to use; defaults to this module's logger at INFO level.

    Raises
    ------
    ValueError
        If ``dist_ip_addrs`` is truthy — the resource-adjustment branch
        then raises unconditionally, asking the caller to set
        ``nthreads_per_trial`` / ``ngpus_per_trial`` explicitly.
    """
    super(ImageClassification, self).__init__()
    self._fit_summary = {}
    self._logger = logger if logger is not None else logging.getLogger(
        __name__)
    self._logger.setLevel(logging.INFO)
    # cpu and gpu setting
    cpu_count = get_cpu_count()
    gpu_count = get_gpu_count()
    # default settings: no user config -> pick a default config by GPU
    # availability; otherwise merge user config over the chosen default
    if not config:
        if gpu_count < 1:
            self._logger.info(
                'No GPU detected/allowed, using most conservative search space.'
            )
            config = LiteConfig()
        else:
            config = DefaultConfig()
        config = config.asdict()
    else:
        if not config.get('dist_ip_addrs', None):
            ngpus_per_trial = config.get('ngpus_per_trial', gpu_count)
            if ngpus_per_trial < 1:
                self._logger.info(
                    'No GPU detected/allowed, using most conservative search space.'
                )
                default_config = LiteConfig()
            else:
                default_config = DefaultConfig()
            config = default_config.merge(config, allow_new_key=True).asdict()
    # adjust cpu/gpu resources: clamp requests to the detected hardware
    if not config.get('dist_ip_addrs', None):
        nthreads_per_trial = config.get('nthreads_per_trial', cpu_count)
        if nthreads_per_trial > cpu_count:
            nthreads_per_trial = cpu_count
        ngpus_per_trial = config.get('ngpus_per_trial', gpu_count)
        if ngpus_per_trial > gpu_count:
            ngpus_per_trial = gpu_count
            # NOTE(review): implicit string concatenation drops the space
            # between "GPUs." and "Reduce" in this log message.
            self._logger.warning(
                "The number of requested GPUs is greater than the number of available GPUs."
                "Reduce the number to %d", ngpus_per_trial)
    else:
        raise ValueError(
            'Please specify `nthreads_per_trial` and `ngpus_per_trial` '
            'given that dist workers are available')
    # additional configs
    config['num_workers'] = nthreads_per_trial
    config['gpus'] = [int(i) for i in range(ngpus_per_trial)]
    if config['gpus']:
        # scale batch size linearly with the number of GPUs in use
        config['batch_size'] = config.get('batch_size', 8) * len(
            config['gpus'])
        self._logger.info(
            'Increase batch size to %d based on the number of gpus %d',
            config['batch_size'], len(config['gpus']))
    config['seed'] = config.get('seed', np.random.randint(32, 767))
    self._config = config
    # scheduler options passed to the trial scheduler
    self.search_strategy = config.get('search_strategy', 'random')
    self.scheduler_options = {
        'resource': {
            'num_cpus': nthreads_per_trial,
            'num_gpus': ngpus_per_trial
        },
        'checkpoint': config.get('checkpoint', 'checkpoint/exp1.ag'),
        'num_trials': config.get('num_trials', 2),
        'time_out': config.get('time_limits', 60 * 60),
        'resume': (len(config.get('resume', '')) > 0),
        'visualizer': config.get('visualizer', 'none'),
        'time_attr': 'epoch',
        'reward_attr': 'acc_reward',
        'dist_ip_addrs': config.get('dist_ip_addrs', None),
        'searcher': self.search_strategy,
        'search_options': config.get('search_options', None)
    }
    # hyperband additionally needs early-stopping bounds
    if self.search_strategy == 'hyperband':
        self.scheduler_options.update({
            'searcher': 'random',
            'max_t': config.get('epochs', 50),
            'grace_period': config.get('grace_period',
                                       config.get('epochs', 50) // 4)
        })
def __init__(self, config=None, logger=None):
    """Initialize the ObjectDetection task.

    Merges the user ``config`` over library defaults (LiteConfig when no
    GPU is available, DefaultConfig otherwise), clamps requested CPU/GPU
    resources, reconciles the ``estimator``/``transfer`` search-space
    options, and precomputes ``self.scheduler_options``.

    Parameters
    ----------
    config : dict, optional
        User configuration; when falsy, pure defaults are used.
    logger : logging.Logger, optional
        Logger to use; defaults to this module's logger at INFO level.

    Raises
    ------
    ValueError
        If ``dist_ip_addrs`` is truthy (resources must then be set
        explicitly), or if no ``transfer`` model name matches the
        requested ``estimator``.
    """
    super(ObjectDetection, self).__init__()
    self._fit_summary = {}
    self._logger = logger if logger is not None else logging.getLogger(__name__)
    self._logger.setLevel(logging.INFO)
    # cpu and gpu setting
    cpu_count = get_cpu_count()
    gpu_count = get_gpu_count()
    # default settings: no user config -> pick a default config by GPU
    # availability; otherwise merge user config over the chosen default
    if not config:
        if gpu_count < 1:
            self._logger.info('No GPU detected/allowed, using most conservative search space.')
            config = LiteConfig()
        else:
            config = DefaultConfig()
        config = config.asdict()
    else:
        if not config.get('dist_ip_addrs', None):
            ngpus_per_trial = config.get('ngpus_per_trial', gpu_count)
            if ngpus_per_trial > gpu_count:
                ngpus_per_trial = gpu_count
            if ngpus_per_trial < 1:
                self._logger.info('No GPU detected/allowed, using most conservative search space.')
                default_config = LiteConfig()
            else:
                default_config = DefaultConfig()
            config = default_config.merge(config, allow_new_key=True).asdict()
    # adjust cpu/gpu resources: clamp requests to the detected hardware
    if not config.get('dist_ip_addrs', None):
        nthreads_per_trial = config.get('nthreads_per_trial', cpu_count)
        if nthreads_per_trial > cpu_count:
            nthreads_per_trial = cpu_count
        ngpus_per_trial = config.get('ngpus_per_trial', gpu_count)
        if ngpus_per_trial > gpu_count:
            ngpus_per_trial = gpu_count
            # NOTE(review): implicit string concatenation drops the space
            # between "GPUs." and "Reduce" in this log message.
            self._logger.warning(
                "The number of requested GPUs is greater than the number of available GPUs."
                "Reduce the number to %d", ngpus_per_trial)
    else:
        raise ValueError('Please specify `nthreads_per_trial` and `ngpus_per_trial` '
                         'given that dist workers are available')
    # fix estimator-transfer relationship: keep only transfer model names
    # that contain one of the requested estimator names as a substring
    estimator = config.get('estimator', None)
    transfer = config.get('transfer', None)
    if estimator is not None and transfer is not None:
        # normalize both options to plain lists of names
        if isinstance(estimator, ag.Space):
            estimator = estimator.data
        elif isinstance(estimator, str):
            estimator = [estimator]
        if isinstance(transfer, ag.Space):
            transfer = transfer.data
        elif isinstance(transfer, str):
            transfer = [transfer]
        valid_transfer = []
        for e in estimator:
            for t in transfer:
                if e in t:
                    valid_transfer.append(t)
        if not valid_transfer:
            raise ValueError(f'No matching `transfer` model for {estimator}')
        # collapse to a scalar when only one candidate survives
        if len(valid_transfer) == 1:
            config['transfer'] = valid_transfer[0]
        else:
            config['transfer'] = ag.Categorical(*valid_transfer)
    # additional configs
    config['num_workers'] = nthreads_per_trial
    config['gpus'] = [int(i) for i in range(ngpus_per_trial)]
    config['seed'] = config.get('seed', np.random.randint(32,767))
    self._config = config
    # scheduler options passed to the trial scheduler
    self.search_strategy = config.get('search_strategy', 'random')
    self.search_options = config.get('search_options', None)
    # always enable searcher debug logging
    if self.search_options:
        self.search_options.update({'debug_log': True})
    else:
        self.search_options = {'debug_log': True}
    self.scheduler_options = {
        'resource': {'num_cpus': nthreads_per_trial, 'num_gpus': ngpus_per_trial},
        'checkpoint': config.get('checkpoint', 'checkpoint/exp1.ag'),
        'num_trials': config.get('num_trials', 2),
        'time_out': config.get('time_limits', 60 * 60),
        'resume': (len(config.get('resume', '')) > 0),
        'visualizer': config.get('visualizer', 'none'),
        'time_attr': 'epoch',
        'reward_attr': 'map_reward',
        'dist_ip_addrs': config.get('dist_ip_addrs', None),
        'searcher': self.search_strategy,
        'search_options': self.search_options}
    # hyperband variants additionally need early-stopping bounds
    if self.search_strategy == 'hyperband':
        self.scheduler_options.update({
            'searcher': 'random',
            'max_t': config.get('epochs', 50),
            'grace_period': config.get('grace_period', config.get('epochs', 50) // 4)})
    elif self.search_strategy == 'bayesopt_hyperband':
        self.scheduler_options.update({
            'searcher': 'bayesopt',
            'max_t': config.get('epochs', 50),
            'grace_period': config.get('grace_period', config.get('epochs', 50) // 4)})
def test_image_regression_estimator():
    """Gluon estimator in regression mode: fit 3 epochs, predict (+proba), rmse eval."""
    from gluoncv.auto.estimators import ImageClassificationEstimator
    regressor = ImageClassificationEstimator(
        {'train': {'epochs': 3, 'batch_size': 8},
         'gpus': list(range(get_gpu_count()))},
        problem_type='regression')
    summary = regressor.fit(IMAGE_REGRESS_DATASET)
    assert summary.get('valid_score', 3) < 3
    _ = regressor.predict(IMAGE_REGRESS_TEST)
    _ = regressor.predict(IMAGE_REGRESS_TEST, with_proba=True)
    regressor.predict(IMAGE_REGRESS_TEST.iloc[0]['image'])
    _ = regressor.evaluate(IMAGE_REGRESS_TEST, metric_name='rmse')
    _ = regressor.predict_feature(IMAGE_REGRESS_TEST)
    regressor.predict_feature(IMAGE_REGRESS_TEST.iloc[0]['image'])
    # test save/load
    _save_load_test(regressor, 'img_regression.pkl')