def read_config(self, config_params, datasets_index, **kwargs):
    """Read the config file and the datasets index

    Between the config file and the dataset index, we have enough information
    to configure the backend and the models.  We can also initialize the data readers

    :param config_params: The config file
    :param datasets_index: The index of datasets
    :return:
    """
    datasets_index = read_config_file_or_json(datasets_index, 'datasets')
    datasets_set = index_by_label(datasets_index)
    self.config_params = config_params
    basedir = self.get_basedir()
    if basedir is not None and not os.path.exists(basedir):
        logger.info('Creating: {}'.format(basedir))
        os.mkdir(basedir)
    self.config_params['train']['basedir'] = basedir
    # Read GPUS from env variables now so that the reader has access
    if self.config_params['model'].get('gpus', 1) == -1:
        self.config_params['model']['gpus'] = len(get_env_gpus())
    self.config_file = kwargs.get('config_file')
    self._setup_task(**kwargs)
    self._load_user_modules()
    self._configure_reporting(config_params.get('reporting', {}), **kwargs)
    self.dataset = datasets_set[self.config_params['dataset']]
    self.reader = self._create_task_specific_reader()
def read_config(self, config_params, datasets_index, **kwargs):
    """Read the config file and the datasets index

    Between the config file and the dataset index, we have enough information
    to configure the backend and the models.  We can also initialize the data readers

    :param config_params: The config file
    :param datasets_index: The index of datasets
    :return:
    """
    datasets_index = read_config_file_or_json(datasets_index, 'datasets')
    datasets_set = index_by_label(datasets_index)
    self.config_params = config_params
    config_file = deepcopy(config_params)
    basedir = self.get_basedir()
    if basedir is not None and not os.path.exists(basedir):
        logger.info('Creating: %s', basedir)
        os.makedirs(basedir)
    self.config_params['train']['basedir'] = basedir
    # Read GPUS from env variables now so that the reader has access
    if self.config_params['model'].get('gpus', -1) == -1:
        self.config_params['model']['gpus'] = len(get_env_gpus())
    self._setup_task(**kwargs)
    self._load_user_modules()
    self.dataset = get_dataset_from_key(self.config_params['dataset'], datasets_set)
    # Replace the dataset key in the copied config with the resolved dataset label;
    # some reporting hooks rely on this
    config_file['dataset'] = self.dataset['label']
    self._configure_reporting(config_params.get('reporting', {}), config_file=config_file, **kwargs)
    self.reader = self._create_task_specific_reader()
def read_config(self, config_params, datasets_index, **kwargs):
    """Read the config file and the datasets index

    Between the config file and the dataset index, we have enough information
    to configure the backend and the models.  We can also initialize the data readers

    :param config_params: The config file
    :param datasets_index: The index of datasets
    :return:
    """
    datasets_index = read_config_file_or_json(datasets_index, 'datasets')
    datasets_set = index_by_label(datasets_index)
    self.config_params = config_params
    basedir = self.get_basedir()
    if basedir is not None and not os.path.exists(basedir):
        logger.info('Creating: %s', basedir)
        os.makedirs(basedir)
    self.config_params['train']['basedir'] = basedir
    # Read GPUS from env variables now so that the reader has access
    if self.config_params['model'].get('gpus', -1) == -1:
        self.config_params['model']['gpus'] = len(get_env_gpus())
    self.config_file = kwargs.get('config_file')
    self._setup_task(**kwargs)
    self._load_user_modules()
    self._configure_reporting(config_params.get('reporting', {}), **kwargs)
    self.dataset = get_dataset_from_key(self.config_params['dataset'], datasets_set)
    self.reader = self._create_task_specific_reader()
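# For context, a minimal sketch of the shapes these helpers work with.  The dataset
# label and file paths below are invented for illustration, and index_by_label is
# assumed here to simply re-key the index list by each entry's 'label' field so that
# config_params['dataset'] (a label) can be resolved to a dataset entry.
datasets_index_example = [
    {
        'label': 'sst2',
        'train_file': '/data/sst2/train.txt',
        'valid_file': '/data/sst2/dev.txt',
        'test_file': '/data/sst2/test.txt',
    },
]

# Roughly what index_by_label is assumed to produce:
datasets_set_example = {entry['label']: entry for entry in datasets_index_example}
assert datasets_set_example['sst2']['train_file'] == '/data/sst2/train.txt'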
def test_none(remove_envs):
    # With no GPU-related environment variables set, get_env_gpus falls back to GPU '0'
    gold = ['0']
    gpus = get_env_gpus()
    assert gpus == gold
def test_visible_first(cuda_visible, nv_gpu):
    # When both sources are present, the cuda_visible value wins over nv_gpu
    gpus = get_env_gpus()
    assert gpus != nv_gpu
    assert gpus == cuda_visible
def test_visible(cuda_visible):
    gpus = get_env_gpus()
    assert gpus == cuda_visible
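# The tests above assume fixtures named remove_envs, cuda_visible and nv_gpu (normally
# defined in a conftest.py that is not shown here).  A minimal sketch of what they
# might look like follows; the environment variable names CUDA_VISIBLE_DEVICES and
# NV_GPU are assumptions about what get_env_gpus consults, not confirmed by this file.
import pytest


@pytest.fixture
def remove_envs(monkeypatch):
    # Ensure neither GPU-related variable is visible to get_env_gpus
    monkeypatch.delenv('CUDA_VISIBLE_DEVICES', raising=False)
    monkeypatch.delenv('NV_GPU', raising=False)


@pytest.fixture
def cuda_visible(monkeypatch):
    gpus = ['1', '2']
    monkeypatch.setenv('CUDA_VISIBLE_DEVICES', ','.join(gpus))
    return gpus


@pytest.fixture
def nv_gpu(monkeypatch):
    gpus = ['3']
    monkeypatch.setenv('NV_GPU', ','.join(gpus))
    return gpus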