def validate_label_config_on_derived_input_schema( self, config_string_or_parsed_config): """ Validate label config on input schemas (tasks types and data keys) derived from imported tasks :param config_string_or_parsed_config: label config string or parsed config object :return: True if config match already imported tasks """ # check if schema exists, i.e. at least one task has been uploaded if not self.derived_input_schema: return config = config_string_or_parsed_config if isinstance(config, str): config = parse_config(config) input_types, input_values = set(), set() for input_items in map(itemgetter('inputs'), config.values()): for input_item in input_items: input_types.add(input_item['type']) input_values.add(input_item['value']) # check input data values: they must be in schema for item in input_values: if item not in self.derived_input_schema: raise ValidationError( 'You have already imported tasks and they are incompatible with a new config. ' 'You\'ve specified value=${item}, but imported tasks contain only keys: {input_schema_values}' .format(item=item, input_schema_values=list( self.derived_input_schema)))
def main(): """Main function.""" # Parse arguments. args = parse_args() # Parse configurations. config = parse_config(args.config) config = update_config(config, args.options) config.work_dir = args.work_dir config.checkpoint = args.checkpoint config.launcher = args.launcher config.backend = args.backend if not os.path.isfile(config.checkpoint): raise FileNotFoundError(f'Checkpoint file `{config.checkpoint}` is ' f'missing!') # Set CUDNN. config.cudnn_benchmark = config.get('cudnn_benchmark', True) config.cudnn_deterministic = config.get('cudnn_deterministic', False) torch.backends.cudnn.benchmark = config.cudnn_benchmark torch.backends.cudnn.deterministic = config.cudnn_deterministic # Setting for launcher. config.is_distributed = True init_dist(config.launcher, backend=config.backend) config.num_gpus = dist.get_world_size() # Setup logger. if dist.get_rank() == 0: logger_type = config.get('logger_type', 'normal') logger = build_logger(logger_type, work_dir=config.work_dir) shutil.copy(args.config, os.path.join(config.work_dir, 'config.py')) commit_id = os.popen('git rev-parse HEAD').readline() logger.info(f'Commit ID: {commit_id}') else: logger = build_logger('dumb', work_dir=config.work_dir) # Start inference. runner = getattr(runners, config.runner_type)(config, logger) runner.load(filepath=config.checkpoint, running_metadata=False, learning_rate=False, optimizer=False, running_stats=False) if args.synthesis_num > 0: num = args.synthesis_num logger.print() logger.info(f'Synthesizing images ...') runner.synthesize(num, html_name=f'synthesis_{num}.html') logger.info(f'Finish synthesizing {num} images.') if args.fid_num > 0: num = args.fid_num logger.print() logger.info(f'Testing FID ...') fid_value = runner.fid(num, align_tf=not args.use_torchvision) logger.info(f'Finish testing FID on {num} samples. ' f'The result is {fid_value:.6f}.')
def validate_label_config(self, config_string): logger.debug('Validate label config') self.project_obj.validate_label_config(config_string) logger.debug('Get parsed config') parsed_config = parse_config(config_string) logger.debug('Validate label config on derived input schema') self.validate_label_config_on_derived_input_schema(parsed_config) logger.debug('Validate label config on derived output schema') self.validate_label_config_on_derived_output_schema(parsed_config)
def main(): args = parse_args() # Parse configurations. config = parse_config(args.config) os.environ['CUDA_VISIBLE_DEVICES'] = config.gpus timestamp = datetime.datetime.now() version = '%d-%d-%d-%02.0d-%02.0d-%02.0d' % \ (timestamp.year, timestamp.month, timestamp.day, timestamp.hour, timestamp.minute, timestamp.second) config.work_dir = os.path.join(config.work_dir, config.checkpoint_path.split('/')[-3], version) logger_type = config.get('logger_type', 'normal') logger = build_logger(logger_type, work_dir=config.work_dir) shutil.copy(args.config, os.path.join(config.work_dir, 'config.py')) commit_id = os.popen('git rev-parse HEAD').readline() logger.info(f'Commit ID: {commit_id}') runner = SefaRunner(config, logger) runner.run()
def validate_label_config_on_derived_output_schema( self, config_string_or_parsed_config): """ Validate label config on output schema (from_names, to_names and labeling types) derived from completions :param config_string_or_parsed_config: label config string or parsed config object :return: True if config match already created completions """ output_schema = self.derived_output_schema # check if schema exists, i.e. at least one completion has been created if not output_schema['from_name_to_name_type']: return config = config_string_or_parsed_config if isinstance(config, str): config = parse_config(config) completion_tuples = set() for from_name, to in config.items(): completion_tuples.add( (from_name, to['to_name'][0], to['type'].lower())) for from_name, to_name, type in output_schema[ 'from_name_to_name_type']: if (from_name, to_name, type) not in completion_tuples: raise ValidationError( 'You\'ve already completed some tasks, but some of them couldn\'t be loaded with this config: ' 'name={from_name}, toName={to_name}, type={type} are expected' .format(from_name=from_name, to_name=to_name, type=type)) for from_name, expected_label_set in output_schema['labels'].items(): if from_name not in config: raise ValidationError( 'You\'ve already completed some tasks, but some of them couldn\'t be loaded with this config: ' 'name=' + from_name + ' is expected') found_labels = set(config[from_name]['labels']) extra_labels = list(expected_label_set - found_labels) if extra_labels: raise ValidationError( 'You\'ve already completed some tasks, but some of them couldn\'t be loaded with this config: ' 'there are labels already created for "{from_name}":\n{extra_labels}' .format(from_name=from_name, extra_labels=extra_labels))
def main(): """Main function.""" # Parse arguments. args = parse_args() # Parse configurations. config = parse_config(args.config) config = update_config(config, args.options) os.environ['CUDA_VISIBLE_DEVICES'] = config.gpus timestamp = datetime.datetime.now() version = '%d-%d-%d-%02.0d-%02.0d-%02.0d' % \ (timestamp.year, timestamp.month, timestamp.day, timestamp.hour, timestamp.minute, timestamp.second) work_dir = os.path.join(args.work_dir, version) config.work_dir = work_dir config.resume_path = args.resume_path config.weight_path = args.weight_path config.seed = args.seed config.launcher = args.launcher config.backend = args.backend # Set CUDNN. config.cudnn_benchmark = config.get('cudnn_benchmark', True) config.cudnn_deterministic = config.get('cudnn_deterministic', False) torch.backends.cudnn.benchmark = config.cudnn_benchmark torch.backends.cudnn.deterministic = config.cudnn_deterministic # Set random seed. if config.seed is not None: random.seed(config.seed) np.random.seed(config.seed) torch.manual_seed(config.seed) config.cudnn_deterministic = True torch.backends.cudnn.deterministic = True warnings.warn('Random seed is set for training! ' 'This will turn on the CUDNN deterministic setting, ' 'which may slow down the training considerably! ' 'Unexpected behavior can be observed when resuming from ' 'checkpoints.') # Set launcher. config.is_distributed = True init_dist(config.launcher, backend=config.backend) config.num_gpus = dist.get_world_size() # Setup logger. if dist.get_rank() == 0: logger_type = config.get('logger_type', 'normal') logger = build_logger(logger_type, work_dir=config.work_dir) shutil.copy(args.config, os.path.join(config.work_dir, 'config.py')) commit_id = os.popen('git rev-parse HEAD').readline() logger.info(f'Commit ID: {commit_id}') else: logger = build_logger('dumb', work_dir=config.work_dir) # Start training. runner = getattr(runners, config.runner_type)(config, logger) if config.resume_path: runner.load(filepath=config.resume_path, running_metadata=True, learning_rate=True, optimizer=True, running_stats=False) if config.weight_path: runner.load(filepath=config.weight_path, running_metadata=False, learning_rate=False, optimizer=False, running_stats=False) runner.train()
def load_label_config(self): self.label_config_full = config_comments_free( open(self.config['label_config'], encoding='utf8').read()) self.label_config_line = config_line_stripped(self.label_config_full) self.parsed_label_config = parse_config(self.label_config_line) self.input_data_tags = self.get_input_data_tags(self.label_config_line)
def main(): """Main function.""" # Parse arguments. args = parse_args() # Parse configurations. config = parse_config(args.config) config = update_config(config, args.options) config.work_dir = args.work_dir config.resume_path = args.resume_path config.weight_path = args.weight_path config.seed = args.seed config.launcher = args.launcher config.backend = args.backend if args.adv != None: config.loss['g_loss_kwargs']['adv'] = float(args.adv) if args.lamb != None: config.loss['g_loss_kwargs']['lamb'] = float(args.lamb) if args.metric != None: config.loss['g_loss_kwargs']['metric'] = args.metric if args.baseLR != None: config.modules['generator']['opt']['base_lr'] = float(args.baseLR) / 2 if args.nethz != None: config.nethz = args.nethz config.savename = args.adv + '_' + args.lamb.replace( '.', 'dot') + '_' + args.metric.replace( '.', 'dot') + '_' + args.baseLR.replace('.', 'dot') config.data['train'][ 'root_dir'] = '/cluster/scratch/' + config.nethz + '/data' config.data['val'][ 'root_dir'] = '/cluster/scratch/' + config.nethz + '/data' # Set CUDNN. config.cudnn_benchmark = config.get('cudnn_benchmark', True) config.cudnn_deterministic = config.get('cudnn_deterministic', False) torch.backends.cudnn.benchmark = config.cudnn_benchmark torch.backends.cudnn.deterministic = config.cudnn_deterministic # Set random seed. config.seed = 26 if config.seed is not None: random.seed(config.seed) np.random.seed(config.seed) torch.manual_seed(config.seed) config.cudnn_deterministic = True torch.backends.cudnn.deterministic = True warnings.warn('Random seed is set for training! ' 'This will turn on the CUDNN deterministic setting, ' 'which may slow down the training considerably! ' 'Unexpected behavior can be observed when resuming from ' 'checkpoints.') # Set launcher. config.is_distributed = True init_dist(config.launcher, backend=config.backend) config.num_gpus = dist.get_world_size() # Setup logger. if dist.get_rank() == 0: logger_type = config.get('logger_type', 'normal') logger = build_logger(logger_type, work_dir=config.work_dir) shutil.copy(args.config, os.path.join(config.work_dir, 'config.py')) commit_id = os.popen('git rev-parse HEAD').readline() logger.info(f'Commit ID: {commit_id}') else: logger = build_logger('dumb', work_dir=config.work_dir) # Start training. runner = getattr(runners, config.runner_type)(config, logger) if config.resume_path: runner.load(filepath=config.resume_path, running_metadata=True, learning_rate=True, optimizer=True, running_stats=False) if config.weight_path: runner.load(filepath=config.weight_path, running_metadata=False, learning_rate=False, optimizer=False, running_stats=False) runner.train()