def __init__(self,
             model,
             batch_processor,
             name=None,
             optimizer=None,
             work_dir=None,
             log_level=logging.INFO,
             logger=None,
             amp_enabled=False,
             loss_weights={
                 'rpn_class_loss': 1.,
                 'rpn_bbox_loss': 1.,
                 'rcnn_class_loss': 1.,
                 'rcnn_bbox_loss': 1.,
                 'reg_loss': 1.
             }):
    assert callable(batch_processor)
    self.model = model
    self.optimizer = optimizer
    self.batch_processor = batch_processor
    self.loss_weights = loss_weights

    # create work_dir
    if is_str(work_dir):
        self.work_dir = osp.abspath(work_dir)
        mkdir_or_exist(self.work_dir)
    elif work_dir is None:
        self.work_dir = None
    else:
        raise TypeError('"work_dir" must be a str or None')

    # get model name from the model class, unless a name is given explicitly
    self._model_name = self.model.__class__.__name__
    if name:
        self._model_name = name

    self._rank, self._local_rank, self._world_size, self._local_size = \
        get_dist_info()
    self.timestamp = get_time_str()
    if logger is None:
        self.logger = self.init_logger(work_dir, log_level)
    else:
        self.logger = logger
    self.log_buffer = LogBuffer()

    self.mode = None
    self._hooks = []
    self._epoch = 0
    self._iter = 0
    self._inner_iter = 0
    self._max_epochs = 0
    self._max_iters = 0
    self._amp_enabled = amp_enabled
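# Illustrative usage sketch (not part of the original module): constructing
# the runner above with re-weighted losses. `runner_cls` stands for the class
# that owns this __init__; the detector, batch processor, and optimizer
# settings are placeholders (assumes `import tensorflow as tf`, as in the
# surrounding code).
def _example_build_runner(runner_cls, detector, batch_processor):
    """Build a runner with custom loss weights; the weight values are arbitrary."""
    return runner_cls(
        model=detector,
        batch_processor=batch_processor,
        optimizer=tf.keras.optimizers.SGD(learning_rate=0.02, momentum=0.9),
        work_dir='./work_dirs/example',
        loss_weights={'rpn_class_loss': 1., 'rpn_bbox_loss': 1.,
                      'rcnn_class_loss': 1., 'rcnn_bbox_loss': 1.,
                      'reg_loss': 0.5})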
def __init__(self,
             model,
             batch_processor,
             optimizer=None,
             work_dir=None,
             log_level=logging.INFO,
             logger=None,
             amp_enabled=False,
             gradient_clip=15.0):
    assert callable(batch_processor)
    self.model = model
    self.optimizer = optimizer
    self.batch_processor = batch_processor

    # create work_dir
    if isinstance(work_dir, str):
        self.work_dir = osp.abspath(work_dir)
        mkdir_or_exist(self.work_dir)
    elif work_dir is None:
        self.work_dir = None
    else:
        raise TypeError('"work_dir" must be a str or None')

    # get model name from the model class
    self._model_name = self.model.__class__.__name__

    self._rank, self._local_rank, self._world_size, self._local_size = \
        get_dist_info()
    self.timestamp = get_time_str()
    if logger is None:
        self.logger = self.init_logger(work_dir, log_level)
    else:
        self.logger = logger
    self.log_buffer = LogBuffer()

    self.mode = None
    self._hooks = []
    self._epoch = 0
    self._iter = 0
    self._inner_iter = 0
    self._max_epochs = 0
    self._max_iters = 0
    self._amp_enabled = amp_enabled
    self.gradient_clip = gradient_clip  # a value <= 0.0 disables clipping
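# Sketch (assumption, not shown in this excerpt): how a global-norm threshold
# like `gradient_clip` is typically applied before the optimizer step.
# `tf.clip_by_global_norm` is a standard TensorFlow op; the helper itself is
# illustrative only and is not the runner's actual train step.
def _apply_gradient_clip(grads, gradient_clip):
    """Clip a list of gradient tensors by global norm; <= 0.0 disables clipping."""
    if gradient_clip and gradient_clip > 0.0:
        grads, _ = tf.clip_by_global_norm(grads, gradient_clip)
    return grads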
def main():
    args = parse_args()
    num_gpus = len(gpus)  # `gpus`: module-level list of visible GPU devices

    cfg = Config.fromfile(args.config)
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        total_bs = len(gpus) * cfg.data.imgs_per_gpu
        cfg.optimizer['learning_rate'] = \
            cfg.optimizer['learning_rate'] * total_bs / 8

    # init distributed env first, since logger depends on the dist info.
    init_dist()
    if not gpus:
        distributed = False  # single node, single GPU
    else:
        distributed = True

    # create work_dir
    mkdir_or_exist(osp.abspath(cfg.work_dir))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, '{}.log'.format(timestamp))
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # log some basic info
    logger.info('Distributed training: {}'.format(distributed))
    logger.info('TF MMDetection Version: {}'.format(__version__))
    logger.info('Config:\n{}'.format(cfg.text))
    logger.info('Tensorflow version: {}'.format(tf.version.VERSION))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}, deterministic: {}'.format(
            args.seed, args.deterministic))
        set_random_seed(args.seed + get_dist_info()[0],
                        deterministic=args.deterministic)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    # dummy forward pass to build the network variables before loading weights
    padded_img_side = max(cfg.data.train['scale'])
    img = tf.random.uniform(shape=[padded_img_side, padded_img_side, 3],
                            dtype=tf.float32)
    img_meta = tf.constant([
        465., 640., 3., 800., 1101., 3.,
        float(padded_img_side), float(padded_img_side), 3., 1.7204301, 0.
    ], dtype=tf.float32)
    _ = model((tf.expand_dims(img, axis=0), tf.expand_dims(img_meta, axis=0)),
              training=False)

    weights_path = cfg.model['backbone']['weights_path']
    logger.info('Loading weights from: {}'.format(weights_path))
    model.layers[0].layers[0].load_weights(weights_path,
                                           by_name=True,
                                           skip_mismatch=True)

    print_model_info(model, logger)

    # only single-stage workflows are supported; build the training dataset
    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) > 1:
        raise NotImplementedError

    train_detector(model,
                   datasets,
                   cfg,
                   num_gpus=num_gpus,
                   distributed=distributed,
                   mixed_precision=args.amp,
                   validate=args.validate,
                   timestamp=timestamp)
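# Sketch (illustrative, not part of the original file): the linear scaling
# rule used by `--autoscale_lr` above, factored out for clarity. The baseline
# batch size of 8 matches the `/ 8` in `main()`.
def _autoscale_lr(base_lr, num_gpus, imgs_per_gpu, base_batch_size=8):
    """Scale the learning rate linearly with the total batch size
    (https://arxiv.org/abs/1706.02677)."""
    total_bs = num_gpus * imgs_per_gpu
    return base_lr * total_bs / base_batch_size

# e.g. _autoscale_lr(0.02, num_gpus=16, imgs_per_gpu=2) == 0.08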
def main_sagemaker(args, cfg):
    """Main training entry point for jobs launched via SageMaker."""
    instance_name = cfg.sagemaker_job['job_name']
    s3_path = cfg.sagemaker_job['s3_path']
    decompress_data()  # set up data dirs based on the SageMaker channels

    num_gpus = len(gpus)

    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        total_bs = get_dist_info()[2] * cfg.data.imgs_per_gpu
        cfg.optimizer['learning_rate'] = \
            cfg.optimizer['learning_rate'] * total_bs / 8

    # init distributed env first, since logger depends on the dist info.
    init_dist()
    if not gpus:
        distributed = False  # single node, single GPU
    else:
        distributed = True

    # create work_dir
    mkdir_or_exist(osp.abspath(cfg.work_dir))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, '{}.log'.format(timestamp))
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # log some basic info
    logger.info('Distributed training: {}'.format(distributed))
    logger.info('TF MMDetection Version: {}'.format(__version__))
    logger.info('Config:\n{}'.format(cfg.text))
    logger.info('Tensorflow version: {}'.format(tf.version.VERSION))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}, deterministic: {}'.format(
            args.seed, args.deterministic))
        set_random_seed(args.seed + get_dist_info()[0],
                        deterministic=args.deterministic)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    # dummy forward pass to build the network variables before loading weights
    padded_img_side = max(cfg.data.train['scale'])
    img = tf.random.uniform(shape=[padded_img_side, padded_img_side, 3],
                            dtype=tf.float32)
    img_meta = tf.constant([
        465., 640., 3., 800., 1101., 3.,
        float(padded_img_side), float(padded_img_side), 3., 1.7204301, 0.
    ], dtype=tf.float32)
    _ = model((tf.expand_dims(img, axis=0), tf.expand_dims(img_meta, axis=0)),
              training=False)

    # SageMaker-specific path resolution
    import os
    import pathlib
    data_root = pathlib.Path(
        os.getenv('SM_CHANNEL_COCO')).joinpath('coco').as_posix()
    cfg.data.train['dataset_dir'] = data_root
    cfg.data.val['dataset_dir'] = data_root

    weights_file = cfg.model['backbone']['weights_path']
    weights_path = pathlib.Path(
        os.getenv('SM_CHANNEL_WEIGHTS')).joinpath(weights_file).as_posix()
    logger.info('Loading weights from: {}'.format(weights_path))
    if osp.splitext(weights_file)[1] == '.h5':
        # older Keras format from the Keras model zoo
        model.layers[0].layers[0].load_weights(weights_path,
                                               by_name=True,
                                               skip_mismatch=True)
    else:
        # SavedModel format assumed - copy weights layer by layer
        backbone_model = tf.keras.models.load_model(weights_path)
        for layer_idx, layer in enumerate(backbone_model.layers):
            if layer_idx < len(model.layers[0].layers[0].layers):
                model.layers[0].layers[0].layers[layer_idx].set_weights(
                    layer.get_weights())
                logger.info('Loaded weights for: {}'.format(layer.name))
        del backbone_model

    print_model_info(model, logger)

    # only single-stage workflows are supported; build the training dataset
    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) > 1:
        raise NotImplementedError

    train_detector(model,
                   datasets,
                   cfg,
                   num_gpus=num_gpus,
                   distributed=distributed,
                   mixed_precision=args.amp,
                   validate=args.validate,
                   timestamp=timestamp)
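# Sketch (assumption, not part of the original file): the two input channels
# read above via SM_CHANNEL_COCO and SM_CHANNEL_WEIGHTS correspond to
# SageMaker channels named 'coco' and 'weights'. How they might be declared
# when launching the job with the SageMaker Python SDK; the S3 URIs are
# placeholders and `estimator` is assumed to be configured elsewhere.
def _example_launch(estimator):
    """Start the training job with channel names matching the env vars above."""
    estimator.fit({
        'coco': 's3://my-bucket/datasets/coco/',
        'weights': 's3://my-bucket/pretrained/backbone/',
    })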
def main_ec2(args, cfg):
    """Main training entry point for jobs launched directly on EC2 instances."""
    # start logger
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, '{}.log'.format(timestamp))
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    num_gpus = len(gpus)

    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.resume_dir is not None:
        if os.path.exists(args.resume_dir):
            logger.info("RESUMING TRAINING")
            # get the latest checkpoint (each checkpoint is a subdirectory)
            all_chkpt = [
                os.path.join(args.resume_dir, d)
                for d in os.listdir(args.resume_dir)
                if os.path.isdir(os.path.join(args.resume_dir, d))
            ]
            if not all_chkpt:
                cfg.resume_from = None
            else:
                # set the latest checkpoint to resume_from
                latest_chkpt = max(all_chkpt, key=os.path.getmtime)
                cfg.resume_from = latest_chkpt
        else:
            logger.info("CHECKPOINT NOT FOUND, RESTARTING TRAINING")
            cfg.resume_from = None

    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        total_bs = get_dist_info()[2] * cfg.data.imgs_per_gpu
        cfg.optimizer['learning_rate'] = \
            cfg.optimizer['learning_rate'] * total_bs / 8

    # init distributed env first, since logger depends on the dist info.
    # init_dist()
    if not gpus:
        distributed = False  # single node, single GPU
    else:
        distributed = True

    # create work_dir
    mkdir_or_exist(osp.abspath(cfg.work_dir))

    # log some basic info
    logger.info('Distributed training: {}'.format(distributed))
    logger.info('TF MMDetection Version: {}'.format(__version__))
    logger.info('Config:\n{}'.format(cfg.text))
    logger.info('Tensorflow version: {}'.format(tf.version.VERSION))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}, deterministic: {}'.format(
            args.seed, args.deterministic))
        set_random_seed(args.seed + get_dist_info()[0],
                        deterministic=args.deterministic)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    # dummy forward pass to build the network variables before loading weights
    padded_img_side = max(cfg.data.train['scale'])
    img = tf.random.uniform(shape=[padded_img_side, padded_img_side, 3],
                            dtype=tf.float32)
    img_meta = tf.constant([
        465., 640., 3., 800., 1101., 3.,
        float(padded_img_side), float(padded_img_side), 3., 1.7204301, 0.
    ], dtype=tf.float32)
    _ = model((tf.expand_dims(img, axis=0), tf.expand_dims(img_meta, axis=0)),
              training=False)

    weights_path = cfg.model['backbone']['weights_path']
    logger.info('Loading weights from: {}'.format(weights_path))
    if osp.splitext(weights_path)[1] == '.h5':
        # older Keras format from the Keras model zoo
        model.layers[0].layers[0].load_weights(weights_path,
                                               by_name=True,
                                               skip_mismatch=True)
    else:
        # SavedModel format assumed - copy weights layer by layer
        backbone_model = tf.keras.models.load_model(weights_path)
        for layer_idx, layer in enumerate(backbone_model.layers):
            if layer_idx < len(model.layers[0].layers[0].layers):
                model.layers[0].layers[0].layers[layer_idx].set_weights(
                    layer.get_weights())
                logger.info('Loaded weights for: {}'.format(layer.name))
        del backbone_model

    print_model_info(model, logger)

    # only single-stage workflows are supported; build the training dataset
    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) > 1:
        raise NotImplementedError

    train_detector(model,
                   datasets,
                   cfg,
                   num_gpus=num_gpus,
                   distributed=distributed,
                   mixed_precision=args.amp,
                   validate=args.validate,
                   timestamp=timestamp)
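# Sketch (illustrative, not part of the original file): the latest-checkpoint
# lookup in `main_ec2` factored into a standalone helper. It assumes, as above,
# that each checkpoint is saved as a subdirectory of `resume_dir`, and relies
# on the standard-library `os` module already used in this file.
def _latest_checkpoint(resume_dir):
    """Return the most recently modified checkpoint subdirectory, or None."""
    if not os.path.isdir(resume_dir):
        return None
    candidates = [
        os.path.join(resume_dir, d) for d in os.listdir(resume_dir)
        if os.path.isdir(os.path.join(resume_dir, d))
    ]
    return max(candidates, key=os.path.getmtime) if candidates else None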