def train_net(image_set, year, root_path, devkit_path, pretrained, epoch,
              prefix, ctx, begin_epoch, end_epoch, frequent, kv_store,
              work_load_list=None, resume=False):
    # set up logger
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)

    # load symbol
    sym = get_vgg_rcnn()

    # setup multi-gpu
    config.TRAIN.BATCH_IMAGES *= len(ctx)
    config.TRAIN.BATCH_SIZE *= len(ctx)

    # load training data
    voc, roidb, means, stds = load_rpn_roidb(image_set, year, root_path, devkit_path, flip=True)
    train_data = ROIIter(roidb, batch_size=config.TRAIN.BATCH_IMAGES, shuffle=True,
                         mode='train', ctx=ctx, work_load_list=work_load_list)

    # infer max shape
    max_data_shape = [('data', (1, 3, 1000, 1000))]

    # load pretrained
    args, auxs = load_param(pretrained, epoch, convert=True)

    # initialize params
    if not resume:
        input_shapes = {k: v for k, v in train_data.provide_data + train_data.provide_label}
        arg_shape, _, _ = sym.infer_shape(**input_shapes)
        arg_shape_dict = dict(zip(sym.list_arguments(), arg_shape))
        args['cls_score_weight'] = mx.random.normal(mean=0, stdvar=0.01, shape=arg_shape_dict['cls_score_weight'])
        args['cls_score_bias'] = mx.nd.zeros(shape=arg_shape_dict['cls_score_bias'])
        args['bbox_pred_weight'] = mx.random.normal(mean=0, stdvar=0.001, shape=arg_shape_dict['bbox_pred_weight'])
        args['bbox_pred_bias'] = mx.nd.zeros(shape=arg_shape_dict['bbox_pred_bias'])

    # train
    solver = Solver(prefix, sym, ctx, begin_epoch, end_epoch, kv_store, args, auxs,
                    momentum=0.9, wd=0.0005, learning_rate=1e-3,
                    lr_scheduler=mx.lr_scheduler.FactorScheduler(30000, 0.1),
                    mutable_data_shape=True, max_data_shape=max_data_shape)
    solver.fit(train_data, frequent=frequent)

    # edit params and save: fold the bbox target normalization (means, stds)
    # back into the bbox_pred layer of every saved checkpoint
    for epoch in range(begin_epoch + 1, end_epoch + 1):
        arg_params, aux_params = load_checkpoint(prefix, epoch)
        arg_params['bbox_pred_weight'] = (arg_params['bbox_pred_weight'].T * mx.nd.array(stds)).T
        arg_params['bbox_pred_bias'] = arg_params['bbox_pred_bias'] * mx.nd.array(stds) + \
                                       mx.nd.array(means)
        save_checkpoint(prefix, epoch, arg_params, aux_params)
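# Why the checkpoint edit above works: training regresses normalized bbox
# targets t_norm = (t - mean) / std, so the network learns to emit t_norm.
# Scaling each bbox_pred output row by its std and shifting the bias by the
# mean makes the saved model emit raw deltas t directly. A minimal numpy
# sketch of this identity; fold_bbox_norm is illustrative, not from the repo.
import numpy as np

def fold_bbox_norm(weight, bias, means, stds):
    # fold target normalization into a linear layer y = W x + b, so the
    # folded layer predicts t = t_norm * std + mean per output channel
    weight = weight * stds[:, None]   # scale each output row by its std
    bias = bias * stds + means        # scale and shift the bias
    return weight, bias

# quick check: the folded output equals the un-normalized prediction
W = np.random.randn(4, 8)
b = np.random.randn(4)
means, stds = np.zeros(4), np.array([0.1, 0.1, 0.2, 0.2])
x = np.random.randn(8)
t_norm = W @ x + b
W2, b2 = fold_bbox_norm(W, b, means, stds)
assert np.allclose(W2 @ x + b2, t_norm * stds + means)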
def train_rcnn(
    image_set, year, root_path, devkit_path, pretrained, epoch,
    prefix, ctx, begin_epoch, end_epoch, frequent, kv_store,
    work_load_list=None,
):
    # load symbol
    sym = get_vgg_rcnn()

    # setup multi-gpu
    config.TRAIN.BATCH_IMAGES *= len(ctx)
    config.TRAIN.BATCH_SIZE *= len(ctx)

    # load training data
    voc, roidb, means, stds = load_rpn_roidb(image_set, year, root_path, devkit_path, flip=True)
    train_data = ROIIter(
        roidb, batch_size=config.TRAIN.BATCH_IMAGES, shuffle=True,
        mode="train", ctx=ctx, work_load_list=work_load_list
    )

    # infer max shape
    max_data_shape = [("data", (1, 3, 1000, 1000))]

    # load pretrained
    args, auxs = load_param(pretrained, epoch, convert=True)

    # initialize params
    arg_shape, _, _ = sym.infer_shape(data=(1, 3, 224, 224), rois=(1, 5))
    arg_shape_dict = dict(zip(sym.list_arguments(), arg_shape))
    args["cls_score_weight"] = mx.random.normal(mean=0, stdvar=0.01, shape=arg_shape_dict["cls_score_weight"])
    args["cls_score_bias"] = mx.nd.zeros(shape=arg_shape_dict["cls_score_bias"])
    args["bbox_pred_weight"] = mx.random.normal(mean=0, stdvar=0.001, shape=arg_shape_dict["bbox_pred_weight"])
    args["bbox_pred_bias"] = mx.nd.zeros(shape=arg_shape_dict["bbox_pred_bias"])

    # train
    solver = Solver(
        prefix, sym, ctx, begin_epoch, end_epoch, kv_store, args, auxs,
        momentum=0.9, wd=0.0005, learning_rate=1e-3,
        lr_scheduler=mx.lr_scheduler.FactorScheduler(30000, 0.1),
        mutable_data_shape=True, max_data_shape=max_data_shape,
    )
    solver.fit(train_data, frequent=frequent)

    # edit params and save
    for epoch in range(begin_epoch + 1, end_epoch + 1):
        arg_params, aux_params = load_checkpoint(prefix, epoch)
        arg_params["bbox_pred_weight"] = (arg_params["bbox_pred_weight"].T * mx.nd.array(stds)).T
        arg_params["bbox_pred_bias"] = arg_params["bbox_pred_bias"] * mx.nd.array(stds) + mx.nd.array(means)
        save_checkpoint(prefix, epoch, arg_params, aux_params)
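# A hypothetical invocation of the trainer above; the paths, dataset year,
# epochs, and GPU list are placeholders for illustration, not values taken
# from this repo:
import mxnet as mx

if __name__ == "__main__":
    train_rcnn(
        image_set="trainval", year="2007",
        root_path="data", devkit_path="data/VOCdevkit",
        pretrained="model/vgg16", epoch=1,
        prefix="model/rcnn", ctx=[mx.gpu(0)],
        begin_epoch=0, end_epoch=8, frequent=20,
        kv_store="local",
    )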
def train_net(config):
    import time  # used by the random seeding and the final sleep

    pGen, pKv, pRpn, pRoi, pBbox, pDataset, pModel, pOpt, pTest, \
        transform, data_name, label_name, metric_list = config.get_config(is_train=True)

    pGen = patch_config_as_nothrow(pGen)
    pKv = patch_config_as_nothrow(pKv)
    pRpn = patch_config_as_nothrow(pRpn)
    pRoi = patch_config_as_nothrow(pRoi)
    pBbox = patch_config_as_nothrow(pBbox)
    pDataset = patch_config_as_nothrow(pDataset)
    pModel = patch_config_as_nothrow(pModel)
    pOpt = patch_config_as_nothrow(pOpt)
    pTest = patch_config_as_nothrow(pTest)

    ctx = [mx.gpu(int(i)) for i in pKv.gpus]
    pretrain_prefix = pModel.pretrain.prefix
    pretrain_epoch = pModel.pretrain.epoch
    prefix = pGen.name
    save_path = os.path.join("experiments", prefix)
    begin_epoch = pOpt.schedule.begin_epoch
    end_epoch = pOpt.schedule.end_epoch
    lr_iter = pOpt.schedule.lr_iter

    # only rank == 0 prints debug info
    kvstore_type = "dist_sync" if os.environ.get("DMLC_ROLE") == "worker" else pKv.kvstore
    kv = mx.kvstore.create(kvstore_type)
    rank = kv.rank

    # for distributed training using a shared file system
    os.makedirs(save_path, exist_ok=True)

    from utils.logger import config_logger
    config_logger(os.path.join(save_path, "log.txt"))

    model_prefix = os.path.join(save_path, "checkpoint")

    # set up logger
    logger = logging.getLogger()

    sym = pModel.train_symbol

    # setup multi-gpu
    input_batch_size = pKv.batch_image * len(ctx)

    # print config
    # if rank == 0:
    #     logger.info(pprint.pformat(config))

    # load dataset and prepare imdb for training
    image_sets = pDataset.image_set
    roidbs = [pkl.load(open("data/cache/{}.roidb".format(i), "rb"), encoding="latin1")
              for i in image_sets]
    roidb = reduce(lambda x, y: x + y, roidbs)
    # filter out images without gt boxes
    roidb = [rec for rec in roidb if rec["gt_bbox"].shape[0] > 0]
    # add flipped roi records
    flipped_roidb = []
    for rec in roidb:
        new_rec = rec.copy()
        new_rec["flipped"] = True
        flipped_roidb.append(new_rec)
    roidb = roidb + flipped_roidb

    from core.detection_input import AnchorLoader
    train_data = AnchorLoader(roidb=roidb,
                              transform=transform,
                              data_name=data_name,
                              label_name=label_name,
                              batch_size=input_batch_size,
                              shuffle=True,
                              kv=kv,
                              num_worker=pGen.loader_worker or 12,
                              num_collector=pGen.loader_collector or 1,
                              worker_queue_depth=2,
                              collector_queue_depth=2)

    # infer shape
    worker_data_shape = dict(train_data.provide_data + train_data.provide_label)
    for key in worker_data_shape:
        worker_data_shape[key] = (pKv.batch_image,) + worker_data_shape[key][1:]
    arg_shape, _, aux_shape = sym.infer_shape(**worker_data_shape)

    _, out_shape, _ = sym.get_internals().infer_shape(**worker_data_shape)
    out_shape_dict = list(zip(sym.get_internals().list_outputs(), out_shape))
    _, out_shape, _ = sym.infer_shape(**worker_data_shape)
    terminal_out_shape_dict = zip(sym.list_outputs(), out_shape)

    if rank == 0:
        logger.info('parameter shape')
        logger.info(pprint.pformat(
            [i for i in out_shape_dict if not i[0].endswith('output')]))
        logger.info('intermediate output shape')
        logger.info(pprint.pformat(
            [i for i in out_shape_dict if i[0].endswith('output')]))
        logger.info('terminal output shape')
        logger.info(pprint.pformat([i for i in terminal_out_shape_dict]))

    # memonger
    if pModel.memonger:
        last_block = pModel.memonger_until or ""
        if rank == 0:
            logger.info("do memonger up to {}".format(last_block))
        type_dict = {k: np.float32 for k in worker_data_shape}
        sym = search_plan_to_layer(sym, last_block, 1000, type_dict=type_dict, **worker_data_shape)

    # load and initialize params
    if pOpt.schedule.begin_epoch != 0:
        arg_params, aux_params = load_checkpoint(model_prefix, begin_epoch)
    elif pModel.from_scratch:
        arg_params, aux_params = dict(), dict()
    else:
        arg_params, aux_params = load_checkpoint(pretrain_prefix, pretrain_epoch)

    if pModel.process_weight is not None:
        pModel.process_weight(sym, arg_params, aux_params)

    # merge batch normalization to save memory in fixed-bn training
    from utils.graph_optimize import merge_bn
    sym, arg_params, aux_params = merge_bn(sym, arg_params, aux_params)

    if pModel.random:
        mx.random.seed(int(time.time()))
        np.random.seed(int(time.time()))

    init = mx.init.Xavier(factor_type="in", rnd_type='gaussian', magnitude=2)
    init.set_verbosity(verbose=True)

    # create solver
    fixed_param = pModel.pretrain.fixed_param
    excluded_param = pModel.pretrain.excluded_param
    data_names = [k[0] for k in train_data.provide_data]
    label_names = [k[0] for k in train_data.provide_label]

    if pModel.teacher_param:
        from models.KD.utils import create_teacher_module
        from models.KD.detection_module import KDDetModule
        t_mod, t_label_name, t_label_shape = create_teacher_module(
            pModel.teacher_param, worker_data_shape, input_batch_size, ctx, rank, logger)
        mod = KDDetModule(sym,
                          teacher_module=t_mod,
                          teacher_label_names=t_label_name,
                          teacher_label_shapes=t_label_shape,
                          data_names=data_names,
                          label_names=label_names,
                          logger=logger,
                          context=ctx,
                          fixed_param=fixed_param,
                          excluded_param=excluded_param)
    else:
        mod = DetModule(sym,
                        data_names=data_names,
                        label_names=label_names,
                        logger=logger,
                        context=ctx,
                        fixed_param=fixed_param,
                        excluded_param=excluded_param)

    eval_metrics = mx.metric.CompositeEvalMetric(metric_list)

    # callback
    batch_end_callback = [callback.Speedometer(train_data.batch_size, frequent=pGen.log_frequency)]
    batch_end_callback += pModel.batch_end_callbacks or []
    epoch_end_callback = callback.do_checkpoint(model_prefix)
    sym.save(model_prefix + ".json")

    # decide learning rate
    lr_mode = pOpt.optimizer.lr_mode or 'step'
    base_lr = pOpt.optimizer.lr * kv.num_workers
    lr_factor = pOpt.schedule.lr_factor or 0.1

    iter_per_epoch = len(train_data) // input_batch_size
    total_iter = iter_per_epoch * (end_epoch - begin_epoch)
    lr_iter = [total_iter + it if it < 0 else it for it in lr_iter]
    lr_iter = [it // kv.num_workers for it in lr_iter]
    lr_iter = [it - iter_per_epoch * begin_epoch for it in lr_iter]
    lr_iter_discount = [it for it in lr_iter if it > 0]
    current_lr = base_lr * (lr_factor ** (len(lr_iter) - len(lr_iter_discount)))
    if rank == 0:
        logging.info('total iter {}'.format(total_iter))
        logging.info('lr {}, lr_iters {}'.format(current_lr, lr_iter_discount))
        logging.info('lr mode: {}'.format(lr_mode))

    if pOpt.warmup and pOpt.schedule.begin_epoch == 0:
        if rank == 0:
            logging.info('warmup lr {}, warmup step {}'.format(pOpt.warmup.lr, pOpt.warmup.iter))
        if lr_mode == 'step':
            lr_scheduler = WarmupMultiFactorScheduler(
                step=lr_iter_discount,
                factor=lr_factor,
                warmup=True,
                warmup_type=pOpt.warmup.type,
                warmup_lr=pOpt.warmup.lr,
                warmup_step=pOpt.warmup.iter)
        elif lr_mode == 'cosine':
            warmup_lr_scheduler = AdvancedLRScheduler(
                mode='linear',
                base_lr=pOpt.warmup.lr,
                target_lr=base_lr,
                niters=pOpt.warmup.iter)
            cosine_lr_scheduler = AdvancedLRScheduler(
                mode='cosine',
                base_lr=base_lr,
                target_lr=0,
                niters=(iter_per_epoch * (end_epoch - begin_epoch)) // kv.num_workers - pOpt.warmup.iter)
            lr_scheduler = LRSequential([warmup_lr_scheduler, cosine_lr_scheduler])
        else:
            raise NotImplementedError
    else:
        if lr_mode == 'step':
            lr_scheduler = WarmupMultiFactorScheduler(step=lr_iter_discount, factor=lr_factor)
        elif lr_mode == 'cosine':
            lr_scheduler = AdvancedLRScheduler(
                mode='cosine',
                base_lr=base_lr,
                target_lr=0,
                niters=iter_per_epoch * (end_epoch - begin_epoch) // kv.num_workers)
        else:
            lr_scheduler = None

    # optimizer
    optimizer_params = dict(momentum=pOpt.optimizer.momentum,
                            wd=pOpt.optimizer.wd,
                            learning_rate=current_lr,
                            lr_scheduler=lr_scheduler,
                            rescale_grad=1.0 / (len(ctx) * kv.num_workers),
                            clip_gradient=pOpt.optimizer.clip_gradient)
    if pKv.fp16:
        optimizer_params['multi_precision'] = True
        # undo the fixed loss scale of 128 applied to the fp16 loss
        optimizer_params['rescale_grad'] /= 128.0

    profile = pGen.profile or False
    if profile:
        mx.profiler.set_config(profile_all=True, filename=os.path.join(save_path, "profile.json"))

    # train
    mod.fit(train_data=train_data,
            eval_metric=eval_metrics,
            epoch_end_callback=epoch_end_callback,
            batch_end_callback=batch_end_callback,
            kvstore=kv,
            optimizer=pOpt.optimizer.type,
            optimizer_params=optimizer_params,
            initializer=init,
            allow_missing=True,
            arg_params=arg_params,
            aux_params=aux_params,
            begin_epoch=begin_epoch,
            num_epoch=end_epoch,
            profile=profile)

    logging.info("Training has finished")
    time.sleep(10)
    logging.info("Exiting")
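# A worked example of the schedule arithmetic above, with hypothetical
# numbers: negative entries in lr_iter count back from the end of training,
# every entry is divided among workers, steps already passed when resuming
# from begin_epoch are dropped, and each dropped step decays the starting lr.
def resolve_lr_schedule(lr_iter, iter_per_epoch, begin_epoch, end_epoch,
                        num_workers, base_lr, lr_factor=0.1):
    total_iter = iter_per_epoch * (end_epoch - begin_epoch)
    lr_iter = [total_iter + it if it < 0 else it for it in lr_iter]
    lr_iter = [it // num_workers for it in lr_iter]
    lr_iter = [it - iter_per_epoch * begin_epoch for it in lr_iter]
    lr_iter_discount = [it for it in lr_iter if it > 0]
    # every dropped step means the decay already happened before resuming
    current_lr = base_lr * lr_factor ** (len(lr_iter) - len(lr_iter_discount))
    return lr_iter_discount, current_lr

# e.g. a 6-epoch run at 10000 iter/epoch on 1 worker, decaying at the
# start of the last 2 epochs and of the last epoch:
steps, lr = resolve_lr_schedule([-20000, -10000], 10000, 0, 6, 1, 0.01)
assert steps == [40000, 50000] and lr == 0.01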
def train_rcnn(image_set, year, root_path, devkit_path, pretrained, epoch,
               prefix, ctx, begin_epoch, end_epoch, frequent, kv_store,
               work_load_list=None, resume=False, proposal='rpn'):
    # set up logger
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)

    # load symbol
    sym = get_vgg_rcnn()

    # setup multi-gpu
    config.TRAIN.BATCH_IMAGES *= len(ctx)
    config.TRAIN.BATCH_SIZE *= len(ctx)

    # load training data
    voc, roidb, means, stds = eval('load_' + proposal + '_roidb')(image_set, year, root_path,
                                                                  devkit_path, flip=True)
    train_data = ROIIter(roidb, batch_size=config.TRAIN.BATCH_IMAGES, shuffle=True,
                         mode='train', ctx=ctx, work_load_list=work_load_list)

    # infer max shape
    max_data_shape = [('data', (config.TRAIN.BATCH_IMAGES, 3, 1000, 1000))]

    # load pretrained
    args, auxs = load_param(pretrained, epoch, convert=True)

    # initialize params
    if not resume:
        input_shapes = {k: v for k, v in train_data.provide_data + train_data.provide_label}
        arg_shape, _, _ = sym.infer_shape(**input_shapes)
        arg_shape_dict = dict(zip(sym.list_arguments(), arg_shape))
        args['cls_score_weight'] = mx.random.normal(0, 0.01, shape=arg_shape_dict['cls_score_weight'])
        args['cls_score_bias'] = mx.nd.zeros(shape=arg_shape_dict['cls_score_bias'])
        args['bbox_pred_weight'] = mx.random.normal(0, 0.001, shape=arg_shape_dict['bbox_pred_weight'])
        args['bbox_pred_bias'] = mx.nd.zeros(shape=arg_shape_dict['bbox_pred_bias'])

    # prepare training
    if config.TRAIN.FINETUNE:
        fixed_param_prefix = ['conv1', 'conv2', 'conv3', 'conv4', 'conv5']
    else:
        fixed_param_prefix = ['conv1', 'conv2']
    data_names = [k[0] for k in train_data.provide_data]
    label_names = [k[0] for k in train_data.provide_label]
    batch_end_callback = Speedometer(train_data.batch_size, frequent=frequent)
    epoch_end_callback = mx.callback.do_checkpoint(prefix)
    if config.TRAIN.HAS_RPN is True:
        eval_metric = AccuracyMetric(use_ignore=True, ignore=-1)
        cls_metric = LogLossMetric(use_ignore=True, ignore=-1)
    else:
        eval_metric = AccuracyMetric()
        cls_metric = LogLossMetric()
    bbox_metric = SmoothL1LossMetric()
    eval_metrics = mx.metric.CompositeEvalMetric()
    for child_metric in [eval_metric, cls_metric, bbox_metric]:
        eval_metrics.add(child_metric)
    optimizer_params = {'momentum': 0.9,
                        'wd': 0.0005,
                        'learning_rate': 0.001,
                        'lr_scheduler': mx.lr_scheduler.FactorScheduler(30000, 0.1),
                        'rescale_grad': (1.0 / config.TRAIN.BATCH_SIZE)}

    # train
    mod = MutableModule(sym, data_names=data_names, label_names=label_names,
                        logger=logger, context=ctx, work_load_list=work_load_list,
                        max_data_shapes=max_data_shape, fixed_param_prefix=fixed_param_prefix)
    mod.fit(train_data, eval_metric=eval_metrics, epoch_end_callback=epoch_end_callback,
            batch_end_callback=batch_end_callback, kvstore=kv_store,
            optimizer='sgd', optimizer_params=optimizer_params,
            arg_params=args, aux_params=auxs,
            begin_epoch=begin_epoch, num_epoch=end_epoch)

    # edit params and save
    for epoch in range(begin_epoch + 1, end_epoch + 1):
        arg_params, aux_params = load_checkpoint(prefix, epoch)
        arg_params['bbox_pred_weight'] = (arg_params['bbox_pred_weight'].T * mx.nd.array(stds)).T
        arg_params['bbox_pred_bias'] = arg_params['bbox_pred_bias'] * mx.nd.array(stds) + \
                                       mx.nd.array(means)
        save_checkpoint(prefix, epoch, arg_params, aux_params)
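# The eval('load_' + proposal + '_roidb') call above resolves the roidb
# loader by string at runtime. A minimal sketch of an equivalent explicit
# dispatch table; it assumes a selective-search loader named load_ss_roidb
# exists alongside load_rpn_roidb, which may not match this module exactly:
def get_roidb_loader(proposal):
    # explicit mapping from proposal method to loader function
    loaders = {'rpn': load_rpn_roidb, 'ss': load_ss_roidb}
    try:
        return loaders[proposal]
    except KeyError:
        raise ValueError('unknown proposal method: {}'.format(proposal))

# usage: voc, roidb, means, stds = get_roidb_loader(proposal)(
#     image_set, year, root_path, devkit_path, flip=True)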
                          transform=transform,
                          data_name=data_name,
                          label_name=label_name,
                          batch_size=1,
                          shuffle=False,
                          num_worker=4,
                          num_collector=2,
                          worker_queue_depth=2,
                          collector_queue_depth=2)
    print("total number of images: {}".format(loader.total_batch))

    data_names = [k[0] for k in loader.provide_data]

    if index_split == 0:
        arg_params, aux_params = load_checkpoint(pTest.model.prefix, pTest.model.epoch)
        if pModel.process_weight is not None:
            pModel.process_weight(sym, arg_params, aux_params)

        # merge batch normalization
        from utils.graph_optimize import merge_bn
        sym, arg_params, aux_params = merge_bn(sym, arg_params, aux_params)

        # replicate one inference module per GPU
        for i in pKv.gpus:
            ctx = mx.gpu(i)
            mod = DetModule(sym, data_names=data_names, context=ctx)
            mod.bind(data_shapes=loader.provide_data, for_training=False)
            mod.set_params(arg_params, aux_params, allow_extra=False)
            execs.append(mod)

    all_outputs = []