def resnet50_eval(args_opt):
    class_num = cfg.class_num
    local_data_path = '/cache/data'
    ckpt_file = args_opt.checkpoint_path.split('/')[-1]
    local_ckpt_path = '/cache/' + ckpt_file

    # set graph mode and parallel mode
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False)

    # data download
    print('Download data.')
    mox.file.copy_parallel(src_url=args_opt.data_url, dst_url=local_data_path)
    mox.file.copy_parallel(src_url=args_opt.checkpoint_path, dst_url=local_ckpt_path)

    # create dataset
    dataset = create_dataset(dataset_path=local_data_path, do_train=False,
                             batch_size=cfg.batch_size)

    # load checkpoint into net
    net = resnet50(class_num=class_num)
    param_dict = load_checkpoint(local_ckpt_path)
    load_param_into_net(net, param_dict)
    net.set_train(False)

    # define loss and model
    if not cfg.use_label_smooth:
        cfg.label_smooth_factor = 0.0
    loss = CrossEntropySmooth(sparse=True, reduction='mean',
                              smooth_factor=cfg.label_smooth_factor,
                              num_classes=cfg.class_num)
    model = Model(net, loss_fn=loss, metrics={'top_1_accuracy', 'top_5_accuracy'})

    # eval model
    res = model.eval(dataset)
    print("result:", res, "ckpt=", args_opt.checkpoint_path)
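# The function above reads `data_url` and `checkpoint_path` from `args_opt`, but
# the entry point is not shown. A minimal sketch of the wiring, assuming
# standard ModelArts-style argument names (the parser details are an assumption,
# not part of the original script):
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='ResNet50 evaluation on ModelArts')
    parser.add_argument('--data_url', type=str, required=True,
                        help='OBS path of the evaluation dataset')
    parser.add_argument('--checkpoint_path', type=str, required=True,
                        help='OBS path of the checkpoint file')
    resnet50_eval(parser.parse_args())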
def eval_():
    # set args
    dev = "GPU"
    compute_type = str(args_opt.dtype).lower()
    ckpt_dir = str(args_opt.ckpt_path)
    total_batch = int(args_opt.batch_size)

    # init context
    if args_opt.mode == "GRAPH":
        mode = context.GRAPH_MODE
    else:
        mode = context.PYNATIVE_MODE
    context.set_context(mode=mode, device_target=dev, save_graphs=False)

    # create dataset
    dataset = create_dataset(dataset_path=args_opt.dataset_path, do_train=False,
                             repeat_num=1, batch_size=total_batch,
                             target=dev, dtype=compute_type)

    # define net
    net = resnet(class_num=1001, dtype=compute_type)

    # load checkpoint
    param_dict = load_checkpoint(ckpt_dir)
    load_param_into_net(net, param_dict)
    net.set_train(False)

    # define loss, model
    loss = CrossEntropySmooth(sparse=True, reduction='mean',
                              smooth_factor=0.1, num_classes=1001)
    model = Model(net, loss_fn=loss, metrics={'top_1_accuracy', 'top_5_accuracy'})

    # eval model
    print("======== START EVAL RESNET50 ON GPU ========")
    res = model.eval(dataset)
    print("result:", res, "ckpt=", ckpt_dir)
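# Both eval paths above depend on CrossEntropySmooth, which is not defined in
# this section. A sketch consistent with the constructor calls, in the style of
# MindSpore model-zoo helpers (base class and module paths vary across MindSpore
# releases), expands sparse labels into smoothed one-hot targets before a
# softmax cross entropy:
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.ops import functional as F
from mindspore.nn.loss.loss import LossBase  # `_Loss` in older MindSpore releases


class CrossEntropySmooth(LossBase):
    """Cross entropy with label smoothing (sketch)."""

    def __init__(self, sparse=True, reduction='mean', smooth_factor=0.0, num_classes=1000):
        super(CrossEntropySmooth, self).__init__()
        self.onehot = ops.OneHot()
        self.sparse = sparse
        # Keep 1 - smooth_factor on the true class; spread the rest evenly
        # over the remaining num_classes - 1 classes.
        self.on_value = Tensor(1.0 - smooth_factor, mstype.float32)
        self.off_value = Tensor(1.0 * smooth_factor / (num_classes - 1), mstype.float32)
        self.ce = nn.SoftmaxCrossEntropyWithLogits(reduction=reduction)

    def construct(self, logit, label):
        if self.sparse:
            # Convert sparse integer labels to smoothed one-hot targets.
            label = self.onehot(label, F.shape(logit)[1], self.on_value, self.off_value)
        return self.ce(logit, label)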
    }, {
        'params': decay_params
    }]

    if cfg.is_dynamic_loss_scale:
        cfg.loss_scale = 1
    opt = Momentum(params=get_param_groups(net),
                   learning_rate=Tensor(lr),
                   momentum=cfg.momentum,
                   weight_decay=cfg.weight_decay,
                   loss_scale=cfg.loss_scale)
    if not cfg.use_label_smooth:
        cfg.label_smooth_factor = 0.0
    loss = CrossEntropySmooth(sparse=True, reduction="mean",
                              smooth_factor=cfg.label_smooth_factor,
                              num_classes=cfg.num_classes)
    if cfg.is_dynamic_loss_scale:
        loss_scale_manager = DynamicLossScaleManager(init_loss_scale=65536,
                                                     scale_factor=2,
                                                     scale_window=2000)
    else:
        loss_scale_manager = FixedLossScaleManager(cfg.loss_scale,
                                                   drop_overflow_update=False)
    model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc'}, amp_level="O3",
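# The fragment above opens with the tail of a parameter-group list and then
# feeds `get_param_groups(net)` to Momentum. A sketch of that helper matching
# the visible `decay_params` tail (the exact exclusion rules are an assumption,
# though excluding BatchNorm gamma/beta and biases from decay is the common
# convention):
def get_param_groups(network):
    """Split trainable parameters into no-weight-decay and weight-decay groups."""
    decay_params = []
    no_decay_params = []
    for param in network.trainable_params():
        # BatchNorm gamma/beta and biases are conventionally excluded from decay.
        if param.name.endswith('.gamma') or param.name.endswith('.beta') \
                or param.name.endswith('.bias'):
            no_decay_params.append(param)
        else:
            decay_params.append(param)
    return [{
        'params': no_decay_params,
        'weight_decay': 0.0
    }, {
        'params': decay_params
    }]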
def train():
    # set args
    dev = "GPU"
    epoch_size = int(args_opt.epoch_size)
    total_batch = int(args_opt.batch_size)
    print_per_steps = int(args_opt.print_per_steps)
    compute_type = str(args_opt.dtype).lower()
    ckpt_save_dir = str(args_opt.ckpt_path)
    save_ckpt = bool(args_opt.save_ckpt)
    device_num = 1

    # init context
    if args_opt.mode == "GRAPH":
        mode = context.GRAPH_MODE
        all_reduce_fusion_config = [85, 160]
    else:
        mode = context.PYNATIVE_MODE
        all_reduce_fusion_config = [30, 90, 160]
    context.set_context(mode=mode, device_target=dev, save_graphs=False)
    if args_opt.run_distribute:
        init()
        device_num = get_group_size()
        context.set_auto_parallel_context(device_num=device_num,
                                          parallel_mode=ParallelMode.DATA_PARALLEL,
                                          gradients_mean=True,
                                          all_reduce_fusion_config=all_reduce_fusion_config)
        ckpt_save_dir = ckpt_save_dir + "ckpt_" + str(get_rank()) + "/"

    # create dataset
    dataset = create_dataset(dataset_path=args_opt.dataset_path, do_train=True,
                             repeat_num=1, batch_size=total_batch, target=dev,
                             dtype=compute_type, device_num=device_num)
    step_size = dataset.get_dataset_size()
    if print_per_steps > step_size or print_per_steps < 1:
        print("Arg: print_per_steps should be between 1 and the dataset size", step_size)
        print("Change to default: 20")
        print_per_steps = 20

    # define net
    net = resnet(class_num=1001, dtype=compute_type)

    # init weight
    for _, cell in net.cells_and_names():
        if isinstance(cell, nn.Conv2d):
            cell.weight.set_data(weight_init.initializer(weight_init.XavierUniform(),
                                                         cell.weight.shape,
                                                         cell.weight.dtype))
        if isinstance(cell, nn.Dense):
            cell.weight.set_data(weight_init.initializer(weight_init.TruncatedNormal(),
                                                         cell.weight.shape,
                                                         cell.weight.dtype))

    # init lr
    lr = get_liner_lr(lr_init=0, lr_end=0, lr_max=0.8, warmup_epochs=0,
                      total_epochs=epoch_size, steps_per_epoch=step_size)
    lr = Tensor(lr)

    # split parameters into decay / no-decay groups
    decayed_params = []
    no_decayed_params = []
    for param in net.trainable_params():
        if 'beta' not in param.name and 'gamma' not in param.name and 'bias' not in param.name:
            decayed_params.append(param)
        else:
            no_decayed_params.append(param)

    # define loss, opt, model
    loss = CrossEntropySmooth(sparse=True, reduction='mean',
                              smooth_factor=0.1, num_classes=1001)
    opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr, 0.9, 1e-4)
    loss_scale = FixedLossScaleManager(1024, drop_overflow_update=False)
    model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc'})

    # mixed precision: rebuild the optimizer and model with loss scaling
    if compute_type == "fp16":
        if mode == context.PYNATIVE_MODE:
            opt = MomentumWeightDecay(filter(lambda x: x.requires_grad, net.get_parameters()),
                                      lr, 0.9, 1e-4, 1024)
        else:
            opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()),
                           lr, 0.9, 1e-4, 1024)
        model = Model(net, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale,
                      metrics={'acc'}, amp_level="O2", keep_batchnorm_fp32=False)

    # define callbacks
    if mode == context.PYNATIVE_MODE:
        print_per_steps = 1
    time_cb = MyTimeMonitor(total_batch, print_per_steps, step_size, mode)
    cb = [time_cb]
    if save_ckpt:
        config_ck = CheckpointConfig(save_checkpoint_steps=5 * step_size,
                                     keep_checkpoint_max=5)
        ckpt_cb = ModelCheckpoint(prefix="resnet_benchmark", directory=ckpt_save_dir,
                                  config=config_ck)
        cb += [ckpt_cb]

    # train model
    print("======== START RESNET50 GPU BENCHMARK ========")
    if mode == context.GRAPH_MODE:
        model.train(int(epoch_size * step_size / print_per_steps), dataset,
                    callbacks=cb, sink_size=print_per_steps)
    else:
        model.train(epoch_size, dataset, callbacks=cb)
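# train() builds its schedule with get_liner_lr, which is not shown in this
# section. A plausible implementation, assuming linear warmup followed by
# linear decay as the name and arguments suggest (a sketch, not the repo's
# exact code):
import numpy as np

def get_liner_lr(lr_init, lr_end, lr_max, warmup_epochs, total_epochs, steps_per_epoch):
    """Per-step learning rates: linear warmup to lr_max, then linear decay to lr_end."""
    total_steps = steps_per_epoch * total_epochs
    warmup_steps = steps_per_epoch * warmup_epochs
    lr_each_step = []
    for i in range(total_steps):
        if warmup_steps and i < warmup_steps:
            # Ramp linearly from lr_init up to lr_max over the warmup steps.
            lr = lr_init + (lr_max - lr_init) * i / warmup_steps
        else:
            # Decay linearly from lr_max down to lr_end over the remaining steps.
            lr = lr_max - (lr_max - lr_end) * (i - warmup_steps) / (total_steps - warmup_steps)
        lr_each_step.append(lr)
    return np.array(lr_each_step).astype(np.float32)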
# init context
context.set_context(mode=context.GRAPH_MODE, device_target=target, save_graphs=False)

# create dataset
local_data_path = args_opt.data_url
dataset = create_dataset(dataset_path=local_data_path, do_train=False,
                         batch_size=config.batch_size, target=target)
step_size = dataset.get_dataset_size()

# define net and load checkpoint
net = resnet(class_num=config.class_num)
ckpt_name = args_opt.checkpoint_path
param_dict = load_checkpoint(ckpt_name)
load_param_into_net(net, param_dict)
net.set_train(False)

# define loss, model
if not config.use_label_smooth:
    config.label_smooth_factor = 0.0
loss = CrossEntropySmooth(sparse=True, reduction='mean',
                          smooth_factor=config.label_smooth_factor,
                          num_classes=config.class_num)
model = Model(net, loss_fn=loss, metrics={'top_1_accuracy', 'top_5_accuracy'})

# eval model
res = model.eval(dataset)
print("result:", res, "ckpt=", ckpt_name)
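# The fragment above uses `target` and `config` without defining them. A
# minimal preamble under the usual model-zoo layout (the argument names and the
# config import path are assumptions):
import argparse
from src.config import config  # hypothetical location of the config object

parser = argparse.ArgumentParser(description='ResNet50 evaluation')
parser.add_argument('--data_url', type=str, required=True, help='dataset path')
parser.add_argument('--checkpoint_path', type=str, required=True, help='checkpoint file')
parser.add_argument('--device_target', type=str, default='Ascend', help='Ascend or GPU')
args_opt = parser.parse_args()
target = args_opt.device_target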
parser.add_argument('--platform', type=str, default='GPU', choices=('Ascend', 'GPU'),
                    help='run platform')
args_opt = parser.parse_args()
if args_opt.platform != 'GPU':
    raise ValueError("Only GPU platform is supported.")

# init context
context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.platform, device_id=0)

# load checkpoint into net
net = ShuffleNetV2(n_class=cfg.num_classes)
ckpt = load_checkpoint(args_opt.checkpoint)
load_param_into_net(net, ckpt)
net.set_train(False)

# create eval dataset (do_train=False)
dataset = create_dataset(args_opt.dataset_path, False, 0, 1)

# define loss and metrics
loss = CrossEntropySmooth(sparse=True, reduction='mean',
                          smooth_factor=0.1, num_classes=cfg.num_classes)
eval_metrics = {
    'Loss': nn.Loss(),
    'Top1-Acc': nn.Top1CategoricalAccuracy(),
    'Top5-Acc': nn.Top5CategoricalAccuracy()
}
model = Model(net, loss, optimizer=None, metrics=eval_metrics)

# eval model
metrics = model.eval(dataset)
print("metric: ", metrics)
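# Every snippet in this section calls create_dataset without showing it. A
# representative ImageNet-style eval pipeline is sketched below; the repo's
# actual signature, transforms, and module paths may differ (vision transforms
# moved between c_transforms and mindspore.dataset.vision across releases):
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as C
import mindspore.dataset.transforms.c_transforms as C2
from mindspore.common import dtype as mstype

def create_dataset(dataset_path, do_train, rank=0, group_size=1, batch_size=128):
    """Illustrative eval pipeline: decode, resize/crop, normalize, batch."""
    data = ds.ImageFolderDataset(dataset_path, num_parallel_workers=8,
                                 shuffle=do_train, num_shards=group_size, shard_id=rank)
    trans = [
        C.Decode(),
        C.Resize(256),
        C.CenterCrop(224),
        C.Normalize(mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
                    std=[0.229 * 255, 0.224 * 255, 0.225 * 255]),
        C.HWC2CHW()
    ]
    data = data.map(operations=trans, input_columns="image", num_parallel_workers=8)
    data = data.map(operations=C2.TypeCast(mstype.int32), input_columns="label")
    return data.batch(batch_size, drop_remainder=False)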