def create_network(name, *args, **kwargs):
    """Construct a MobileNetV3 backbone by registry name.

    Args:
        name: Network identifier; one of ``"mobilenetv3_large"`` or
            ``"mobilenetv3_small"``.
        *args: Positional arguments forwarded to the network constructor.
        **kwargs: Keyword arguments forwarded to the network constructor
            (e.g. ``num_classes``).

    Returns:
        The instantiated MobileNetV3 network.

    Raises:
        NotImplementedError: If ``name`` does not match a known variant.
    """
    # Dispatch via guard clauses; anything unrecognized is rejected early.
    if name == "mobilenetv3_large":
        return mobilenet_v3_large(*args, **kwargs)
    if name == "mobilenetv3_small":
        return mobilenet_v3_small(*args, **kwargs)
    raise NotImplementedError(f"{name} is not implemented in the repo")
# NOTE(review): mangled fragment, collapsed onto one line — it begins mid-expression
# (the tail of a loss-monitor print's argument list) and is cut off mid-call at the
# end (`create_dataset(...)` has no closing context visible). Left byte-identical:
# reflowing would require guessing text outside this view. Visible content appears to
# be a GPU training entry: NCCL init, data-parallel auto-parallel context, a
# MobileNetV3-large net, label-smoothing vs. plain softmax-CE loss, and dataset
# creation. `mirror_mean=` and `is_grad=` look like legacy MindSpore kwargs — TODO
# confirm against the project's pinned MindSpore version before modernizing.
1, cb_params.epoch_num, cur_step_in_epoch, cb_params.batch_num, step_loss, np.mean(self.losses), step_mseconds, self.lr_init[cb_params.cur_step_num - 1])) if __name__ == '__main__': if args_opt.platform == "GPU": # train on gpu print("train args: ", args_opt, "\ncfg: ", config_gpu) init('nccl') context.set_auto_parallel_context(parallel_mode="data_parallel", mirror_mean=True, device_num=get_group_size()) # define net net = mobilenet_v3_large(num_classes=config_gpu.num_classes) # define loss if config_gpu.label_smooth > 0: loss = CrossEntropyWithLabelSmooth( smooth_factor=config_gpu.label_smooth, num_classes=config_gpu.num_classes) else: loss = SoftmaxCrossEntropyWithLogits( is_grad=False, sparse=True, reduction='mean') # define dataset epoch_size = config_gpu.epoch_size dataset = create_dataset(dataset_path=args_opt.dataset_path, do_train=True, config=config_gpu, platform=args_opt.platform, repeat_num=epoch_size, batch_size=config_gpu.batch_size)
# NOTE(review): mangled fragment, collapsed onto one line — it starts inside an
# `if/elif` chain whose opening `if` branch is outside this view, so it cannot be
# safely reflowed. Left byte-identical. Visible content is an evaluation flow:
# build an eval dataset per device target (CIFAR path on CPU), load a checkpoint
# into MobileNetV3-large (Softmax head activation), run Model.eval with an 'acc'
# metric, and print the result. Presumably the missing leading branch handles the
# GPU target — verify against the full file.
dataset = create_dataset(dataset_path=args_opt.dataset_path, do_train=False, config=config, device_target=args_opt.device_target, batch_size=config.batch_size) elif args_opt.device_target == "CPU": config = config_cpu context.set_context(mode=context.GRAPH_MODE, device_target="CPU", save_graphs=False) dataset = create_dataset_cifar(dataset_path=args_opt.dataset_path, do_train=False, batch_size=config.batch_size) else: raise ValueError("Unsupported device_target.") loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean') net = mobilenet_v3_large(num_classes=config.num_classes, activation="Softmax") step_size = dataset.get_dataset_size() if args_opt.checkpoint_path: param_dict = load_checkpoint(args_opt.checkpoint_path) load_param_into_net(net, param_dict) net.set_train(False) model = Model(net, loss_fn=loss, metrics={'acc'}) res = model.eval(dataset) print("result:", res, "ckpt=", args_opt.checkpoint_path)
# NOTE(review): mangled fragment, collapsed onto one line — it ends mid-call (the
# `create_dataset(...)` argument list is truncated at `batch_size=...,`), so it
# cannot be safely reflowed without the missing tail. Left byte-identical. Visible
# content is a training entry point: select a GPU or CPU config, reject other
# targets, build MobileNetV3-large, choose label-smoothing CE vs. plain softmax-CE
# by `config_.label_smooth`, then start constructing the training dataset.
if __name__ == '__main__': config_ = None if args_opt.device_target == "GPU": config_ = config_gpu elif args_opt.device_target == "CPU": config_ = config_cpu else: raise ValueError("Unsupported device_target.") # train on device print("train args: ", args_opt) print("cfg: ", config_) # define net net = mobilenet_v3_large(num_classes=config_.num_classes) # define loss if config_.label_smooth > 0: loss = CrossEntropyWithLabelSmooth(smooth_factor=config_.label_smooth, num_classes=config_.num_classes) else: loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean') # define dataset epoch_size = config_.epoch_size if args_opt.device_target == "GPU": dataset = create_dataset(dataset_path=args_opt.dataset_path, do_train=True, config=config_, device_target=args_opt.device_target, repeat_num=1, batch_size=config_.batch_size,
# NOTE(review): mangled fragment, collapsed onto one line — it ends on a dangling
# `if args_opt.checkpoint_path:` whose body is outside this view, so it cannot be
# safely reflowed. Left byte-identical. Visible content is an Ascend/GPU eval setup:
# pick a platform config (Ascend reads DEVICE_ID from the environment — will raise
# TypeError if unset, since int(None) fails), build the loss and MobileNetV3-large,
# cast the net to fp16 on Ascend while keeping Dense layers fp32, and build the eval
# dataset. `is_grad=` looks like a legacy MindSpore kwarg, and "Unsupport platform."
# is a typo in a runtime string — both are candidates for a separate behavior fix,
# not a comment-only pass.
config_platform = None if args_opt.platform == "Ascend": config_platform = config_ascend device_id = int(os.getenv('DEVICE_ID')) context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=device_id, save_graphs=False) elif args_opt.platform == "GPU": config_platform = config_gpu context.set_context(mode=context.GRAPH_MODE, device_target="GPU", save_graphs=False) else: raise ValueError("Unsupport platform.") loss = nn.SoftmaxCrossEntropyWithLogits( is_grad=False, sparse=True, reduction='mean') net = mobilenet_v3_large(num_classes=config_platform.num_classes) if args_opt.platform == "Ascend": net.to_float(mstype.float16) for _, cell in net.cells_and_names(): if isinstance(cell, nn.Dense): cell.to_float(mstype.float32) dataset = create_dataset(dataset_path=args_opt.dataset_path, do_train=False, config=config_platform, platform=args_opt.platform, batch_size=config_platform.batch_size) step_size = dataset.get_dataset_size() if args_opt.checkpoint_path: