def ssd_eval(dataset_path, ckpt_path, anno_json):
    """SSD evaluation."""
    batch_size = 1
    ds = create_ssd_dataset(dataset_path, batch_size=batch_size, repeat_num=1,
                            is_training=False, use_multiprocessing=False)
    if config.model == "ssd300":
        net = SSD300(ssd_mobilenet_v2(), config, is_training=False)
    elif config.model == "ssd_vgg16":
        net = ssd_vgg16(config=config)
    elif config.model == "ssd_mobilenet_v1_fpn":
        net = ssd_mobilenet_v1_fpn(config=config)
    elif config.model == "ssd_resnet50_fpn":
        net = ssd_resnet50_fpn(config=config)
    else:
        raise ValueError(f'config.model: {config.model} is not supported')
    net = SsdInferWithDecoder(net, Tensor(default_boxes), config)

    print("Load Checkpoint!")
    param_dict = load_checkpoint(ckpt_path)
    net.init_parameters_data()
    load_param_into_net(net, param_dict)

    net.set_train(False)
    total = ds.get_dataset_size() * batch_size
    print("\n========================================\n")
    print("total images num: ", total)
    print("Processing, please wait a moment.")
    eval_param_dict = {"net": net, "dataset": ds, "anno_json": anno_json}
    mAP = apply_eval(eval_param_dict)
    print("\n========================================\n")
    print(f"mAP: {mAP}")
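# Hedged usage sketch: the mindrecord, checkpoint, and annotation paths below
# are hypothetical placeholders, not values taken from this repo.
ssd_eval(dataset_path="/data/coco/ssd_eval.mindrecord0",
         ckpt_path="./ckpt_0/ssd-500_458.ckpt",
         anno_json="/data/coco/annotations/instances_val2017.json")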
def ssd_model_build(args_opt):
    if config.model == "ssd300":
        backbone = ssd_mobilenet_v2()
        ssd = SSD300(backbone=backbone, config=config)
        init_net_param(ssd)
        if args_opt.freeze_layer == "backbone":
            for param in backbone.feature_1.trainable_params():
                param.requires_grad = False
    elif config.model == "ssd_mobilenet_v1_fpn":
        ssd = ssd_mobilenet_v1_fpn(config=config)
        init_net_param(ssd)
        if config.feature_extractor_base_param != "":
            param_dict = load_checkpoint(config.feature_extractor_base_param)
            for x in list(param_dict.keys()):
                param_dict["network.feature_extractor.mobilenet_v1." + x] = param_dict[x]
                del param_dict[x]
            load_param_into_net(ssd.feature_extractor.mobilenet_v1.network, param_dict)
    elif config.model == "ssd_resnet50_fpn":
        ssd = ssd_resnet50_fpn(config=config)
        init_net_param(ssd)
        if config.feature_extractor_base_param != "":
            param_dict = load_checkpoint(config.feature_extractor_base_param)
            for x in list(param_dict.keys()):
                param_dict["network.feature_extractor.resnet." + x] = param_dict[x]
                del param_dict[x]
            load_param_into_net(ssd.feature_extractor.resnet, param_dict)
    else:
        raise ValueError(f'config.model: {config.model} is not supported')
    return ssd
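# Hedged usage sketch: `Namespace` stands in for the parsed CLI arguments; only
# `freeze_layer` is read by ssd_model_build, everything else comes from config.
from argparse import Namespace

ssd = ssd_model_build(Namespace(freeze_layer="none"))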
def ssd_eval(dataset_path, ckpt_path, anno_json):
    """SSD evaluation."""
    batch_size = 1
    ds = create_ssd_dataset(dataset_path, batch_size=batch_size, repeat_num=1,
                            is_training=False, use_multiprocessing=False)
    if config.model == "ssd300":
        net = SSD300(ssd_mobilenet_v2(), config, is_training=False)
    elif config.model == "ssd_vgg16":
        net = ssd_vgg16(config=config)
    elif config.model == "ssd_mobilenet_v1_fpn":
        net = ssd_mobilenet_v1_fpn(config=config)
    elif config.model == "ssd_resnet50_fpn":
        net = ssd_resnet50_fpn(config=config)
    else:
        raise ValueError(f'config.model: {config.model} is not supported')
    net = SsdInferWithDecoder(net, Tensor(default_boxes), config)

    print("Load Checkpoint!")
    param_dict = load_checkpoint(ckpt_path)
    net.init_parameters_data()
    load_param_into_net(net, param_dict)

    net.set_train(False)
    i = batch_size
    total = ds.get_dataset_size() * batch_size
    start = time.time()
    pred_data = []
    print("\n========================================\n")
    print("total images num: ", total)
    print("Processing, please wait a moment.")
    for data in ds.create_dict_iterator(output_numpy=True, num_epochs=1):
        img_id = data['img_id']
        img_np = data['image']
        image_shape = data['image_shape']

        output = net(Tensor(img_np))
        for batch_idx in range(img_np.shape[0]):
            pred_data.append({"boxes": output[0].asnumpy()[batch_idx],
                              "box_scores": output[1].asnumpy()[batch_idx],
                              "img_id": int(np.squeeze(img_id[batch_idx])),
                              "image_shape": image_shape[batch_idx]})
        percent = round(i / total * 100., 2)
        print(f' {str(percent)} [{i}/{total}]', end='\r')
        i += batch_size
    cost_time = int((time.time() - start) * 1000)
    print(f' 100% [{total}/{total}] cost {cost_time} ms')
    mAP = metrics(pred_data, anno_json)
    print("\n========================================\n")
    print(f"mAP: {mAP}")
def ssd_eval(dataset_path, ckpt_path):
    """SSD evaluation."""
    batch_size = 1
    ds = create_ssd_dataset(dataset_path, batch_size=batch_size, repeat_num=1, is_training=False)
    net = SSD300(ssd_mobilenet_v2(), config, is_training=False)
    print("Load Checkpoint!")
    param_dict = load_checkpoint(ckpt_path)
    net.init_parameters_data()
    load_param_into_net(net, param_dict)

    net.set_train(False)
    i = batch_size
    total = ds.get_dataset_size() * batch_size
    start = time.time()
    pred_data = []
    print("\n========================================\n")
    print("total images num: ", total)
    print("Processing, please wait a moment.")
    for data in ds.create_dict_iterator(output_numpy=True):
        img_id = data['img_id']
        img_np = data['image']
        image_shape = data['image_shape']

        output = net(Tensor(img_np))
        for batch_idx in range(img_np.shape[0]):
            pred_data.append({"boxes": output[0].asnumpy()[batch_idx],
                              "box_scores": output[1].asnumpy()[batch_idx],
                              "img_id": int(np.squeeze(img_id[batch_idx])),
                              "image_shape": image_shape[batch_idx]})
        percent = round(i / total * 100., 2)
        print(f' {str(percent)} [{i}/{total}]', end='\r')
        i += batch_size
    cost_time = int((time.time() - start) * 1000)
    print(f' 100% [{total}/{total}] cost {cost_time} ms')
    mAP = metrics(pred_data)
    print("\n========================================\n")
    print(f"mAP: {mAP}")
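# Hedged usage sketch for this older variant (hypothetical paths): note that it
# takes no anno_json argument, so metrics() is assumed to locate the annotation
# file on its own.
ssd_eval(dataset_path="/data/coco/ssd_eval.mindrecord0", ckpt_path="./ssd-500_458.ckpt")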
parser.add_argument("--device_id", type=int, default=0, help="Device id") parser.add_argument("--batch_size", type=int, default=1, help="batch size") parser.add_argument("--ckpt_file", type=str, required=True, help="Checkpoint file path.") parser.add_argument("--file_name", type=str, default="ssd", help="output file name.") parser.add_argument('--file_format', type=str, choices=["AIR", "MINDIR"], default='AIR', help='file format') parser.add_argument("--device_target", type=str, choices=["Ascend", "GPU", "CPU"], default="Ascend", help="device target") args = parser.parse_args() context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target) if args.device_target == "Ascend": context.set_context(device_id=args.device_id) if __name__ == '__main__': if config.model == "ssd300": net = SSD300(ssd_mobilenet_v2(), config, is_training=False) elif config.model == "ssd_vgg16": net = ssd_vgg16(config=config) elif config.model == "ssd_mobilenet_v1_fpn": net = ssd_mobilenet_v1_fpn(config=config) elif config.model == "ssd_resnet50_fpn": net = ssd_resnet50_fpn(config=config) else: raise ValueError(f'config.model: {config.model} is not supported') net = SsdInferWithDecoder(net, Tensor(default_boxes), config) param_dict = load_checkpoint(args.ckpt_file) net.init_parameters_data() load_param_into_net(net, param_dict) net.set_train(False)
def main():
    parser = argparse.ArgumentParser(description="SSD training")
    parser.add_argument("--only_create_dataset", type=ast.literal_eval, default=False,
                        help="If set it true, only create Mindrecord, default is False.")
    parser.add_argument("--distribute", type=ast.literal_eval, default=False,
                        help="Run distribute, default is False.")
    parser.add_argument("--device_id", type=int, default=0, help="Device id, default is 0.")
    parser.add_argument("--device_num", type=int, default=1, help="Use device nums, default is 1.")
    parser.add_argument("--lr", type=float, default=0.05, help="Learning rate, default is 0.05.")
    parser.add_argument("--mode", type=str, default="sink", help="Run sink mode or not, default is sink.")
    parser.add_argument("--dataset", type=str, default="coco", help="Dataset, default is coco.")
    parser.add_argument("--epoch_size", type=int, default=500, help="Epoch size, default is 500.")
    parser.add_argument("--batch_size", type=int, default=32, help="Batch size, default is 32.")
    parser.add_argument("--pre_trained", type=str, default=None, help="Pretrained Checkpoint file path.")
    parser.add_argument("--pre_trained_epoch_size", type=int, default=0, help="Pretrained epoch size.")
    parser.add_argument("--save_checkpoint_epochs", type=int, default=10,
                        help="Save checkpoint epochs, default is 10.")
    parser.add_argument("--loss_scale", type=int, default=1024, help="Loss scale, default is 1024.")
    parser.add_argument("--filter_weight", type=ast.literal_eval, default=False,
                        help="Filter weight parameters, default is False.")
    args_opt = parser.parse_args()

    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=args_opt.device_id)

    if args_opt.distribute:
        device_num = args_opt.device_num
        context.reset_auto_parallel_context()
        context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, mirror_mean=True,
                                          device_num=device_num)
        init()
        rank = args_opt.device_id % device_num
    else:
        rank = 0
        device_num = 1

    print("Start create dataset!")

    # It will generate mindrecord files in config.mindrecord_dir,
    # named ssd.mindrecord0, 1, ... file_num.
    prefix = "ssd.mindrecord"
    mindrecord_dir = config.mindrecord_dir
    mindrecord_file = os.path.join(mindrecord_dir, prefix + "0")
    if not os.path.exists(mindrecord_file):
        if not os.path.isdir(mindrecord_dir):
            os.makedirs(mindrecord_dir)
        if args_opt.dataset == "coco":
            if os.path.isdir(config.coco_root):
                print("Create Mindrecord.")
                data_to_mindrecord_byte_image("coco", True, prefix)
                print("Create Mindrecord Done, at {}".format(mindrecord_dir))
            else:
                print("coco_root does not exist.")
        elif args_opt.dataset == "voc":
            if os.path.isdir(config.voc_dir):
                print("Create Mindrecord.")
                voc_data_to_mindrecord(mindrecord_dir, True, prefix)
                print("Create Mindrecord Done, at {}".format(mindrecord_dir))
            else:
                print("voc_dir does not exist.")
        else:
            if os.path.isdir(config.image_dir) and os.path.exists(config.anno_path):
                print("Create Mindrecord.")
                data_to_mindrecord_byte_image("other", True, prefix)
                print("Create Mindrecord Done, at {}".format(mindrecord_dir))
            else:
                print("image_dir or anno_path does not exist.")

    if not args_opt.only_create_dataset:
        loss_scale = float(args_opt.loss_scale)

        # When creating the MindDataset, use the first mindrecord file, e.g. ssd.mindrecord0.
        dataset = create_ssd_dataset(mindrecord_file, repeat_num=1, batch_size=args_opt.batch_size,
                                     device_num=device_num, rank=rank)

        dataset_size = dataset.get_dataset_size()
        print("Create dataset done!")

        backbone = ssd_mobilenet_v2()
        ssd = SSD300(backbone=backbone, config=config)
        net = SSDWithLossCell(ssd, config)
        init_net_param(net)

        # checkpoint
        ckpt_config = CheckpointConfig(save_checkpoint_steps=dataset_size * args_opt.save_checkpoint_epochs)
        ckpoint_cb = ModelCheckpoint(prefix="ssd", directory=None, config=ckpt_config)

        if args_opt.pre_trained:
            if args_opt.pre_trained_epoch_size <= 0:
                raise KeyError("pre_trained_epoch_size must be greater than 0.")
            param_dict = load_checkpoint(args_opt.pre_trained)
            if args_opt.filter_weight:
                filter_checkpoint_parameter(param_dict)
            load_param_into_net(net, param_dict)

        lr = Tensor(get_lr(global_step=config.global_step,
                           lr_init=config.lr_init, lr_end=config.lr_end_rate * args_opt.lr,
                           lr_max=args_opt.lr,
                           warmup_epochs=config.warmup_epochs,
                           total_epochs=args_opt.epoch_size,
                           steps_per_epoch=dataset_size))
        opt = nn.Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr,
                          config.momentum, config.weight_decay, loss_scale)
        net = TrainingWrapper(net, opt, loss_scale)

        callback = [TimeMonitor(data_size=dataset_size), LossMonitor(), ckpoint_cb]

        model = Model(net)
        dataset_sink_mode = False
        if args_opt.mode == "sink":
            print("In sink mode, each epoch returns one loss.")
            dataset_sink_mode = True
        print("Start train SSD, the first epoch will be slower because of the graph compilation.")
        model.train(args_opt.epoch_size, dataset, callbacks=callback, dataset_sink_mode=dataset_sink_mode)
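# Hedged launch sketch (shell, hypothetical values): this older main() targets
# Ascend only and derives the rank from --device_id.
#   python train.py --device_id=0 --lr=0.05 --epoch_size=500 --batch_size=32
#   python train.py --distribute=True --device_num=8 --lr=0.2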
def main():
    args_opt = get_args()
    rank = 0
    device_num = 1
    if args_opt.run_platform == "CPU":
        context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    else:
        context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.run_platform,
                            device_id=args_opt.device_id)
        if args_opt.distribute:
            device_num = args_opt.device_num
            context.reset_auto_parallel_context()
            context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True,
                                              device_num=device_num)
            init()
            context.set_auto_parallel_context(all_reduce_fusion_config=[29, 58, 89])
            rank = get_rank()

    mindrecord_file = create_mindrecord(args_opt.dataset, "ssd.mindrecord", True)

    if args_opt.only_create_dataset:
        return

    loss_scale = float(args_opt.loss_scale)
    if args_opt.run_platform == "CPU":
        loss_scale = 1.0

    # When creating the MindDataset, use the first mindrecord file, e.g. ssd.mindrecord0.
    use_multiprocessing = (args_opt.run_platform != "CPU")
    dataset = create_ssd_dataset(mindrecord_file, repeat_num=1, batch_size=args_opt.batch_size,
                                 device_num=device_num, rank=rank, use_multiprocessing=use_multiprocessing)

    dataset_size = dataset.get_dataset_size()
    print("Create dataset done!")

    backbone = ssd_mobilenet_v2()
    if config.model == "ssd300":
        ssd = SSD300(backbone=backbone, config=config)
    elif config.model == "ssd_mobilenet_v1_fpn":
        ssd = ssd_mobilenet_v1_fpn(config=config)
    else:
        raise ValueError(f'config.model: {config.model} is not supported')
    if args_opt.run_platform == "GPU":
        ssd.to_float(dtype.float16)
    net = SSDWithLossCell(ssd, config)
    init_net_param(net)

    if config.feature_extractor_base_param != "":
        param_dict = load_checkpoint(config.feature_extractor_base_param)
        for x in list(param_dict.keys()):
            param_dict["network.feature_extractor.mobilenet_v1." + x] = param_dict[x]
            del param_dict[x]
        load_param_into_net(ssd.feature_extractor.mobilenet_v1.network, param_dict)

    # checkpoint
    ckpt_config = CheckpointConfig(save_checkpoint_steps=dataset_size * args_opt.save_checkpoint_epochs)
    save_ckpt_path = './ckpt_' + str(rank) + '/'
    ckpoint_cb = ModelCheckpoint(prefix="ssd", directory=save_ckpt_path, config=ckpt_config)

    if args_opt.pre_trained:
        param_dict = load_checkpoint(args_opt.pre_trained)
        if args_opt.filter_weight:
            filter_checkpoint_parameter(param_dict)
        load_param_into_net(net, param_dict)

    if args_opt.freeze_layer == "backbone":
        for param in backbone.feature_1.trainable_params():
            param.requires_grad = False

    lr = Tensor(get_lr(global_step=args_opt.pre_trained_epoch_size * dataset_size,
                       lr_init=config.lr_init, lr_end=config.lr_end_rate * args_opt.lr,
                       lr_max=args_opt.lr,
                       warmup_epochs=config.warmup_epochs,
                       total_epochs=args_opt.epoch_size,
                       steps_per_epoch=dataset_size))

    if "use_global_norm" in config and config.use_global_norm:
        opt = nn.Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr,
                          config.momentum, config.weight_decay, 1.0)
        net = TrainingWrapper(net, opt, loss_scale, True)
    else:
        opt = nn.Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr,
                          config.momentum, config.weight_decay, loss_scale)
        net = TrainingWrapper(net, opt, loss_scale)

    callback = [TimeMonitor(data_size=dataset_size), LossMonitor(), ckpoint_cb]
    model = Model(net)
    dataset_sink_mode = False
    if args_opt.mode == "sink" and args_opt.run_platform != "CPU":
        print("In sink mode, each epoch returns one loss.")
        dataset_sink_mode = True
    print("Start train SSD, the first epoch will be slower because of the graph compilation.")
    model.train(args_opt.epoch_size, dataset, callbacks=callback, dataset_sink_mode=dataset_sink_mode)
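# Hedged launch sketch (shell, hypothetical values): unlike the older main()
# above, this version also runs on GPU/CPU and takes its rank from get_rank().
#   python train.py --run_platform=CPU --batch_size=32
#   mpirun -n 8 python train.py --run_platform=GPU --distribute=True --lr=0.2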
def create_network(name, *args, **kwargs):
    if name == "ssd300":
        backbone = ssd_mobilenet_v2()
        ssd = SSD300(backbone=backbone, config=config, *args, **kwargs)
        return ssd
    raise NotImplementedError(f"{name} is not implemented in the repo")
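# Hedged usage sketch: as the hub entry point, create_network is looked up by
# name; extra kwargs such as is_training are forwarded to the SSD300 constructor.
net = create_network("ssd300", is_training=False)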