Example #1
File: eval.py Project: wenkai128/mindspore
import time

from mindspore import Tensor
from mindspore.train.serialization import load_checkpoint, load_param_into_net

# create_ssd_dataset, SSD300, ssd_mobilenet_v2, ConfigSSD, and metrics are
# project-local helpers from wenkai128/mindspore.


def ssd_eval(dataset_path, ckpt_path):
    """SSD evaluation."""

    ds = create_ssd_dataset(dataset_path,
                            batch_size=1,
                            repeat_num=1,
                            is_training=False)
    net = SSD300(ssd_mobilenet_v2(), ConfigSSD(), is_training=False)
    print("Load Checkpoint!")
    param_dict = load_checkpoint(ckpt_path)
    net.init_parameters_data()
    load_param_into_net(net, param_dict)

    net.set_train(False)
    i = 1
    total = ds.get_dataset_size()
    start = time.time()
    pred_data = []
    print("\n========================================\n")
    print("total images num: ", total)
    print("Processing, please wait a moment.")
    for data in ds.create_dict_iterator():
        img_np = data['image']
        image_shape = data['image_shape']
        annotation = data['annotation']

        output = net(Tensor(img_np))
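        # batch_size is 1 in create_ssd_dataset above, so each iteration
        # holds a single image; output[0] carries the predicted boxes and
        # output[1] the per-class box scores (see the dict keys below).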
        for batch_idx in range(img_np.shape[0]):
            pred_data.append({
                "boxes": output[0].asnumpy()[batch_idx],
                "box_scores": output[1].asnumpy()[batch_idx],
                "annotation": annotation,
                "image_shape": image_shape
            })
        percent = round(i / total * 100, 2)
        print(f'    {percent}% [{i}/{total}]', end='\r')
        i += 1
    cost_time = int((time.time() - start) * 1000)
    print(f'    100% [{total}/{total}] cost {cost_time} ms')
    mAP = metrics(pred_data)
    print("\n========================================\n")
    print(f"mAP: {mAP}")
Example #2
import argparse
import os

# The MindSpore import paths below match the 0.x-era API this example
# targets (ParallelMode, mirror_mean); several have moved in later releases.
import mindspore.nn as nn
from mindspore import Tensor, context
from mindspore.communication.management import init
from mindspore.train import Model, ParallelMode
from mindspore.train.callback import (CheckpointConfig, LossMonitor,
                                      ModelCheckpoint, TimeMonitor)
from mindspore.train.serialization import load_checkpoint, load_param_into_net

# ConfigSSD, SSD300, SSDWithLossCell, TrainingWrapper, ssd_mobilenet_v2,
# create_ssd_dataset, data_to_mindrecord_byte_image, get_lr, and
# init_net_param are project-local modules from wenkai128/mindspore.


def str2bool(v):
    """Parse boolean flags explicitly: argparse's type=bool would treat any
    non-empty string, including "False", as True."""
    return str(v).lower() in ("true", "t", "1", "yes")


def main():
    parser = argparse.ArgumentParser(description="SSD training")
    parser.add_argument("--only_create_dataset",
                        type=bool,
                        default=False,
                        help="If set it true, only create "
                        "Mindrecord, default is false.")
    parser.add_argument("--distribute",
                        type=bool,
                        default=False,
                        help="Run distribute, default is false.")
    parser.add_argument("--device_id",
                        type=int,
                        default=0,
                        help="Device id, default is 0.")
    parser.add_argument("--device_num",
                        type=int,
                        default=1,
                        help="Use device nums, default is 1.")
    parser.add_argument("--lr",
                        type=float,
                        default=0.25,
                        help="Learning rate, default is 0.25.")
    parser.add_argument("--mode",
                        type=str,
                        default="sink",
                        help="Run sink mode or not, default is sink.")
    parser.add_argument("--dataset",
                        type=str,
                        default="coco",
                        help="Dataset, defalut is coco.")
    parser.add_argument("--epoch_size",
                        type=int,
                        default=70,
                        help="Epoch size, default is 70.")
    parser.add_argument("--batch_size",
                        type=int,
                        default=32,
                        help="Batch size, default is 32.")
    parser.add_argument("--pre_trained",
                        type=str,
                        default=None,
                        help="Pretrained Checkpoint file path.")
    parser.add_argument("--pre_trained_epoch_size",
                        type=int,
                        default=0,
                        help="Pretrained epoch size.")
    parser.add_argument("--save_checkpoint_epochs",
                        type=int,
                        default=5,
                        help="Save checkpoint epochs, default is 5.")
    parser.add_argument("--loss_scale",
                        type=int,
                        default=1024,
                        help="Loss scale, default is 1024.")
    args_opt = parser.parse_args()

    context.set_context(mode=context.GRAPH_MODE,
                        device_target="Ascend",
                        device_id=args_opt.device_id)

    if args_opt.distribute:
        device_num = args_opt.device_num
        context.reset_auto_parallel_context()
        context.set_auto_parallel_context(
            parallel_mode=ParallelMode.DATA_PARALLEL,
            mirror_mean=True,
            device_num=device_num)
        init()
        rank = args_opt.device_id % device_num
    else:
        rank = 0
        device_num = 1

    print("Start create dataset!")

    # MindRecord files are generated in config.MINDRECORD_DIR and named
    # ssd.mindrecord0, ssd.mindrecord1, ... up to the configured file_num.

    config = ConfigSSD()
    prefix = "ssd.mindrecord"
    mindrecord_dir = config.MINDRECORD_DIR
    mindrecord_file = os.path.join(mindrecord_dir, prefix + "0")
    if not os.path.exists(mindrecord_file):
        if not os.path.isdir(mindrecord_dir):
            os.makedirs(mindrecord_dir)
        if args_opt.dataset == "coco":
            if os.path.isdir(config.COCO_ROOT):
                print("Create Mindrecord.")
                data_to_mindrecord_byte_image("coco", True, prefix)
                print("Create Mindrecord Done, at {}".format(mindrecord_dir))
            else:
                print("COCO_ROOT not exits.")
        else:
            if os.path.isdir(config.IMAGE_DIR) and os.path.exists(
                    config.ANNO_PATH):
                print("Create Mindrecord.")
                data_to_mindrecord_byte_image("other", True, prefix)
                print("Create Mindrecord Done, at {}".format(mindrecord_dir))
            else:
                print("IMAGE_DIR or ANNO_PATH not exits.")

    if not args_opt.only_create_dataset:
        loss_scale = float(args_opt.loss_scale)

        # When creating the MindDataset, use the first mindrecord file, such as ssd.mindrecord0.
        dataset = create_ssd_dataset(mindrecord_file,
                                     repeat_num=args_opt.epoch_size,
                                     batch_size=args_opt.batch_size,
                                     device_num=device_num,
                                     rank=rank)

        dataset_size = dataset.get_dataset_size()
        print("Create dataset done!")

        ssd = SSD300(backbone=ssd_mobilenet_v2(), config=config)
        net = SSDWithLossCell(ssd, config)
        init_net_param(net)

        # checkpoint
        ckpt_config = CheckpointConfig(save_checkpoint_steps=dataset_size *
                                       args_opt.save_checkpoint_epochs)
        ckpoint_cb = ModelCheckpoint(prefix="ssd",
                                     directory=None,
                                     config=ckpt_config)

        if args_opt.pre_trained:
            if args_opt.pre_trained_epoch_size <= 0:
                raise ValueError(
                    "pre_trained_epoch_size must be greater than 0.")
            param_dict = load_checkpoint(args_opt.pre_trained)
            load_param_into_net(net, param_dict)

        lr = Tensor(
            get_lr(global_step=args_opt.pre_trained_epoch_size * dataset_size,
                   lr_init=0,
                   lr_end=0,
                   lr_max=args_opt.lr,
                   warmup_epochs=max(350 // 20, 1),
                   total_epochs=350,
                   steps_per_epoch=dataset_size))
        opt = nn.Momentum(
            filter(lambda x: x.requires_grad, net.get_parameters()),
            learning_rate=lr, momentum=0.9, weight_decay=0.0001,
            loss_scale=loss_scale)
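        # TrainingWrapper is project-local; it bundles the loss cell,
        # optimizer, and gradient computation into a single train-step
        # cell (and, under --distribute, presumably the gradient
        # all-reduce as well).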
        net = TrainingWrapper(net, opt, loss_scale)

        callback = [
            TimeMonitor(data_size=dataset_size),
            LossMonitor(), ckpoint_cb
        ]

        model = Model(net)
        dataset_sink_mode = False
        if args_opt.mode == "sink":
            print("In sink mode, one epoch return a loss.")
            dataset_sink_mode = True
        print("Start training SSD; the first epoch will be slower "
              "because of graph compilation.")
        model.train(args_opt.epoch_size,
                    dataset,
                    callbacks=callback,
                    dataset_sink_mode=dataset_sink_mode)
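
The get_lr helper used above is project-local. Its arguments (lr_init=0, lr_end=0, lr_max, a warmup of total_epochs // 20 epochs, and a global_step offset for resumed training) suggest a warmup-then-decay schedule; the sketch below is a hypothetical re-implementation under that assumption, not the project's actual code:

import math
import numpy as np

def get_lr_sketch(global_step, lr_init, lr_end, lr_max,
                  warmup_epochs, total_epochs, steps_per_epoch):
    # Assumed shape: linear warmup from lr_init to lr_max over the warmup
    # steps, then cosine decay toward lr_end; the real get_lr may differ.
    total_steps = steps_per_epoch * total_epochs
    warmup_steps = steps_per_epoch * warmup_epochs
    lr_each_step = []
    for step in range(total_steps):
        if step < warmup_steps:
            lr = lr_init + (lr_max - lr_init) * step / warmup_steps
        else:
            ratio = (step - warmup_steps) / (total_steps - warmup_steps)
            lr = lr_end + (lr_max - lr_end) * 0.5 * (1 + math.cos(math.pi * ratio))
        lr_each_step.append(lr)
    # When fine-tuning from a checkpoint, skip the steps already taken.
    return np.array(lr_each_step[global_step:], dtype=np.float32)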