Example #1
def test_ssd300_infer():
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")

    model = Model(ssd300_infer())
    model.compile()
    loc, score = model.predict(ts.ones((1, 3, 300, 300)))
    print(loc.asnumpy(), score.asnumpy())
Example #2
def test_mobilenetv2_infer():
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")

    model = Model(mobilenetv2_infer())
    model.compile()
    z = model.predict(ts.ones((1, 3, 224, 224)))
    print(z.asnumpy())
Example #3
def test_densenetBC_100():
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")

    model = Model(densenetBC_100())
    model.compile()
    z = model.predict(ts.ones((1, 3, 32, 32)))
    print(z.asnumpy())
Example #4
def test_sequential():
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")

    net = layers.SequentialLayer([
        layers.Conv2d(1, 6, 5, pad_mode='valid', weight_init="ones"),
        layers.ReLU(),
        layers.MaxPool2d(kernel_size=2, stride=2)
    ])
    model = Model(net)
    model.compile()
    z = model.predict(ts.ones((1, 1, 32, 32)))
    print(z.asnumpy())
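Examples #1 to #4 are shown without their import lines. Below is a minimal, hedged sketch of the TinyMS-style imports they appear to rely on; the import path of the model-zoo constructors (ssd300_infer, mobilenetv2_infer, densenetBC_100) is an assumption and may differ across TinyMS versions.

# Hedged import sketch for the snippets above (assumed TinyMS layout).
import tinyms as ts                 # tensor helpers such as ts.ones
from tinyms import context, layers  # execution context and layer building blocks
from tinyms.model import Model      # thin wrapper around the MindSpore Model API
# from tinyms.model import ssd300_infer, mobilenetv2_infer, densenetBC_100  # assumed path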
Example #5
    cifar_ds = Cifar10Dataset(data_path,
                              num_parallel_workers=num_parallel_workers,
                              shuffle=True)
    cifar_ds = cifar10_transform.apply_ds(
        cifar_ds,
        repeat_size=repeat_size,
        batch_size=batch_size,
        num_parallel_workers=num_parallel_workers,
        is_training=is_training)

    return cifar_ds


if __name__ == '__main__':
    args_opt = parse_args()
    context.set_context(mode=context.GRAPH_MODE,
                        device_target=args_opt.device_target)

    # download cifar10 dataset
    if not args_opt.dataset_path:
        args_opt.dataset_path = download_dataset('cifar10')
    # build the network
    if args_opt.do_eval and args_opt.load_pretrained == 'hub':
        from tinyms import hub
        net = hub.load(args_opt.hub_uid, class_num=args_opt.num_classes)
    else:
        net = vgg16(class_num=args_opt.num_classes)
    net.update_parameters_name(prefix='huawei')
    model = Model(net)
    # define the loss function
    net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
    # define the optimizer
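Example #5 is cut off right after the optimizer comment. A hedged sketch of how a TinyMS VGG16/CIFAR-10 training script typically continues is shown below; the optimizer type, hyper-parameters, and metric are assumptions, not the source's actual settings.

    # Hedged continuation sketch (values below are assumptions, not from the source).
    # Assumes: from tinyms.optimizers import Momentum; from tinyms.metrics import Accuracy
    net_opt = Momentum(net.trainable_params(), learning_rate=0.01, momentum=0.9)
    # compile the model with loss, optimizer and metrics, then train on the
    # CIFAR-10 dataset built by the helper shown at the top of this example
    model.compile(loss_fn=net_loss, optimizer=net_opt, metrics={"Accuracy": Accuracy()})
    # model.train(epoch_size, cifar_ds, dataset_sink_mode=False)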
def run_pretrain():
    """pre-train bert_clue"""

    parser = argparse_init()
    args_opt = parser.parse_args()
    context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target, device_id=args_opt.device_id)
    context.set_context(reserve_class_name_in_scope=False)
    is_auto_enable_graph_kernel = _auto_enable_graph_kernel(args_opt.device_target, args_opt.enable_graph_kernel)
    _set_graph_kernel_context(args_opt.device_target, args_opt.enable_graph_kernel, is_auto_enable_graph_kernel)
    ckpt_save_dir = args_opt.save_checkpoint_path

    device_num = 1

    _check_compute_type(args_opt, is_auto_enable_graph_kernel)

    if args_opt.accumulation_steps > 1:
        logger.info("accumulation steps: {}".format(args_opt.accumulation_steps))
        logger.info("global batch size: {}".format(cfg.batch_size * args_opt.accumulation_steps))
        if args_opt.enable_data_sink == "true":
            args_opt.data_sink_steps *= args_opt.accumulation_steps
            logger.info("data sink steps: {}".format(args_opt.data_sink_steps))
        if args_opt.enable_save_ckpt == "true":
            args_opt.save_checkpoint_steps *= args_opt.accumulation_steps
            logger.info("save checkpoint steps: {}".format(args_opt.save_checkpoint_steps))

    ds = create_bert_dataset(
        batch_size=cfg.batch_size,
        shuffle=args_opt.do_shuffle,
        data_dir=args_opt.data_dir,
        schema_dir=args_opt.schema_dir,
        num_parallel_workers=args_opt.num_parallel_workers
    )

    net_with_loss = BertNetworkWithLoss(bert_net_cfg, True)

    new_repeat_count = args_opt.epoch_size * ds.get_dataset_size() // args_opt.data_sink_steps
    if args_opt.train_steps > 0:
        train_steps = args_opt.train_steps * args_opt.accumulation_steps
        new_repeat_count = min(new_repeat_count, train_steps // args_opt.data_sink_steps)
    else:
        args_opt.train_steps = args_opt.epoch_size * ds.get_dataset_size() // args_opt.accumulation_steps
        logger.info("train steps: {}".format(args_opt.train_steps))

    # get the optimizer according to args_opt.optimizer
    optimizer = get_optimizer(args_opt, net_with_loss, cfg, bert_net_cfg)

    # define the callbacks
    callback = [TimeMonitor(args_opt.data_sink_steps), BertLossCallBack(ds.get_dataset_size())]

    if args_opt.enable_save_ckpt == "true":
        config_ck = CheckpointConfig(save_checkpoint_steps=args_opt.save_checkpoint_steps,
                                     keep_checkpoint_max=args_opt.save_checkpoint_num)
        ckpoint_cb = ModelCheckpoint(prefix='checkpoint_bert',
                                     directory=None if ckpt_save_dir == "" else ckpt_save_dir, config=config_ck)
        callback.append(ckpoint_cb)

    if args_opt.enable_lossscale == "true":
        update_cell = DynamicLossScaleUpdateCell(loss_scale_value=cfg.loss_scale_value,
                                                 scale_factor=cfg.scale_factor,
                                                 scale_window=cfg.scale_window)
        accumulation_steps = args_opt.accumulation_steps
        enable_global_norm = cfg.enable_global_norm
        if accumulation_steps <= 1:
            if cfg.optimizer == 'AdamWeightDecay' and args_opt.device_target == 'GPU':
                net_with_grads = BertTrainOneStepWithLossScaleCellForAdam(net_with_loss, optimizer=optimizer,
                                                                          scale_update_cell=update_cell)
            else:
                net_with_grads = BertTrainOneStepWithLossScaleCell(net_with_loss, optimizer=optimizer,
                                                                   scale_update_cell=update_cell)
        else:
            allreduce_post = args_opt.distribute == "false" or args_opt.allreduce_post_accumulation == "true"
            net_with_accumulation = (BertTrainAccumulationAllReducePostWithLossScaleLayer if allreduce_post else
                                     BertTrainAccumulationAllReduceEachWithLossScaleLayer)
            net_with_grads = net_with_accumulation(net_with_loss, optimizer=optimizer,
                                                   scale_update_cell=update_cell,
                                                   accumulation_steps=accumulation_steps,
                                                   enable_global_norm=enable_global_norm)
    else:
        net_with_grads = BertTrainOneStepCell(net_with_loss, optimizer=optimizer)

    model = Model(net_with_grads)

    if args_opt.load_checkpoint_path:
        model.load_checkpoint(args_opt.load_checkpoint_path)
    model.train(new_repeat_count, ds, callbacks=callback,
                dataset_sink_mode=(args_opt.enable_data_sink == "true"), sink_size=args_opt.data_sink_steps)


def _set_graph_kernel_context(device_target, enable_graph_kernel, is_auto_enable_graph_kernel):
    if enable_graph_kernel == "true" or is_auto_enable_graph_kernel:
        if device_target == 'GPU':
            context.set_context(enable_graph_kernel=True)
        else:
            logger.warning('Graph kernel only supports GPU back-end now, run with graph kernel off.')
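In the full script, run_pretrain above would typically be invoked from a module-level entry point; a minimal sketch (assuming only the functions already shown) follows.

if __name__ == '__main__':
    # hedged entry-point sketch: invoke the pre-training routine defined above
    run_pretrain()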
Example #8
def run_classifier():
    """run classifier task"""
    parser = argparse.ArgumentParser(description="run classifier")
    parser.add_argument("--device_target",
                        type=str,
                        default="Ascend",
                        choices=["Ascend", "GPU"],
                        help="Device type, default is Ascend")
    parser.add_argument(
        "--assessment_method",
        type=str,
        default="Accuracy",
        choices=["Mcc", "Spearman_correlation", "Accuracy", "F1"],
        help="assessment_method including [Mcc, Spearman_correlation, Accuracy, F1], "
             "default is Accuracy")
    parser.add_argument("--do_train",
                        action="store_true",
                        help="Enable training, default is false")
    parser.add_argument("--do_eval",
                        action="store_true",
                        help="Enable evaluation, default is false")
    parser.add_argument("--device_id",
                        type=int,
                        default=0,
                        help="Device id, default is 0.")
    parser.add_argument("--epoch_num",
                        type=int,
                        default=1,
                        help="Epoch number, default is 1.")
    parser.add_argument("--num_class",
                        type=int,
                        default=2,
                        help="The number of classes, default is 2.")
    parser.add_argument("--train_data_shuffle",
                        type=lambda x: x.lower() == "true",
                        default=True,
                        help="Enable train data shuffle, default is true")
    parser.add_argument("--eval_data_shuffle",
                        type=lambda x: x.lower() == "true",
                        default=False,
                        help="Enable eval data shuffle, default is false")
    parser.add_argument("--save_finetune_checkpoint_path",
                        type=str,
                        default="",
                        help="Save checkpoint path")
    parser.add_argument("--load_pretrain_checkpoint_path",
                        type=str,
                        default="",
                        help="Load pretrained checkpoint file path")
    parser.add_argument("--load_finetune_checkpoint_path",
                        type=str,
                        default="",
                        help="Load finetuned checkpoint file path")
    parser.add_argument("--train_data_file_path",
                        type=str,
                        default="",
                        help="Train data path; it is better to use an absolute path")
    parser.add_argument("--eval_data_file_path",
                        type=str,
                        default="",
                        help="Eval data path; it is better to use an absolute path")
    parser.add_argument("--schema_file_path",
                        type=str,
                        default="",
                        help="Schema path; it is better to use an absolute path")
    args_opt = parser.parse_args()
    epoch_num = args_opt.epoch_num
    assessment_method = args_opt.assessment_method.lower()
    load_pretrain_checkpoint_path = args_opt.load_pretrain_checkpoint_path
    save_finetune_checkpoint_path = args_opt.save_finetune_checkpoint_path
    load_finetune_checkpoint_path = args_opt.load_finetune_checkpoint_path

    if not args_opt.do_train and not args_opt.do_eval:
        raise ValueError(
            "At least one of 'do_train' or 'do_eval' must be true")
    if args_opt.do_train and args_opt.train_data_file_path == "":
        raise ValueError(
            "'train_data_file_path' must be set when running the finetune task")
    if args_opt.do_eval and args_opt.eval_data_file_path == "":
        raise ValueError(
            "'eval_data_file_path' must be set when running the evaluation task")

    if args_opt.device_target == "Ascend":
        context.set_context(mode=context.GRAPH_MODE,
                            device_target="Ascend",
                            device_id=args_opt.device_id)
    elif args_opt.device_target == "GPU":
        context.set_context(mode=context.GRAPH_MODE,
                            device_target="GPU",
                            device_id=args_opt.device_id)
        context.set_context(enable_graph_kernel=True)
        if bert_net_cfg.compute_type != ts.float32:
            logger.warning('GPU only supports fp32 for now; running with fp32.')
            bert_net_cfg.compute_type = ts.float32
    else:
        raise ValueError("Unsupported device target, only GPU or Ascend is supported.")

    netwithloss = BertCLS(bert_net_cfg,
                          True,
                          num_labels=args_opt.num_class,
                          dropout_prob=0.1,
                          assessment_method=assessment_method)

    if args_opt.do_train:
        ds = create_classification_dataset(
            batch_size=optimizer_cfg.batch_size,
            repeat_count=1,
            assessment_method=assessment_method,
            data_file_path=args_opt.train_data_file_path,
            schema_file_path=args_opt.schema_file_path,
            do_shuffle=args_opt.train_data_shuffle)
        do_train(ds, netwithloss, load_pretrain_checkpoint_path,
                 save_finetune_checkpoint_path, epoch_num)

        if args_opt.do_eval:
            if save_finetune_checkpoint_path == "":
                load_finetune_checkpoint_dir = _cur_dir
            else:
                load_finetune_checkpoint_dir = make_directory(
                    save_finetune_checkpoint_path)
            load_finetune_checkpoint_path = LoadNewestCkpt(
                load_finetune_checkpoint_dir, ds.get_dataset_size(), epoch_num,
                "classifier")

    if args_opt.do_eval:
        ds = create_classification_dataset(
            batch_size=optimizer_cfg.batch_size,
            repeat_count=1,
            assessment_method=assessment_method,
            data_file_path=args_opt.eval_data_file_path,
            schema_file_path=args_opt.schema_file_path,
            do_shuffle=args_opt.eval_data_shuffle)
        do_eval(ds, BertCLS, args_opt.num_class, assessment_method,
                load_finetune_checkpoint_path)
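Example #8 also needs an entry point when used as a script. Below is a hedged usage sketch that programmatically sets a few of the flags defined above before calling run_classifier; the device target and the data paths are placeholders, not values from the source.

import sys

if __name__ == '__main__':
    # hedged usage sketch: fine-tune and then evaluate on GPU (placeholder paths)
    sys.argv += ["--device_target", "GPU",
                 "--do_train", "--do_eval",
                 "--train_data_file_path", "/path/to/train.tf_record",
                 "--eval_data_file_path", "/path/to/eval.tf_record"]
    run_classifier()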