Example 1
# imports assumed from the TinyMS package layout
import tinyms as ts
from tinyms import context
from tinyms.model import Model, ssd300_infer


def test_ssd300_infer():
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")

    model = Model(ssd300_infer())
    model.compile()
    loc, score = model.predict(ts.ones((1, 3, 300, 300)))
    print(loc.asnumpy(), score.asnumpy())
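Since ssd300_infer builds the network in inference mode, predict returns two tensors, the box localization predictions and the per-class scores, instead of a single output.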
Example 2
# imports assumed from the TinyMS package layout
import tinyms as ts
from tinyms import context
from tinyms.model import Model, densenetBC_100


def test_densenetBC_100():
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")

    model = Model(densenetBC_100())
    model.compile()
    z = model.predict(ts.ones((1, 3, 32, 32)))
    print(z.asnumpy())
Example 3
# imports assumed from the TinyMS package layout
import tinyms as ts
from tinyms import context
from tinyms.model import Model, mobilenetv2_infer


def test_mobilenetv2_infer():
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")

    model = Model(mobilenetv2_infer())
    model.compile()
    z = model.predict(ts.ones((1, 3, 224, 224)))
    print(z.asnumpy())
Example 4
# imports assumed from the TinyMS package layout
import tinyms as ts
from tinyms import context, layers
from tinyms.model import Model


def test_sequential():
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")

    net = layers.SequentialLayer([
        layers.Conv2d(1, 6, 5, pad_mode='valid', weight_init="ones"),
        layers.ReLU(),
        layers.MaxPool2d(kernel_size=2, stride=2)
    ])
    model = Model(net)
    model.compile()
    z = model.predict(ts.ones((1, 1, 32, 32)))
    print(z.asnumpy())
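With pad_mode='valid', the 5x5 convolution shrinks the 32x32 input to 28x28, and the 2x2 max-pool with stride 2 halves that to 14x14, so z has shape (1, 6, 14, 14).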
Example 5
    # download the cifar10 dataset if no path is given
    if not args_opt.dataset_path:
        args_opt.dataset_path = download_dataset('cifar10')
    # build the network
    if args_opt.do_eval and args_opt.load_pretrained == 'hub':
        from tinyms import hub
        net = hub.load(args_opt.hub_uid, class_num=args_opt.num_classes)
    else:
        net = vgg16(class_num=args_opt.num_classes)
    net.update_parameters_name(prefix='huawei')
    model = Model(net)
    # define the loss function
    net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
    # define the optimizer
    net_opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()),
                       0.01, 0.9)
    model.compile(loss_fn=net_loss,
                  optimizer=net_opt,
                  metrics={"Accuracy": Accuracy()})

    epoch_size = args_opt.epoch_size
    batch_size = args_opt.batch_size
    cifar10_path = args_opt.dataset_path
    save_checkpoint_epochs = args_opt.save_checkpoint_epochs
    dataset_sink_mode = args_opt.device_target != "CPU"
    if args_opt.do_eval:  # for evaluation, use model.eval
        ds_eval = create_dataset(cifar10_path,
                                 batch_size=batch_size,
                                 is_training=False)
        if args_opt.load_pretrained == 'local':
            if args_opt.checkpoint_path:
                model.load_checkpoint(args_opt.checkpoint_path)
        acc = model.eval(ds_eval, dataset_sink_mode=dataset_sink_mode)
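When load_pretrained is 'hub', the network comes from tinyms.hub with pretrained weights already attached, so model.load_checkpoint is only needed in the 'local' case.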
Example 6
    # define the warmup/decay learning rate schedule
    lr_max = 0.001
    lr_init_scale = 0.01
    lr_end_scale = 0.01
    lr = mobilenetv2_lr(global_step=0,
                        lr_init=lr_max * lr_init_scale,
                        lr_end=lr_max * lr_end_scale,
                        lr_max=lr_max,
                        warmup_epochs=2,
                        total_epochs=epoch_size,
                        steps_per_epoch=step_size)
    # define the optimizer
    loss_scale = FixedLossScaleManager(1024, drop_overflow_update=False)
    opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr,
                   0.9, 4e-5, 1024)
    model.compile(loss_fn=loss,
                  optimizer=opt,
                  metrics={"Accuracy": Accuracy()},
                  loss_scale_manager=loss_scale)

    if not args_opt.do_eval:  # for training, use model.train
        # configure checkpoint to save weights and do training job
        save_checkpoint_epochs = args_opt.save_checkpoint_epochs
        ckpoint_cb = mobilenetv2_cb(
            device_target=args_opt.device_target,
            lr=lr,
            is_saving_checkpoint=args_opt.is_saving_checkpoint,
            save_checkpoint_epochs=args_opt.save_checkpoint_epochs,
            step_size=step_size)
        model.train(epoch_size,
                    ds_train,
                    callbacks=ckpoint_cb,
                    dataset_sink_mode=dataset_sink_mode)
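The 1024 passed as the optimizer's loss_scale matches the FixedLossScaleManager value; with drop_overflow_update=False the scale stays fixed, and the optimizer is expected to unscale the gradients by that same constant.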
Example 7
        init_net_param(net)
        # define the optimizer
        lr = ssd300_lr(global_step=args_opt.pre_trained_epoch_size *
                       dataset_size,
                       lr_init=0.001,
                       lr_end=0.001 * args_opt.lr,
                       lr_max=args_opt.lr,
                       warmup_epochs=2,
                       total_epochs=args_opt.epoch_size,
                       steps_per_epoch=dataset_size)
        loss_scale = 1.0 if args_opt.device_target == "CPU" else float(
            args_opt.loss_scale)
        opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()),
                       lr, 0.9, 1.5e-4, loss_scale)
        model = Model(TrainingWrapper(net, opt, loss_scale))
        model.compile()

        ckpoint_cb = ModelCheckpoint(
            prefix="ssd300",
            config=CheckpointConfig(
                save_checkpoint_steps=args_opt.save_checkpoint_epochs *
                dataset_size,
                keep_checkpoint_max=10))
        model.train(epoch_size,
                    ds_train,
                    callbacks=[
                        ckpoint_cb,
                        LossMonitor(),
                        TimeMonitor(data_size=dataset_size)
                    ],
                    dataset_sink_mode=dataset_sink_mode)
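Because TrainingWrapper already bundles the network, loss, and optimizer (including the loss scale), model.compile() is called here without loss_fn or optimizer arguments.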
Example 8
# imports assumed from the TinyMS package layout; parse_args and
# create_dataset are helpers defined elsewhere in this script
import os

from tinyms import context
from tinyms.data import download_dataset
from tinyms.model import Model, lenet5
from tinyms.losses import SoftmaxCrossEntropyWithLogits
from tinyms.optimizers import Momentum
from tinyms.metrics import Accuracy

if __name__ == "__main__":
    args_opt = parse_args()
    context.set_context(mode=context.GRAPH_MODE,
                        device_target=args_opt.device_target)

    # download mnist dataset
    if not args_opt.dataset_path:
        args_opt.dataset_path = download_dataset('mnist')
    # build the network
    net = lenet5()
    model = Model(net)
    # define the loss function
    net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    # define the optimizer
    net_opt = Momentum(net.trainable_params(), 0.01, 0.9)
    model.compile(net_loss, net_opt, metrics={"Accuracy": Accuracy()})

    epoch_size = args_opt.epoch_size
    batch_size = args_opt.batch_size
    mnist_path = args_opt.dataset_path
    dataset_sink_mode = args_opt.device_target != "CPU"

    if args_opt.do_eval:  # for evaluation, use model.eval
        print("============== Starting Evaluating ==============")
        # load testing dataset
        ds_eval = create_dataset(os.path.join(mnist_path, "test"))
        # load the saved model for evaluation
        if args_opt.checkpoint_path:
            model.load_checkpoint(args_opt.checkpoint_path)
        acc = model.eval(ds_eval, dataset_sink_mode=dataset_sink_mode)
        print("============== Accuracy:{} ==============".format(acc))
Example 9
    net = DeepFM(field_size=39,
                 vocab_size=184965,
                 embed_size=80,
                 convert_dtype=True)
    # build train network
    train_net = DeepFMTrainModel(DeepFMWithLoss(net))
    # build eval network
    eval_net = DeepFMEvalModel(net)
    # build model
    model = Model(train_net)
    # loss/ckpt/metric callbacks
    loss_tm = LossTimeMonitorV2()
    config_ckpt = CheckpointConfig(save_checkpoint_steps=data_size // 100,
                                   keep_checkpoint_max=10)
    model_ckpt = ModelCheckpoint(prefix='deepfm',
                                 directory=checkpoint_dir,
                                 config=config_ckpt)
    auc_metric = AUCMetric()

    model.compile(eval_network=eval_net,
                  metrics={"auc_metric": auc_metric},
                  amp_level='O0')
    print("====== start train model ======", flush=True)
    model.train(epoch=epoch_size,
                train_dataset=train_ds,
                callbacks=[loss_tm, model_ckpt],
                dataset_sink_mode=dataset_sink_mode)
    print("====== start eval model ======", flush=True)
    acc = model.eval(eval_ds)
    print("====== eval acc: {} ======".format(acc), flush=True)