Code example #1
    # Resolve the dataset location: download CIFAR-10 when no local
    # path was supplied on the command line.
    if not args_opt.dataset_path:
        args_opt.dataset_path = download_dataset('cifar10')
    # Build the DenseNet-BC-100 network and wrap it in a Model.
    net = densenetBC_100(args_opt.num_classes)
    net.update_parameters_name(prefix='huawei')
    model = Model(net)
    # Loss: sparse softmax cross-entropy, averaged over the batch.
    net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
    # Optimizer: Momentum over trainable parameters only (lr=0.01, momentum=0.9).
    net_opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9)
    model.compile(loss_fn=net_loss, optimizer=net_opt, metrics={"Accuracy": Accuracy()})

    epoch_size = args_opt.epoch_size
    batch_size = args_opt.batch_size
    cifar10_path = args_opt.dataset_path
    save_checkpoint_epochs = args_opt.save_checkpoint_epochs
    # Sink mode is enabled on every target except CPU.
    # (was: `not args_opt.device_target == "CPU"` — same value, clearer form)
    dataset_sink_mode = args_opt.device_target != "CPU"
    if args_opt.do_eval:  # as for evaluation, users could use model.eval
        ds_eval = create_dataset(cifar10_path, batch_size=batch_size, is_training=False)
        # Restore weights from a checkpoint when one was provided.
        if args_opt.checkpoint_path:
            model.load_checkpoint(args_opt.checkpoint_path)
        acc = model.eval(ds_eval, dataset_sink_mode=dataset_sink_mode)
        print("============== Accuracy:{} ==============".format(acc))
    else:  # as for train, users could use model.train
        ds_train = create_dataset(cifar10_path, batch_size=batch_size)
        # Checkpoint every `save_checkpoint_epochs` epochs (expressed in
        # steps), keeping at most the 10 most recent files.
        ckpoint_cb = ModelCheckpoint(prefix="densenetBC_100_cifar10", config=CheckpointConfig(
            save_checkpoint_steps=save_checkpoint_epochs * ds_train.get_dataset_size(),
            keep_checkpoint_max=10))
        model.train(epoch_size, ds_train, callbacks=[ckpoint_cb, LossMonitor()],
                    dataset_sink_mode=dataset_sink_mode)
Code example #2
File: vgg16.py — Project: hellowaywewe/tinyms
    # Attach the loss, optimizer and accuracy metric to the model.
    model.compile(loss_fn=net_loss,
                  optimizer=net_opt,
                  metrics={"Accuracy": Accuracy()})

    epoch_size = args_opt.epoch_size
    batch_size = args_opt.batch_size
    cifar10_path = args_opt.dataset_path
    save_checkpoint_epochs = args_opt.save_checkpoint_epochs
    # Sink mode is enabled on every target except CPU.
    # (was: `not args_opt.device_target == "CPU"` — same value, clearer form)
    dataset_sink_mode = args_opt.device_target != "CPU"
    if args_opt.do_eval:  # as for evaluation, users could use model.eval
        ds_eval = create_dataset(cifar10_path,
                                 batch_size=batch_size,
                                 is_training=False)
        # Only restore a local checkpoint when explicitly requested.
        if args_opt.load_pretrained == 'local':
            if args_opt.checkpoint_path:
                model.load_checkpoint(args_opt.checkpoint_path)
        acc = model.eval(ds_eval, dataset_sink_mode=dataset_sink_mode)
        print("============== Accuracy:{} ==============".format(acc))
    else:  # as for train, users could use model.train
        ds_train = create_dataset(cifar10_path, batch_size=batch_size)
        # Checkpoint every `save_checkpoint_epochs` epochs (expressed in
        # steps), keeping at most the 10 most recent files.
        ckpoint_cb = ModelCheckpoint(
            prefix="vgg16_cifar10",
            config=CheckpointConfig(
                save_checkpoint_steps=save_checkpoint_epochs *
                ds_train.get_dataset_size(),
                keep_checkpoint_max=10))
        model.train(epoch_size,
                    ds_train,
                    callbacks=[ckpoint_cb, LossMonitor()],
                    dataset_sink_mode=dataset_sink_mode)
Code example #3
File: ssd300.py — Project: huxiaoman7/tinyms
        # Momentum optimizer over trainable parameters; the two trailing
        # positional args (1.5e-4, loss_scale) are presumably weight decay
        # and loss scale — confirm against the Momentum signature.
        opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()),
                       lr, 0.9, 1.5e-4, loss_scale)
        # TrainingWrapper bundles network, optimizer and loss scaling into
        # a single trainable cell; compile() then needs no loss/optimizer.
        model = Model(TrainingWrapper(net, opt, loss_scale))
        model.compile()

        # Checkpoint every `save_checkpoint_epochs` epochs (expressed in
        # steps), keeping at most the 10 most recent files.
        ckpoint_cb = ModelCheckpoint(
            prefix="ssd300",
            config=CheckpointConfig(
                save_checkpoint_steps=args_opt.save_checkpoint_epochs *
                dataset_size,
                keep_checkpoint_max=10))
        # Train with loss and per-epoch timing callbacks.
        model.train(epoch_size,
                    ds_train,
                    callbacks=[
                        ckpoint_cb,
                        LossMonitor(),
                        TimeMonitor(data_size=dataset_size)
                    ],
                    dataset_sink_mode=dataset_sink_mode)
    else:  # as for evaluation, users could use model.eval
        # Evaluate image-by-image (batch_size=1) on the VOC eval split.
        ds_eval = create_dataset(voc_path, batch_size=1, is_training=False)
        total = ds_eval.get_dataset_size()
        # define the infer wrapper
        eval_net = ssd300_infer(class_num=args_opt.num_classes)
        model = Model(eval_net)
        # Restore weights from a checkpoint when one was provided.
        if args_opt.checkpoint_path:
            model.load_checkpoint(args_opt.checkpoint_path)
        # perform the model predict operation
        print("\n========================================\n")
        print("total images num: ", total)
        print("Processing, please wait a moment...")