Code example #1
File: classifier_metric.py Project: huawei-noah/vega
    def __call__(self, output, target, *args, **kwargs):
        """Forward and calculate accuracy."""
        if len(self.topk) == 1:
            return Accuracy()

        else:
            return {
                "accuracy": Accuracy(),
                "accuracy_top1": nn.Top1CategoricalAccuracy(),
                "accuracy_top5": nn.Top5CategoricalAccuracy()
            }
Code example #2
def train_lenet_quant():
    context.set_context(mode=context.GRAPH_MODE, device_target=device_target)
    cfg = quant_cfg
    ckpt_path = './ckpt_lenet_noquant-10_1875.ckpt'
    ds_train = create_dataset(os.path.join(data_path, "train"), cfg.batch_size, 1)
    step_size = ds_train.get_dataset_size()

    # define fusion network
    network = LeNet5Fusion(cfg.num_classes)

    # load the non-quantized checkpoint into the fusion network
    param_dict = load_checkpoint(ckpt_path)
    load_nonquant_param_into_quant_net(network, param_dict)

    # convert fusion network to quantization aware network
    network = quant.convert_quant_network(network, quant_delay=900, bn_fold=False, per_channel=[True, False],
                                          symmetric=[False, False])

    # define network loss
    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
    # define network optimization
    net_opt = nn.Momentum(network.trainable_params(), cfg.lr, cfg.momentum)

    # call back and monitor
    config_ckpt = CheckpointConfig(save_checkpoint_steps=cfg.epoch_size * step_size,
                                   keep_checkpoint_max=cfg.keep_checkpoint_max)
    ckpt_callback = ModelCheckpoint(prefix="ckpt_lenet_quant", config=config_ckpt)

    # define model
    model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})

    print("============== Starting Training ==============")
    model.train(cfg['epoch_size'], ds_train, callbacks=[ckpt_callback, LossMonitor()],
                dataset_sink_mode=True)
    print("============== End Training ==============")
Code example #3
File: test_summary.py Project: zuoshou030/mindspore
    def _run_network(self, dataset_sink_mode=True):
        lenet = LeNet5()
        loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False,
                                                sparse=True,
                                                reduction="mean")
        optim = Momentum(lenet.trainable_params(),
                         learning_rate=0.1,
                         momentum=0.9)
        model = Model(lenet,
                      loss_fn=loss,
                      optimizer=optim,
                      metrics={'acc': Accuracy()})
        summary_dir = tempfile.mkdtemp(dir=self.base_summary_dir)
        summary_collector = SummaryCollector(summary_dir=summary_dir,
                                             collect_freq=1)

        ds_train = create_dataset(os.path.join(self.mnist_path, "train"))
        model.train(1,
                    ds_train,
                    callbacks=[summary_collector],
                    dataset_sink_mode=dataset_sink_mode)

        ds_eval = create_dataset(os.path.join(self.mnist_path, "test"))
        model.eval(ds_eval,
                   dataset_sink_mode=dataset_sink_mode,
                   callbacks=[summary_collector])

        self._check_summary_result(summary_dir)
Code example #4
File: train.py Project: kungfu-ml/kungfu-mindspore
def run(args):
    ms.context.set_context(mode=ms.context.GRAPH_MODE,
                           device_target=args.device)
    dataset_sink_mode = False

    download_dataset(args.data_dir)

    # define the loss function
    net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')

    # create the network
    network = LeNet5()
    # define the optimizer
    net_opt = build_optimizer(args, network)
    config_ck = CheckpointConfig(save_checkpoint_steps=1875,
                                 keep_checkpoint_max=10)
    # save the network model and parameters for subsequent fine-tuning
    ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet", config=config_ck)
    # group layers into an object with training and evaluation features
    model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})

    if args.init_ckpt:
        load_ckpt(network, args.init_ckpt)

    train_net(network, model, args, ckpoint_cb, dataset_sink_mode)
Code example #5
def test_net(network, data_path, ckpt):
    """define the evaluation method"""
    print("============== Starting Testing ==============")
    #load the saved model for evaluation
    load_checkpoint(ckpt, net=network)
    #load testing dataset
    ds_eval = create_dataset(False, data_path)

    # config = GPTConfig(batch_size=4,
    #                    seq_length=1024,
    #                    vocab_size=50257,
    #                    embedding_size=1024,
    #                    num_layers=24,
    #                    num_heads=16,
    #                    expand_ratio=4,
    #                    post_layernorm_residual=False,
    #                    dropout_rate=0.1,
    #                    compute_dtype=mstype.float16,
    #                    use_past=False)
    # loss = CrossEntropyLoss(config)

    net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    model = Model(network, net_loss, metrics={"Accuracy": Accuracy()})
    # model = Model(network, net_loss, metrics={"Accuracy": Accuracy()}, amp_level="O3")
    acc = model.eval(ds_eval, dataset_sink_mode=False)
    print("============== Accuracy:{} ==============".format(acc))
Code example #6
    def __init__(self, model=None):
        """Initializing the trainer with the provided model.

        Arguments:
        client_id: The ID of the client using this trainer (optional).
        model: The model to train.
        """
        super().__init__()

        if hasattr(Config().trainer, 'cpuonly') and Config().trainer.cpuonly:
            mindspore.context.set_context(mode=mindspore.context.PYNATIVE_MODE,
                                          device_target='CPU')
        else:
            mindspore.context.set_context(mode=mindspore.context.PYNATIVE_MODE,
                                          device_target='GPU')

        if model is None:
            self.model = models_registry.get()
        else:
            self.model = model

        # Initializing the loss criterion
        loss_criterion = SoftmaxCrossEntropyWithLogits(sparse=True,
                                                       reduction='mean')

        # Initializing the optimizer
        optimizer = nn.Momentum(self.model.trainable_params(),
                                Config().trainer.learning_rate,
                                Config().trainer.momentum)

        self.mindspore_model = mindspore.Model(
            self.model,
            loss_criterion,
            optimizer,
            metrics={"Accuracy": Accuracy()})
Code example #7
def eval_quant():
    context.set_context(mode=context.GRAPH_MODE, device_target=device_target)
    cfg = quant_cfg
    ds_eval = create_dataset(os.path.join(data_path, "test"), cfg.batch_size,
                             1)
    ckpt_path = './ckpt_lenet_quant-10_937.ckpt'
    # define fusion network
    network = LeNet5Fusion(cfg.num_classes)
    # convert fusion network to quantization aware network
    quantizer = QuantizationAwareTraining(quant_delay=0,
                                          bn_fold=False,
                                          freeze_bn=10000,
                                          per_channel=[True, False],
                                          symmetric=[True, False])
    network = quantizer.quantize(network)

    # define loss
    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
    # define network optimization
    net_opt = nn.Momentum(network.trainable_params(), cfg.lr, cfg.momentum)

    # define the model
    model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})

    # load quantization aware network checkpoint
    param_dict = load_checkpoint(ckpt_path)
    not_load_param = load_param_into_net(network, param_dict)
    if not_load_param:
        raise ValueError("Load param into net fail!")

    print("============== Starting Testing ==============")
    acc = model.eval(ds_eval, dataset_sink_mode=True)
    print("============== {} ==============".format(acc))
    assert acc['Accuracy'] > 0.98
Code example #8
File: test_profiler.py Project: yrpang/mindspore
    def test_gpu_profiler(self):
        context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
        profiler = Profiler(output_path='data')
        profiler_name = os.listdir(os.path.join(os.getcwd(), 'data'))[0]
        self.profiler_path = os.path.join(os.getcwd(),
                                          f'data/{profiler_name}/')
        ds_train = create_dataset(os.path.join(self.mnist_path, "train"))
        if ds_train.get_dataset_size() == 0:
            raise ValueError(
                "Please check dataset size > 0 and batch_size <= dataset size")

        lenet = LeNet5()
        loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
        optim = Momentum(lenet.trainable_params(),
                         learning_rate=0.1,
                         momentum=0.9)
        model = Model(lenet,
                      loss_fn=loss,
                      optimizer=optim,
                      metrics={'acc': Accuracy()})

        model.train(1, ds_train, dataset_sink_mode=True)
        profiler.analyse()

        self._check_gpu_profiling_file()
Code example #9
def mnist_train(epoch_size, batch_size, lr, momentum):
    mnist_path = "./MNIST_unzip/"
    ds = generate_mnist_dataset(os.path.join(mnist_path, "train"),
                                batch_size=batch_size,
                                repeat_size=1)

    network = LeNet5()
    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False,
                                                sparse=True,
                                                reduction="mean")
    net_opt = nn.Momentum(network.trainable_params(), lr, momentum)
    config_ck = CheckpointConfig(save_checkpoint_steps=1875,
                                 keep_checkpoint_max=10)
    ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet",
                                 directory="./trained_ckpt_file/",
                                 config=config_ck)
    model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})

    LOGGER.info(TAG, "============== Starting Training ==============")
    model.train(epoch_size,
                ds,
                callbacks=[ckpoint_cb, LossMonitor()],
                dataset_sink_mode=False)

    LOGGER.info(TAG, "============== Starting Testing ==============")
    ckpt_file_name = "trained_ckpt_file/checkpoint_lenet-10_1875.ckpt"
    param_dict = load_checkpoint(ckpt_file_name)
    load_param_into_net(network, param_dict)
    ds_eval = generate_mnist_dataset(os.path.join(mnist_path, "test"),
                                     batch_size=batch_size)
    acc = model.eval(ds_eval, dataset_sink_mode=False)
    LOGGER.info(TAG, "============== Accuracy: %s ==============", acc)
Code example #10
    def __eval(self):
        # import
        from mindspore import Model, load_param_into_net, load_checkpoint
        from mindspore.nn.metrics import Accuracy

        # load params
        if self.__ckpt_path:
            load_param_into_net(self.__network,
                                load_checkpoint(self.__ckpt_path))
        else:
            print(
                "Warning: `ckpt_path` is None. Please call `set_ckpt_path(ckpt_path)` first."
            )
            return

        # loss_fn & optimizer & metrics
        model = Model(self.__network,
                      loss_fn=self.__loss_fn,
                      optimizer=self.__optimizer,
                      metrics={"Accuracy": Accuracy()}
                      if self.__metrics is None else self.__metrics)

        # eval
        print(">>>>>>>>>>>>>>>>>>>>> eval start ... <<<<<<<<<<<<<<<<<<<<<<")
        result = model.eval(self.__dataset)
        print(
            ">>>>>>>>>>>>>>>>>>>>> eval success ~ <<<<<<<<<<<<<<<<<<<<<<: result=",
            result)
Code example #11
def eval_net():
    '''eval net'''
    if config.dataset == 'MR':
        instance = MovieReview(root_dir=config.data_path, maxlen=config.word_len, split=0.9)
    elif config.dataset == 'SUBJ':
        instance = Subjectivity(root_dir=config.data_path, maxlen=config.word_len, split=0.9)
    elif config.dataset == 'SST2':
        instance = SST2(root_dir=config.data_path, maxlen=config.word_len, split=0.9)
    device_target = config.device_target
    context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target)
    if device_target == "Ascend":
        context.set_context(device_id=get_device_id())
    dataset = instance.create_test_dataset(batch_size=config.batch_size)
    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
    net = TextCNN(vocab_len=instance.get_dict_len(), word_len=config.word_len,
                  num_classes=config.num_classes, vec_length=config.vec_length)
    opt = nn.Adam(filter(lambda x: x.requires_grad, net.get_parameters()), learning_rate=0.001,
                  weight_decay=float(config.weight_decay))

    param_dict = load_checkpoint(config.checkpoint_file_path)
    print("load checkpoint from [{}].".format(config.checkpoint_file_path))

    load_param_into_net(net, param_dict)
    net.set_train(False)
    model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc': Accuracy()})

    acc = model.eval(dataset)
    print("accuracy: ", acc)
Code example #12
def train_lenet5():
    """
    Train lenet5
    """
    config.ckpt_path = config.output_path
    context.set_context(mode=context.GRAPH_MODE,
                        device_target=config.device_target)
    ds_train = create_lenet_dataset(os.path.join(config.data_path, "train"),
                                    config.batch_size,
                                    num_parallel_workers=1)
    if ds_train.get_dataset_size() == 0:
        raise ValueError(
            "Please check dataset size > 0 and batch_size <= dataset size")

    network = LeNet5(config.num_classes)
    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
    net_opt = nn.Momentum(network.trainable_params(), config.lr,
                          config.momentum)
    time_cb = TimeMonitor(data_size=ds_train.get_dataset_size())
    config_ck = CheckpointConfig(
        save_checkpoint_steps=config.save_checkpoint_steps,
        keep_checkpoint_max=config.keep_checkpoint_max)
    ckpoint_cb = ModelCheckpoint(
        prefix="checkpoint_lenet",
        directory=None if config.ckpt_path == "" else config.ckpt_path,
        config=config_ck)

    if config.device_target != "Ascend":
        model = Model(network,
                      net_loss,
                      net_opt,
                      metrics={"Accuracy": Accuracy()})
    else:
        model = Model(network,
                      net_loss,
                      net_opt,
                      metrics={"Accuracy": Accuracy()},
                      amp_level="O2")

    print("============== Starting Training ==============")
    model.train(config.epoch_size,
                ds_train,
                callbacks=[time_cb, ckpoint_cb,
                           LossMonitor()])
Code example #13
File: test_accuracy.py Project: zsangel378/mindspore
def test_multilabel_accuracy():
    x = Tensor(np.array([[0, 1, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]]))
    y = Tensor(np.array([[0, 1, 1, 1], [0, 1, 1, 1], [0, 0, 0, 1]]))
    metric = Accuracy('multilabel')
    metric.clear()
    metric.update(x, y)
    accuracy = metric.eval()
    assert accuracy == 1 / 3
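The asserted 1/3 follows from the sample-wise exact-match rule for multilabel accuracy: only the third row of `x` agrees with `y` on every label. A minimal NumPy sketch of that arithmetic, as an illustration rather than the metric's actual implementation:

import numpy as np

x = np.array([[0, 1, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
y = np.array([[0, 1, 1, 1], [0, 1, 1, 1], [0, 0, 0, 1]])

# a sample counts as correct only when every label matches
per_sample_match = (x == y).all(axis=1)   # [False, False, True]
print(per_sample_match.mean())            # 0.333... == 1 / 3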
Code example #14
File: test_accuracy.py Project: MirrorFork/mindspore
def test_classification_accuracy_indexes_awareness():
    """A indexes aware version of test_classification_accuracy"""
    x = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]]))
    y = Tensor(np.array([1, 0, 1]))
    y2 = Tensor(np.array([0, 0, 1]))
    metric = Accuracy('classification').set_indexes([0, 2])
    metric.clear()
    metric.update(x, y, y2)
    accuracy = metric.eval()
    assert math.isclose(accuracy, 1 / 3)
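Here `set_indexes([0, 2])` tells the metric which `update()` inputs to read: position 0 as the prediction and position 2 (`y2`) as the label, so `y` is ignored. The 1/3 is then an ordinary argmax comparison; a hedged NumPy sketch of that calculation, illustrative only:

import numpy as np

x = np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]])
y2 = np.array([0, 0, 1])

# predicted class is the argmax of each row of x: [1, 0, 0]
pred = x.argmax(axis=1)
print((pred == y2).mean())   # 0.333... == 1 / 3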
Code example #15
File: test_accuracy.py Project: zsangel378/mindspore
def test_shape_accuracy4():
    x = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]]))
    y = Tensor(np.array(1))
    metric = Accuracy('classification')
    metric.clear()
    with pytest.raises(ValueError):
        metric.update(x, y)
Code example #16
File: test_accuracy.py Project: zsangel378/mindspore
def test_shape_accuracy2():
    x = Tensor(np.array([[0, 1, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]]))
    y = Tensor(np.array([0, 1, 1, 1]))
    metric = Accuracy('multilabel')
    metric.clear()
    with pytest.raises(ValueError):
        metric.update(x, y)
Code example #17
def eval_alexnet():
    print("============== Starting Testing ==============")

    device_num = get_device_num()
    if device_num > 1:
        # context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target)
        context.set_context(mode=context.GRAPH_MODE,
                            device_target='Davinci',
                            save_graphs=False)
        if config.device_target == "Ascend":
            context.set_context(device_id=get_device_id())
            init()
        elif config.device_target == "GPU":
            init()

    if config.dataset_name == 'cifar10':
        network = AlexNet(config.num_classes, phase='test')
        loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
        opt = nn.Momentum(network.trainable_params(), config.learning_rate,
                          config.momentum)
        ds_eval = create_dataset_cifar10(config.data_path, config.batch_size, status="test", \
            target=config.device_target)
        param_dict = load_checkpoint(load_path)
        print("load checkpoint from [{}].".format(load_path))
        load_param_into_net(network, param_dict)
        network.set_train(False)
        model = Model(network, loss, opt, metrics={"Accuracy": Accuracy()})

    elif config.dataset_name == 'imagenet':
        network = AlexNet(config.num_classes, phase='test')
        loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
        ds_eval = create_dataset_imagenet(config.data_path,
                                          config.batch_size,
                                          training=False)
        param_dict = load_checkpoint(load_path)
        print("load checkpoint from [{}].".format(load_path))
        load_param_into_net(network, param_dict)
        network.set_train(False)
        model = Model(network,
                      loss_fn=loss,
                      metrics={'top_1_accuracy', 'top_5_accuracy'})

    else:
        raise ValueError("Unsupported dataset.")

    if ds_eval.get_dataset_size() == 0:
        raise ValueError(
            "Please check dataset size > 0 and batch_size <= dataset size")

    result = model.eval(ds_eval, dataset_sink_mode=config.dataset_sink_mode)
    print("result : {}".format(result))
Code example #18
File: test_accuracy.py Project: zsangel378/mindspore
def test_classification_accuracy():
    """test_classification_accuracy"""
    x = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]]))
    y = Tensor(np.array([1, 0, 1]))
    y2 = Tensor(np.array([[0, 1], [1, 0], [0, 1]]))
    metric = Accuracy('classification')
    metric.clear()
    metric.update(x, y)
    accuracy = metric.eval()
    accuracy2 = metric(x, y2)
    assert math.isclose(accuracy, 2 / 3)
    assert math.isclose(accuracy2, 2 / 3)
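This test feeds the same logits with the labels both as class indices (`y`) and one-hot encoded (`y2`) and gets 2/3 either way. A small NumPy sketch, assuming the usual argmax reading of logits and one-hot labels, of why the two forms agree:

import numpy as np

x = np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]])
y = np.array([1, 0, 1])                      # class indices
y2 = np.array([[0, 1], [1, 0], [0, 1]])      # the same labels, one-hot encoded

pred = x.argmax(axis=1)                      # [1, 0, 0]
print((pred == y).mean())                    # 0.666... == 2 / 3
print((pred == y2.argmax(axis=1)).mean())    # the one-hot form gives the same result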
Code example #19
def test_all_trains():
    ds_train = create_dataset(
        os.path.join('/home/workspace/mindspore_dataset/mnist', "train"), 32,
        1)

    network = LeNet5(10)
    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
    net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
    time_cb = TimeMonitor(data_size=ds_train.get_dataset_size())

    model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})

    print("============== Starting Training ==============")
    model.train(1, ds_train, callbacks=[time_cb, LossMonitor()])
Code example #20
def train_lenet():
    context.set_context(mode=context.GRAPH_MODE, save_graphs=True, device_target="CPU")
    dataset_sink_mode = False

    # download mnist dataset
    download_dataset()

    # learning rate setting
    lr = 0.01
    momentum = 0.9
    epoch_size = 1
    mnist_path = "../MNIST_Data"

    # define the loss function
    net_loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
    repeat_size = epoch_size

    # create the network
    network = LeNet5()

    # define the optimizer
    net_opt = nn.Momentum(network.trainable_params(), lr, momentum)
    config_ck = CheckpointConfig(save_checkpoint_steps=1875, keep_checkpoint_max=10)

    # save the network model and parameters for subsequent fine-tuning
    ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet", config=config_ck)

    # group layers into an object with training and evaluation features
    model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})

    summary_writer = SummaryRecord(log_dir="../../summary", network=network)
    summary_callback = SummaryStep(summary_writer, flush_step=10)

    # Init TrainLineage to record the training information
    train_callback = TrainLineage(summary_writer)

    train_net(
        model,
        epoch_size,
        mnist_path,
        repeat_size,
        ckpoint_cb,
        dataset_sink_mode,
        callbacks=[summary_callback, train_callback],
    )

    test_net(network, model, mnist_path)

    summary_writer.close()
Code example #21
def test_train_and_eval_lenet():
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    network = LeNet5(10)
    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
    net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
    model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})

    print("============== Starting Training ==============")
    ds_train = create_dataset(os.path.join('/home/workspace/mindspore_dataset/mnist', "train"), 32, 1)
    model.train(1, ds_train, callbacks=[LossMonitor()], dataset_sink_mode=True)

    print("============== Starting Testing ==============")
    ds_eval = create_dataset(os.path.join('/home/workspace/mindspore_dataset/mnist', "test"), 32, 1)
    acc = model.eval(ds_eval, dataset_sink_mode=True)
    print("============== {} ==============".format(acc))
Code example #22
def train_net(args, epoch_size, data_path, eval_per_epoch, repeat_size,
              ckpoint_cb, sink_mode):
    """define the training method"""
    print("============== Starting Training ==============")
    # Create training dataset
    ds_train = create_dataset(args, True, data_path, 32, repeat_size)
    # Initialise model
    model = Model(resnet, net_loss, net_opt, metrics={"Accuracy": Accuracy()})
    # model = Model(resnet, net_loss, net_opt, metrics={"Accuracy": Accuracy()}, amp_level="O3") # this will not work for CPU
    epoch_per_eval = {"epoch": [], "acc": []}
    eval_cb = Evalcb(model, ds_train, eval_per_epoch, epoch_per_eval)
    model.train(epoch_size,
                ds_train,
                callbacks=[ckpoint_cb, LossMonitor(), eval_cb],
                dataset_sink_mode=sink_mode)
Code example #23
File: eval.py Project: peixinhou/mindspore
def eval_lenet5():
    """Evaluation of lenet5"""
    context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target)

    network = LeNet5(config.num_classes)
    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
    net_opt = nn.Momentum(network.trainable_params(), config.lr, config.momentum)
    model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})

    print("============== Starting Testing ==============")
    load_checkpoint(config.ckpt_path, network)
    ds_eval = create_lenet_dataset(os.path.join(config.data_path, "test"), config.batch_size, 1)
    if ds_eval.get_dataset_size() == 0:
        raise ValueError("Please check dataset size > 0 and batch_size <= dataset size")

    acc = model.eval(ds_eval)
    print("============== {} ==============".format(acc))
Code example #24
def train_lenet():
    context.set_context(mode=context.GRAPH_MODE, device_target=device_target)
    cfg = nonquant_cfg
    ds_train = create_dataset(os.path.join(data_path, "train"),
                              cfg.batch_size)

    network = LeNet5(cfg.num_classes)
    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
    net_opt = nn.Momentum(network.trainable_params(), cfg.lr, cfg.momentum)
    time_cb = TimeMonitor(data_size=ds_train.get_dataset_size())
    config_ck = CheckpointConfig(save_checkpoint_steps=cfg.save_checkpoint_steps,
                                 keep_checkpoint_max=cfg.keep_checkpoint_max)
    ckpoint_cb = ModelCheckpoint(prefix="ckpt_lenet_noquant", config=config_ck)
    model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})

    print("============== Starting Training Lenet==============")
    model.train(cfg['epoch_size'], ds_train, callbacks=[time_cb, ckpoint_cb, LossMonitor()],
                dataset_sink_mode=True)
Code example #25
File: main.py Project: mindspore-ai/course
def train(Net):
    ds_train, ds_test = create_dataset()
    # build the network
    network = Net(cfg.num_classes)
    # define the model's loss function and optimizer
    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
    net_opt = nn.Adam(network.trainable_params(), cfg.lr)
    # train the model
    model = Model(network,
                  loss_fn=net_loss,
                  optimizer=net_opt,
                  metrics={'acc': Accuracy()})
    loss_cb = LossMonitor()
    print("============== Starting Training ==============")
    model.train(30, ds_train, callbacks=[loss_cb], dataset_sink_mode=True)
    # evaluate
    metric = model.eval(ds_test)
    print(metric)

    return model
Code example #26
def eval_lenet():
    context.set_context(mode=context.GRAPH_MODE, device_target=device_target)
    cfg = nonquant_cfg
    ds_eval = create_dataset(os.path.join(data_path, "test"), cfg.batch_size,
                             1)
    ckpt_path = './ckpt_lenet_noquant-10_1875.ckpt'
    # define the network
    network = LeNet5(cfg.num_classes)
    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
    net_opt = nn.Momentum(network.trainable_params(), cfg.lr, cfg.momentum)
    # define the model
    model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})
    # load the trained (non-quantized) network checkpoint
    param_dict = load_checkpoint(ckpt_path)
    not_load_param = load_param_into_net(network, param_dict)
    if not_load_param:
        raise ValueError("Load param into net fail!")

    print("============== Starting Testing ==============")
    acc = model.eval(ds_eval, dataset_sink_mode=True)
    print("============== {} ==============".format(acc))
    assert acc['Accuracy'] > 0.98
Code example #27
def mnist_train(epoch_size, batch_size, lr, momentum):
    context.set_context(mode=context.GRAPH_MODE,
                        device_target="Ascend",
                        enable_mem_reuse=False)

    mnist_path = "./MNIST_unzip/"
    ds = generate_mnist_dataset(os.path.join(mnist_path, "train"),
                                batch_size=batch_size,
                                repeat_size=1)

    network = LeNet5()
    network.set_train()
    net_loss = CrossEntropyLoss()
    net_opt = nn.Momentum(network.trainable_params(), lr, momentum)
    config_ck = CheckpointConfig(save_checkpoint_steps=1875,
                                 keep_checkpoint_max=10)
    ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet",
                                 directory='./trained_ckpt_file/',
                                 config=config_ck)
    model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})

    LOGGER.info(TAG, "============== Starting Training ==============")
    model.train(epoch_size,
                ds,
                callbacks=[ckpoint_cb, LossMonitor()],
                dataset_sink_mode=False)  # train

    LOGGER.info(TAG, "============== Starting Testing ==============")
    param_dict = load_checkpoint(
        "trained_ckpt_file/checkpoint_lenet-10_1875.ckpt")
    load_param_into_net(network, param_dict)
    ds_eval = generate_mnist_dataset(os.path.join(mnist_path, "test"),
                                     batch_size=batch_size)
    acc = model.eval(ds_eval)
    LOGGER.info(TAG, "============== Accuracy: %s ==============", acc)
Code example #28
def test_suppress_model_with_pynative_mode():
    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
    networks_l5 = LeNet5()
    epochs = 5
    batch_num = 10
    mask_times = 10
    lr = 0.01
    masklayers_lenet5 = []
    masklayers_lenet5.append(MaskLayerDes("conv1.weight", 0, False, False, -1))
    suppress_ctrl_instance = SuppressPrivacyFactory().create(networks_l5,
                                                             masklayers_lenet5,
                                                             policy="local_train",
                                                             end_epoch=epochs,
                                                             batch_num=batch_num,
                                                             start_epoch=1,
                                                             mask_times=mask_times,
                                                             lr=lr,
                                                             sparse_end=0.50,
                                                             sparse_start=0.0)
    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
    net_opt = nn.SGD(networks_l5.trainable_params(), lr)
    model_instance = SuppressModel(
        network=networks_l5,
        loss_fn=net_loss,
        optimizer=net_opt,
        metrics={"Accuracy": Accuracy()})
    model_instance.link_suppress_ctrl(suppress_ctrl_instance)
    suppress_masker = SuppressMasker(model=model_instance, suppress_ctrl=suppress_ctrl_instance)
    config_ck = CheckpointConfig(save_checkpoint_steps=batch_num, keep_checkpoint_max=10)
    ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet",
                                 directory="./trained_ckpt_file/",
                                 config=config_ck)
    ds_train = ds.GeneratorDataset(dataset_generator, ['data', 'label'])

    model_instance.train(epochs, ds_train, callbacks=[ckpoint_cb, LossMonitor(), suppress_masker],
                         dataset_sink_mode=False)
Code example #29
    parser.add_argument('--ckpt_path',
                        type=str,
                        help='path where the trained ckpt file is saved')
    parser.add_argument('--dataset_sink_mode',
                        type=bool,
                        default=False,
                        help='dataset_sink_mode is False or True')

    args = parser.parse_args()

    context.set_context(mode=context.GRAPH_MODE,
                        device_target=args.device_target)

    network = LeNet5(cfg.num_classes)
    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False,
                                                sparse=True,
                                                reduction="mean")
    repeat_size = cfg.epoch_size
    net_opt = nn.Momentum(network.trainable_params(), cfg.lr, cfg.momentum)
    config_ck = CheckpointConfig(
        save_checkpoint_steps=cfg.save_checkpoint_steps,
        keep_checkpoint_max=cfg.keep_checkpoint_max)
    ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet", config=config_ck)
    model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})

    print("============== Starting Testing ==============")
    param_dict = load_checkpoint(args.ckpt_path)
    load_param_into_net(network, param_dict)
    ds_eval = create_dataset(os.path.join(args.data_path, "test"),
                             cfg.batch_size, 1)
    acc = model.eval(ds_eval, dataset_sink_mode=args.dataset_sink_mode)
    print("============== {} ==============".format(acc))
Code example #30
def mnist_suppress_train(epoch_size=10,
                         start_epoch=3,
                         lr=0.05,
                         samples=10000,
                         mask_times=1000,
                         sparse_thd=0.90,
                         sparse_start=0.0,
                         masklayers=None):
    """
    local train by suppress-based privacy
    """

    networks_l5 = LeNet5()
    suppress_ctrl_instance = SuppressPrivacyFactory().create(
        networks_l5,
        masklayers,
        policy="local_train",
        end_epoch=epoch_size,
        batch_num=(int)(samples / cfg.batch_size),
        start_epoch=start_epoch,
        mask_times=mask_times,
        lr=lr,
        sparse_end=sparse_thd,
        sparse_start=sparse_start)
    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
    net_opt = nn.SGD(networks_l5.trainable_params(), lr)
    config_ck = CheckpointConfig(save_checkpoint_steps=(int)(samples /
                                                             cfg.batch_size),
                                 keep_checkpoint_max=10)

    # Create the SuppressModel model for training.
    model_instance = SuppressModel(network=networks_l5,
                                   loss_fn=net_loss,
                                   optimizer=net_opt,
                                   metrics={"Accuracy": Accuracy()})
    model_instance.link_suppress_ctrl(suppress_ctrl_instance)

    # Create a Masker for Suppress training. The function of the Masker is to
    # enforce suppress operation while training.
    suppress_masker = SuppressMasker(model=model_instance,
                                     suppress_ctrl=suppress_ctrl_instance)

    mnist_path = "./MNIST_unzip/"  #"../../MNIST_unzip/"
    ds_train = generate_mnist_dataset(os.path.join(mnist_path, "train"),
                                      batch_size=cfg.batch_size,
                                      repeat_size=1,
                                      samples=samples)

    ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet",
                                 directory="./trained_ckpt_file/",
                                 config=config_ck)

    print("============== Starting SUPP Training ==============")
    model_instance.train(
        epoch_size,
        ds_train,
        callbacks=[ckpoint_cb, LossMonitor(), suppress_masker],
        dataset_sink_mode=False)

    print("============== Starting SUPP Testing ==============")
    ds_eval = generate_mnist_dataset(os.path.join(mnist_path, 'test'),
                                     batch_size=cfg.batch_size)
    acc = model_instance.eval(ds_eval, dataset_sink_mode=False)
    print("============== SUPP Accuracy: %s  ==============", acc)

    suppress_ctrl_instance.print_paras()