Code example #1
def test_group_lr():
    inputs = Tensor(np.ones([1, 1, 32, 32]).astype(np.float32) * 0.01)
    label = Tensor(np.ones([1, 10]).astype(np.float32))

    net = LeNet5()
    conv_lr = 0.8
    default_lr = 0.1
    conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
    no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
    group_params = [{'params': conv_params, 'lr': conv_lr},
                    {'params': no_conv_params}]
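    # Parameter groups that specify 'lr' use it (the conv layers here); the remaining
    # group falls back to the optimizer's default learning_rate, as asserted below.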
    net.set_train()
    loss = nn.SoftmaxCrossEntropyWithLogits()

    opt = Momentum(group_params, learning_rate=default_lr, momentum=0.9)
    assert opt.is_group is True
    assert opt.dynamic_lr is False
    for lr, param in zip(opt.learning_rate, opt.parameters):
        if param in conv_params:
            assert lr.data == Tensor(conv_lr, mstype.float32)
        else:
            assert lr.data == Tensor(default_lr, mstype.float32)

    net_with_loss = WithLossCell(net, loss)
    train_network = TrainOneStepCell(net_with_loss, opt)
    _executor.compile(train_network, inputs, label)
Code example #2
File: test_callback.py  Project: opendlf/mindspore
def test_checkpoint_save_ckpt_seconds():
    """Test checkpoint save ckpt seconds."""
    train_config = CheckpointConfig(save_checkpoint_steps=16,
                                    save_checkpoint_seconds=100,
                                    keep_checkpoint_max=0,
                                    keep_checkpoint_per_n_minutes=1)
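    # Both step- and time-based save triggers are configured; with keep_checkpoint_max=0,
    # retention is presumably governed by keep_checkpoint_per_n_minutes instead.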
    ckpt_cb = ModelCheckpoint(config=train_config)
    cb_params = _InternalCallbackParam()
    net = Net()
    loss = nn.SoftmaxCrossEntropyWithLogits()
    optim = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
    network_ = WithLossCell(net, loss)
    _train_network = TrainOneStepCell(network_, optim)
    cb_params.train_network = _train_network
    cb_params.epoch_num = 10
    cb_params.cur_epoch_num = 4
    cb_params.cur_step_num = 128
    cb_params.batch_num = 32
    run_context = RunContext(cb_params)
    ckpt_cb.begin(run_context)
    ckpt_cb.step_end(run_context)
    ckpt_cb2 = ModelCheckpoint(config=train_config)
    cb_params.cur_epoch_num = 1
    cb_params.cur_step_num = 16
    ckpt_cb2.begin(run_context)
    ckpt_cb2.step_end(run_context)
Code example #3
File: test_training.py  Project: opendlf/mindspore
def get_model(metrics=None):
    """ get_model """
    net = Net()
    loss = nn.SoftmaxCrossEntropyWithLogits()
    optim = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
    model = Model(net, loss_fn=loss, optimizer=optim, metrics=metrics)
    return model
Code example #4
def test_checkpoint_save_ckpt_with_encryption():
    """Test checkpoint save ckpt with encryption."""
    train_config = CheckpointConfig(
        save_checkpoint_steps=16,
        save_checkpoint_seconds=0,
        keep_checkpoint_max=5,
        keep_checkpoint_per_n_minutes=0,
        enc_key=os.urandom(16),
        enc_mode="AES-GCM")
    ckpt_cb = ModelCheckpoint(config=train_config)
    cb_params = _InternalCallbackParam()
    net = Net()
    loss = nn.SoftmaxCrossEntropyWithLogits()
    optim = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
    network_ = WithLossCell(net, loss)
    _train_network = TrainOneStepCell(network_, optim)
    cb_params.train_network = _train_network
    cb_params.epoch_num = 10
    cb_params.cur_epoch_num = 5
    cb_params.cur_step_num = 160
    cb_params.batch_num = 32
    run_context = RunContext(cb_params)
    ckpt_cb.begin(run_context)
    ckpt_cb.step_end(run_context)
    ckpt_cb2 = ModelCheckpoint(config=train_config)
    cb_params.cur_epoch_num = 1
    cb_params.cur_step_num = 15

    # Checkpoint encryption is not supported on Windows, so saving is expected to raise NotImplementedError there.
    if platform.system().lower() == "windows":
        with pytest.raises(NotImplementedError):
            ckpt_cb2.begin(run_context)
            ckpt_cb2.step_end(run_context)
    else:
        ckpt_cb2.begin(run_context)
        ckpt_cb2.step_end(run_context)
Code example #5
def test_momentum():
    class Net(nn.Cell):
        def __init__(self, strategy1, strategy2, weight):
            super().__init__()
            self.weight = Parameter(weight, "w1")
            self.matmul = P.MatMul(transpose_a=False,
                                   transpose_b=True).set_strategy(strategy1)
            self.relu = P.ReLU().set_strategy(strategy2)

        def construct(self, x):
            out = self.matmul(x, self.weight)
            out = self.relu(out)
            return out

    context.set_auto_parallel_context(device_num=4, global_rank=0)
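    # Each shard strategy tuple describes how one input tensor is split across the
    # 4 devices, e.g. (2, 1) splits the first dimension 2-way and leaves the second whole.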
    strategy1 = ((2, 1), (2, 1))
    strategy2 = ((4, 1), )
    strategy3 = ((4, 1), (4, 1))

    x = Tensor(np.ones([64, 32]), dtype=ms.float32)
    weight = Tensor(np.ones([64, 32]), dtype=ms.float32)
    b = Tensor(np.ones([64, 64]), dtype=ms.float32)

    net = Net(strategy1, strategy2, weight)

    optimizer = Momentum(net.trainable_params(),
                         learning_rate=0.1,
                         momentum=0.9)

    net_with_loss = NetWithLoss(net, strategy3)

    train_net = TrainOneStepCell(net_with_loss, optimizer)
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")

    compile_net(train_net, x, b)
Code example #6
def test_resnet_train_tensor():
    """test_resnet_train_tensor"""
    batch_size = 1
    size = 2
    context.set_context(mode=context.GRAPH_MODE)
    context.reset_auto_parallel_context()
    context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, device_num=size,
                                      parameter_broadcast=True)
    one_hot_len = 10
    dataset_types = (np.float32, np.float32)
    dataset_shapes = [[batch_size, 3, 224, 224], [batch_size, one_hot_len]]
    predict = Tensor(np.ones([batch_size, 3, 224, 224]).astype(np.float32) * 0.01)
    label = Tensor(np.zeros([batch_size, one_hot_len]).astype(np.float32))
    dataset = DatasetLenet(predict, label, 2,
                           size=2, batch_size=2,
                           np_types=dataset_types,
                           output_shapes=dataset_shapes,
                           input_indexs=(0, 1))
    dataset.reset()
    network = resnet9(one_hot_len)
    network.set_train()
    loss_fn = nn.SoftmaxCrossEntropyWithLogits()
    optimizer = Momentum(filter(lambda x: x.requires_grad, network.get_parameters()), learning_rate=0.1, momentum=0.9)
    model = Model(network=network, loss_fn=loss_fn, optimizer=optimizer)
    model.train(epoch=2, train_dataset=dataset, dataset_sink_mode=False)
    context.set_context(mode=context.GRAPH_MODE)
    context.reset_auto_parallel_context()
Code example #7
File: test_loss_scale.py  Project: yrpang/mindspore
def test_loss_scale_fp16_lr_overflow_set_sense_scale():
    inputs = Tensor(np.ones([16, 16]).astype(np.float32))
    label = Tensor(np.zeros([16, 16]).astype(np.float32))
    lr = Tensor(np.ones([1], np.float32) * 0.1)
    net = NetFP16(16, 16)
    net.set_train()

    loss = MSELoss()
    optimizer = Momentum(net.trainable_params(),
                         learning_rate=lr,
                         momentum=0.9)

    net_with_loss = WithLossCell(net, loss)
    train_network = TrainOneStepWithLossScaleCell(
        net_with_loss,
        optimizer,
        scale_sense=Tensor(np.full((1),
                                   np.finfo(np.float32).max),
                           dtype=mstype.float32))
    output_1 = train_network(inputs, label)

    train_network.set_sense_scale(
        Tensor(np.full((1),
                       np.finfo(np.float32).max), dtype=mstype.float32))
    output_2 = train_network(inputs, label)
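    # With the loss scale pinned at float32 max, both steps overflow: the losses match
    # and the overflow flag (second output) is True for both runs.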
    assert output_1[0].asnumpy() == output_2[0].asnumpy()
    assert output_1[1].asnumpy() == output_2[1].asnumpy() == True
Code example #8
File: test_summary.py  Project: zuoshou030/mindspore
    def _run_network(self, dataset_sink_mode=True):
        lenet = LeNet5()
        loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False,
                                                sparse=True,
                                                reduction="mean")
        optim = Momentum(lenet.trainable_params(),
                         learning_rate=0.1,
                         momentum=0.9)
        model = Model(lenet,
                      loss_fn=loss,
                      optimizer=optim,
                      metrics={'acc': Accuracy()})
        summary_dir = tempfile.mkdtemp(dir=self.base_summary_dir)
        summary_collector = SummaryCollector(summary_dir=summary_dir,
                                             collect_freq=1)

        ds_train = create_dataset(os.path.join(self.mnist_path, "train"))
        model.train(1,
                    ds_train,
                    callbacks=[summary_collector],
                    dataset_sink_mode=dataset_sink_mode)

        ds_eval = create_dataset(os.path.join(self.mnist_path, "test"))
        model.eval(ds_eval,
                   dataset_sink_mode=dataset_sink_mode,
                   callbacks=[summary_collector])

        self._check_summary_result(summary_dir)
Code example #9
File: test_gpu_lstm.py  Project: zsangel378/mindspore
def test_LSTM():
    num_epochs = 5
    embed_size = 100
    num_hiddens = 100
    num_layers = 2
    bidirectional = True
    labels = 2
    vocab_size = 252193
    max_len = 500
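    # batch_size is not defined in this excerpt; it comes from the surrounding test module.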

    weight = np.ones((vocab_size+1, embed_size)).astype(np.float32)

    net = SentimentNet(vocab_size=(vocab_size+1), embed_size=embed_size,
                       num_hiddens=num_hiddens, num_layers=num_layers,
                       bidirectional=bidirectional, weight=weight,
                       labels=labels, batch_size=batch_size)

    learning_rate = 0.1
    momentum = 0.9

    optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), learning_rate, momentum)
    criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
    net_with_criterion = WithLossCell(net, criterion)
    train_network = TrainOneStepCell(net_with_criterion, optimizer)  # optimizer
    train_network.set_train()

    train_features = Tensor(np.ones([64, max_len]).astype(np.int32))
    train_labels = Tensor(np.ones([64, ]).astype(np.int32)[0:64])
    losses = []
    for epoch in range(num_epochs):
        loss = train_network(train_features, train_labels)
        losses.append(loss)
        print("loss:", loss.asnumpy())
    assert(losses[-1].asnumpy() < 0.01)
Code example #10
def test_trains():
    init()
    lr = 0.1
    momentum = 0.9
    max_epoch = 20
    device_number = 32
    batch_size_per_device = 128
    input_channels = 256
    out_channels = 512

    context.reset_auto_parallel_context()
    context.set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL, device_num=device_number)
    predict = Tensor(np.ones([batch_size_per_device, input_channels]), dtype=ms.float32)
    dataset = Dataset(predict, 4)

    network = fc_with_initialize(input_channels, out_channels)
    network.set_train()

    criterion = get_loss(batch_size_per_device * device_number)

    train_network = BuildTrainNetwork(network, criterion)
    train_network.set_train()
    opt = Momentum(train_network.trainable_params(), lr, momentum)
    train_net = TrainOneStepCell(train_network, opt).set_train()

    model = Model(train_net)
    model.train(max_epoch, dataset, dataset_sink_mode=False)
    context.reset_auto_parallel_context()
Code example #11
def test_pynative_lenet_with_new_interface():
    context.set_context(mode=context.PYNATIVE_MODE)

    epoch_size = 20
    batch_size = 32
    inputs = Tensor(np.ones([batch_size, 1, 32, 32]).astype(np.float32))
    labels = Tensor(np.ones([batch_size]).astype(np.int32))

    net = LeNet()
    criterion = CrossEntropyLoss()
    net_with_criterion = WithLossCell(net, criterion)
    net_with_criterion.set_train()

    weights = ParameterTuple(
        filter(lambda x: x.requires_grad, net.get_parameters()))
    optimizer = Momentum(weights, 0.1, 0.9)

    forward_value_and_grad = nn.ForwardValueAndGrad(network=net_with_criterion,
                                                    weights=weights,
                                                    get_by_list=True)
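    # ForwardValueAndGrad returns the loss together with the gradients for the listed
    # weights; the optimizer is then applied to the gradients manually.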
    total_time = 0
    for epoch in range(0, epoch_size):
        start_time = time.time()
        loss_output, grads = forward_value_and_grad(inputs, labels)
        optimizer(grads)
        end_time = time.time()
        cost_time = end_time - start_time
        total_time = total_time + cost_time

        print("======epoch: ", epoch, " loss: ", loss_output.asnumpy(),
              " cost time: ", cost_time)
    assert loss_output.asnumpy() < 0.005
    assert loss_output.asnumpy() > 0.003
Code example #12
File: run_pretrain.py  Project: brucejunlee/mindspore
def _get_optimizer(args_opt, network):
    """get bert optimizer, support Lamb, Momentum, AdamWeightDecay."""
    if cfg.optimizer == 'Lamb':
        lr_schedule = BertLearningRate(learning_rate=cfg.Lamb.learning_rate,
                                       end_learning_rate=cfg.Lamb.end_learning_rate,
                                       warmup_steps=cfg.Lamb.warmup_steps,
                                       decay_steps=args_opt.train_steps,
                                       power=cfg.Lamb.power)
        params = network.trainable_params()
        decay_params = list(filter(cfg.Lamb.decay_filter, params))
        other_params = list(filter(lambda x: not cfg.Lamb.decay_filter(x), params))
        group_params = [{'params': decay_params, 'weight_decay': cfg.Lamb.weight_decay},
                        {'params': other_params},
                        {'order_params': params}]
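        # 'order_params' keeps the optimizer's parameter order aligned with the
        # network's parameter list when parameters are grouped.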
        optimizer = Lamb(group_params, learning_rate=lr_schedule, eps=cfg.Lamb.eps)
    elif cfg.optimizer == 'Momentum':
        optimizer = Momentum(network.trainable_params(), learning_rate=cfg.Momentum.learning_rate,
                             momentum=cfg.Momentum.momentum)
    elif cfg.optimizer == 'AdamWeightDecay':
        lr_schedule = BertLearningRate(learning_rate=cfg.AdamWeightDecay.learning_rate,
                                       end_learning_rate=cfg.AdamWeightDecay.end_learning_rate,
                                       warmup_steps=cfg.AdamWeightDecay.warmup_steps,
                                       decay_steps=args_opt.train_steps,
                                       power=cfg.AdamWeightDecay.power)
        params = network.trainable_params()
        decay_params = list(filter(cfg.AdamWeightDecay.decay_filter, params))
        other_params = list(filter(lambda x: not cfg.AdamWeightDecay.decay_filter(x), params))
        group_params = [{'params': decay_params, 'weight_decay': cfg.AdamWeightDecay.weight_decay},
                        {'params': other_params, 'weight_decay': 0.0},
                        {'order_params': params}]
        if args_opt.enable_lossscale == "true" and args_opt.device_target == 'GPU':
            optimizer = AdamWeightDecayForBert(group_params, learning_rate=lr_schedule, eps=cfg.AdamWeightDecay.eps)
        elif context.get_context("mode") == context.PYNATIVE_MODE and args_opt.device_target == 'GPU':
            optimizer = AdamWeightDecayOp(group_params, learning_rate=lr_schedule, eps=cfg.AdamWeightDecay.eps)
        else:
            optimizer = AdamWeightDecay(group_params, learning_rate=lr_schedule, eps=cfg.AdamWeightDecay.eps)
    elif cfg.optimizer == "Thor":
        from src.utils import get_bert_thor_lr, get_bert_thor_damping
        lr = get_bert_thor_lr(cfg.Thor.lr_max, cfg.Thor.lr_min, cfg.Thor.lr_power, cfg.Thor.lr_total_steps)
        damping = get_bert_thor_damping(cfg.Thor.damping_max, cfg.Thor.damping_min, cfg.Thor.damping_power,
                                        cfg.Thor.damping_total_steps)
        split_indices = None
        if bert_net_cfg.num_hidden_layers == 12:
            if bert_net_cfg.use_relative_positions:
                split_indices = [29, 58, 87, 116, 145, 174, 203, 217]
            else:
                split_indices = [28, 55, 82, 109, 136, 163, 190, 205]
        elif bert_net_cfg.num_hidden_layers == 24:
            if bert_net_cfg.use_relative_positions:
                split_indices = [30, 90, 150, 210, 270, 330, 390, 421]
            else:
                split_indices = [38, 93, 148, 203, 258, 313, 368, 397]
        optimizer = THOR(network, lr, damping, cfg.Thor.momentum,
                         cfg.Thor.weight_decay, cfg.Thor.loss_scale, cfg.batch_size,
                         decay_filter=lambda x: 'layernorm' not in x.name.lower() and 'bias' not in x.name.lower(),
                         split_indices=split_indices)
    else:
        raise ValueError("Don't support optimizer {}, only support [Lamb, Momentum, AdamWeightDecay, Thor]".
                         format(cfg.optimizer))
    return optimizer
Code example #13
File: test_profiler.py  Project: yrpang/mindspore
    def test_gpu_profiler(self):
        context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
        profiler = Profiler(output_path='data')
        profiler_name = os.listdir(os.path.join(os.getcwd(), 'data'))[0]
        self.profiler_path = os.path.join(os.getcwd(),
                                          f'data/{profiler_name}/')
        ds_train = create_dataset(os.path.join(self.mnist_path, "train"))
        if ds_train.get_dataset_size() == 0:
            raise ValueError(
                "Please check dataset size > 0 and batch_size <= dataset size")

        lenet = LeNet5()
        loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
        optim = Momentum(lenet.trainable_params(),
                         learning_rate=0.1,
                         momentum=0.9)
        model = Model(lenet,
                      loss_fn=loss,
                      optimizer=optim,
                      metrics={'acc': Accuracy()})

        model.train(1, ds_train, dataset_sink_mode=True)
        profiler.analyse()

        self._check_gpu_profiling_file()
Code example #14
    def test_summary_ops(self):
        """Test summary operators."""
        ds_train = create_mnist_dataset('train', num_samples=1, batch_size=1)
        ds_train_iter = ds_train.create_dict_iterator()
        expected_data = next(ds_train_iter)['image'].asnumpy()

        net = LeNet5()
        loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
        optim = Momentum(net.trainable_params(),
                         learning_rate=0.1,
                         momentum=0.9)
        model = Model(net,
                      loss_fn=loss,
                      optimizer=optim,
                      metrics={'loss': Loss()})
        model.train(1, ds_train, dataset_sink_mode=False)

        summary_data = _get_summary_tensor_data()
        image_data = summary_data['x[:Image]'].asnumpy()
        tensor_data = summary_data['x[:Tensor]'].asnumpy()
        x_fc3 = summary_data['x_fc3[:Scalar]'].asnumpy()

        assert np.allclose(expected_data, image_data)
        assert np.allclose(expected_data, tensor_data)
        assert not np.allclose(0, x_fc3)
Code example #15
def auto_parallel_compile_net(mode,
                              dev_num,
                              net,
                              strategy1=None,
                              strategy2=None):
    context.set_context(mode=context.GRAPH_MODE)
    context.set_auto_parallel_context(parallel_mode=mode,
                                      device_num=dev_num,
                                      enable_parallel_optimizer=True)
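    # enable_parallel_optimizer shards the weight-update computation across the devices.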
    inputs = Tensor(np.ones([32, 48]).astype(np.float32))
    label = Tensor(np.zeros([32, 16]).astype(np.float32))
    net = net(strategy1, strategy2)
    net = _VirtualDatasetCell(net)
    optimizer = Momentum(net.trainable_params(),
                         learning_rate=0.1,
                         momentum=0.9)
    train_network = TrainOneStepCell(net, optimizer).set_comm_fusion(4)
    train_network.set_auto_parallel()
    train_network.set_train()
    _executor.compile(train_network,
                      inputs,
                      label,
                      phase="train",
                      auto_parallel_mode=True)
    context.reset_auto_parallel_context()
    return train_network
Code example #16
File: test_summary.py  Project: peng-zhihui/mindspore
    def _run_network(self, dataset_sink_mode=False, num_samples=2, **kwargs):
        lenet = LeNet5()
        loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
        optim = Momentum(lenet.trainable_params(),
                         learning_rate=0.1,
                         momentum=0.9)
        model = Model(lenet,
                      loss_fn=loss,
                      optimizer=optim,
                      metrics={'loss': Loss()})
        summary_dir = tempfile.mkdtemp(dir=self.base_summary_dir)
        summary_collector = SummaryCollector(summary_dir=summary_dir,
                                             collect_freq=2,
                                             **kwargs)

        ds_train = create_dataset(os.path.join(self.mnist_path, "train"),
                                  num_samples=num_samples)
        model.train(1,
                    ds_train,
                    callbacks=[summary_collector],
                    dataset_sink_mode=dataset_sink_mode)

        ds_eval = create_dataset(os.path.join(self.mnist_path, "test"))
        model.eval(ds_eval,
                   dataset_sink_mode=dataset_sink_mode,
                   callbacks=[summary_collector])
        return summary_dir
Code example #17
def test_lars():
    class Net(nn.Cell):
        def __init__(self, strategy1, strategy2, weight):
            super().__init__()
            self.weight = Parameter(weight, "w1")
            self.matmul = P.MatMul(transpose_a=False, transpose_b=True).shard(strategy1)
            self.relu = P.ReLU().shard(strategy2)

        def construct(self, x):
            out = self.matmul(x, self.weight)
            out = self.relu(out)
            return out

    context.set_auto_parallel_context(device_num=4, global_rank=0)
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
    strategy1 = ((2, 1), (2, 1))
    strategy2 = ((4, 1),)
    strategy3 = ((4, 1), (4, 1))

    x = Tensor(np.ones([64, 32]), dtype=ms.float32)
    weight = Tensor(np.ones([64, 32]), dtype=ms.float32)
    b = Tensor(np.ones([64, 64]), dtype=ms.float32)

    net = Net(strategy1, strategy2, weight)

    lr = Tensor(np.ones([6]), dtype=ms.float32)
    sgd = Momentum(net.trainable_params(), lr, 0.9)
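    # LARS wraps the base Momentum optimizer; lars_filter excludes parameters whose
    # names contain 'bn' from LARS scaling.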
    optimizer = LARS(sgd, epsilon=1e-08, coefficient=0.02,
                     lars_filter=lambda x: 'bn' not in x.name)
    net_with_loss = NetWithLoss(net, strategy3)
    train_net = TrainOneStepCell(net_with_loss, optimizer)

    compile_net(train_net, x, b)
Code example #18
File: test_compile.py  Project: zsangel378/mindspore
def test_build():
    """ test_build """
    Tensor(np.random.randint(0, 255, [1, 3, 224, 224]))
    Tensor(np.random.randint(0, 10, [1, 10]))
    net = Net()
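    # `loss` is not defined in this excerpt; it presumably comes from the surrounding test module.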
    opt = Momentum(net.get_parameters(), learning_rate=0.1, momentum=0.9)
    Model(net, loss_fn=loss, optimizer=opt, metrics=None)
Code example #19
File: test_gpu_resnet.py  Project: chncwang/mindspore
def test_big_batchSize_with_new_interface(num_classes=10,
                                          epoch=8,
                                          batch_size=338):
    net = resnet50(num_classes)
    criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    net_with_criterion = WithLossCell(net, criterion)
    net_with_criterion.set_train()

    weights = ParameterTuple(
        filter(lambda x: x.requires_grad, net.get_parameters()))
    optimizer = Momentum(weights, 0.1, 0.9)

    train_network = ForwardValueAndGrad(network=net_with_criterion,
                                        weights=weights,
                                        get_by_list=True,
                                        sens_param=True,
                                        sens=1.0)
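    # sens_param=True supplies a fixed sensitivity (initial gradient) of 1.0 for the backward pass.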
    losses = []
    for i in range(0, epoch):
        data = Tensor(
            np.ones([batch_size, 3, 224, 224]).astype(np.float32) * 0.01)
        label = Tensor(np.ones([batch_size]).astype(np.int32))
        loss, grads = train_network(data, label)
        grads = F.identity(grads)
        optimizer(grads)
        losses.append(loss)
    assert (losses[-1].asnumpy() < 0.8)
Code example #20
File: test_row_tensor.py  Project: dongkcs/mindspore
def test_row_tensor_model_train():
    class Net(nn.Cell):
        def __init__(self, in_features, out_features):
            super(Net, self).__init__()
            self.weight = Parameter(Tensor(
                np.ones([out_features, in_features]).astype(np.float32)),
                                    name="weight")
            self.add = P.TensorAdd()
            self.cast = P.Cast()
            self.flag = True

        def construct(self, inputs, label):
            x = self.add(inputs, self.weight)
            if self.flag:
                x = self.cast(x, mstype.float32)
            return x

    dataset_types = (np.float32, np.float32)
    dataset_shapes = ((16, 16), (16, 16))
    dataset = MindDataSet(dataset_types, dataset_shapes)
    net = Net(16, 16)
    net.set_train()

    optimizer = Momentum(net.trainable_params(),
                         learning_rate=0.1,
                         momentum=0.9)
    model = Model(net, optimizer=optimizer)
    model.train(2, dataset, dataset_sink_mode=False)
Code example #21
def test_lenet_pynative_train_net():
    """ test_lenet_pynative_train_net """
    data = Tensor(np.ones([1, 1, 32, 32]).astype(np.float32) * 0.01)
    label = Tensor(np.ones([1, 10]).astype(np.float32))
    dout = Tensor(np.ones([1]).astype(np.float32))
    iteration_num = 1
    verification_step = 0

    net = LeNet5()

    for i in range(0, iteration_num):
        # get the gradients
        loss_fn = nn.SoftmaxCrossEntropyWithLogits(is_grad=False)
        grad_fn = nn.SoftmaxCrossEntropyWithLogits()
        grad_net = WithGradCell(net, grad_fn, sens=dout)
        gradients = grad_net(data, label)

        # update parameters
        opt = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
        opt(gradients)

        # verification
        if i == verification_step:
            loss_net = WithLossCell(net, loss_fn)
            loss_output = loss_net(data, label)
            print("The loss of %s-th iteration is %s" % (i, loss_output.asnumpy()))
Code example #22
def test_lenet_nccl():
    context.set_auto_parallel_context(parallel_mode="data_parallel",
                                      gradients_mean=True,
                                      device_num=get_group_size())
    net = LeNet()
    net.set_train()
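    # epoch, mini_batch and multisteplr come from the surrounding test module and are not shown in this excerpt.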

    learning_rate = multisteplr(epoch, 2)
    momentum = 0.9
    mom_optimizer = Momentum(
        filter(lambda x: x.requires_grad, net.get_parameters()), learning_rate,
        momentum)
    criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    net_with_criterion = WithLossCell(net, criterion)
    train_network = TrainOneStepCell(net_with_criterion, mom_optimizer)
    train_network.set_train()
    losses = []

    data = Tensor(
        np.ones([net.batch_size, 3, 32, 32]).astype(np.float32) * 0.01)
    label = Tensor(np.ones([net.batch_size]).astype(np.int32))
    start = datetime.datetime.now()
    for _ in range(epoch):
        for _ in range(mini_batch):
            loss = train_network(data, label)
            losses.append(loss.asnumpy())
    end = datetime.datetime.now()
    with open("ms_time.txt", "w") as fo1:
        fo1.write("time:")
        fo1.write(str(end - start))
    with open("ms_loss.txt", "w") as fo2:
        fo2.write("loss:")
        fo2.write(str(losses[-5:]))
    assert losses[-1] < 0.01
Code example #23
def net_trains(criterion, rank):
    init()
    lr = 0.1
    momentum = 0.9
    max_epoch = 20
    input_channels = 256
    out_channels = 512
    context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
    context.reset_auto_parallel_context()
    context.set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL, device_num=device_number,
                                      global_rank=rank)
    predict = Tensor(np.ones([batch_size_per_device, input_channels]), dtype=ms.float32)
    dataset = Dataset(predict, 4)

    network = fc_with_initialize(input_channels, out_channels)
    network.set_train()

    train_network = BuildTrainNetwork(network, criterion)
    train_network.set_train()
    opt = Momentum(train_network.trainable_params(), lr, momentum)
    train_net = TrainOneStepCell(train_network, opt).set_train()

    model = Model(train_net)
    model.train(max_epoch, dataset, dataset_sink_mode=False)
    context.reset_auto_parallel_context()
Code example #24
def test_group_dynamic_1():
    inputs = Tensor(np.ones([1, 1, 32, 32]).astype(np.float32) * 0.01)
    label = Tensor(np.ones([1, 10]).astype(np.float32))

    net = LeNet5()
    conv_lr = 0.8
    default_lr = (0.1, 0.2, 0.3)
    conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
    no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
    group_params = [{'params': no_conv_params},
                    {'params': conv_params, 'lr': conv_lr},
                    {'order_params': net.trainable_params()}]
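    # default_lr is a 3-step learning-rate schedule; the scalar conv_lr is broadcast
    # to the same length, as the asserts below check.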
    net.set_train()
    loss = nn.SoftmaxCrossEntropyWithLogits()

    opt = Momentum(group_params, learning_rate=default_lr, momentum=0.9)
    assert opt.is_group is True
    assert opt.dynamic_lr is True
    assert opt.is_group_params_ordered is True
    for lr, param, order_param in zip(opt.learning_rate, opt.parameters, net.trainable_params()):
        if param in conv_params:
            assert np.all(lr.data.asnumpy() == Tensor(np.array([conv_lr] * 3).astype(np.float32)).asnumpy())
        else:
            assert np.all(lr.data.asnumpy() == Tensor(np.array(list(default_lr)).astype(np.float32)).asnumpy())

        assert param.name == order_param.name

    net_with_loss = WithLossCell(net, loss)
    train_network = TrainOneStepCell(net_with_loss, opt)
    _executor.compile(train_network, inputs, label)
Code example #25
def test_ascend_pynative_lenet():
    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")

    epoch_size = 20
    batch_size = 32
    inputs = Tensor(np.ones([batch_size, 1, 32, 32]).astype(np.float32))
    labels = Tensor(np.ones([batch_size]).astype(np.int32))

    net = LeNet()
    criterion = CrossEntropyLoss()
    optimizer = Momentum(
        filter(lambda x: x.requires_grad, net.get_parameters()), 0.1, 0.9)

    net_with_criterion = WithLossCell(net, criterion)
    train_network = GradWrap(net_with_criterion)
    train_network.set_train()
    total_time = 0

    for epoch in range(0, epoch_size):
        start_time = time.time()
        fw_output = net(inputs)
        loss_output = criterion(fw_output, labels)
        grads = train_network(inputs, labels)
        success = optimizer(grads)
        end_time = time.time()
        cost_time = end_time - start_time
        total_time = total_time + cost_time

        print("======epoch: ", epoch, " loss: ", loss_output.asnumpy(),
              " cost time: ", cost_time)
    assert (loss_output.asnumpy() < 0.1)
Code example #26
def test_momentum():
    epoch = 13
    net = MomentumNet()
    learning_rate = 0.1
    momentum = 0.9 

    optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), learning_rate, momentum)
    criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
    net_with_criterion = WithLossCell(net, criterion)
    train_network = TrainOneStepCell(net_with_criterion, optimizer)  # optimizer
    train_network.set_train()
    losses = []
    for i in range(epoch):
        data = Tensor(np.arange(0, 16).reshape(1, 1, 4, 4).astype(np.float32)*0.01)
        label = Tensor(np.array([0]).astype(np.int32))
        loss = train_network(data, label)
        losses.append(loss)
    
    print("================================")
    print(losses)
    """
    expect output:
    [[0.04132498 0.00874167 0.00874167 0.00874167 0.00874167
      0.00874167 0.00874167 0.00874167 0.00874167 0.00874167]]
    """
    error = np.ones(shape=[1, 10]) * 1.0e-6

    return losses
Code example #27
File: test_callback.py  Project: opendlf/mindspore
def test_save_checkpoint():
    """Test save checkpoint."""
    train_config = CheckpointConfig(save_checkpoint_steps=16,
                                    save_checkpoint_seconds=0,
                                    keep_checkpoint_max=5,
                                    keep_checkpoint_per_n_minutes=0)
    cb_params = _InternalCallbackParam()
    net = Net()
    loss = nn.SoftmaxCrossEntropyWithLogits()
    optim = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
    network_ = WithLossCell(net, loss)
    _train_network = TrainOneStepCell(network_, optim)
    cb_params.train_network = _train_network
    cb_params.epoch_num = 10
    cb_params.cur_epoch_num = 5
    cb_params.cur_step_num = 0
    cb_params.batch_num = 32
    ckpoint_cb = ModelCheckpoint(prefix="test_ckpt",
                                 directory='./test_files',
                                 config=train_config)
    run_context = RunContext(cb_params)
    ckpoint_cb.begin(run_context)
    ckpoint_cb.step_end(run_context)
    if os.path.exists('./test_files/test_ckpt-model.pkl'):
        os.chmod('./test_files/test_ckpt-model.pkl', stat.S_IWRITE)
        os.remove('./test_files/test_ckpt-model.pkl')
Code example #28
def test_parameter_update_int32_and_tensor():
    """ test_parameter_update """
    net = Net()
    loss = nn.SoftmaxCrossEntropyWithLogits()
    optimizer = Momentum(net.get_parameters(), Tensor(np.array([0.1, 0.01, 0.001]), mstype.float32), 0.001)

    net_with_loss = WithLossCell(net, loss)
    train_network = TrainOneStepCell(net_with_loss, optimizer)

    # compile train graph
    train_network.set_train()
    inputs = Tensor(np.ones([1, 64]).astype(np.float32))
    label = Tensor(np.zeros([1, 10]).astype(np.float32))
    _executor.compile(train_network, inputs, label)

    # test tensor: ParameterUpdate assigns a new value to an existing Parameter
    param_lr = train_network.parameters_dict()['learning_rate']
    update_network = ParameterUpdate(param_lr)
    update_network.phase = 'update_param'

    input_lr = Tensor(np.array([0.2, 0.02, 0.002]), mstype.float32)
    _executor.compile(update_network, input_lr)

    # test int32
    param_step = train_network.parameters_dict()['global_step']
    update_global_step = ParameterUpdate(param_step)

    input_step = Tensor(np.array([1000]), mstype.int32)
    _executor.compile(update_global_step, input_step)
Code example #29
def test_momentum():
    epoch = 3
    net = NetMomentum()
    learning_rate = initializer(Tensor(np.array([0.01]).astype(np.float32)),
                                [1])
    momentum = initializer(Tensor(np.array([0.9]).astype(np.float32)), [1])

    optimizer = Momentum(
        filter(lambda x: x.requires_grad, net.get_parameters()), learning_rate,
        momentum)
    criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
    net_with_criterion = WithLossCell(net, criterion)
    train_network = TrainOneStepCell(net_with_criterion,
                                     optimizer)  # optimizer
    train_network.set_train()
    losses = []
    for _ in range(epoch):
        data = Tensor(
            np.arange(0, 16).reshape(1, 1, 4, 4).astype(np.float32) * 0.01)
        label = Tensor(np.array([0]).astype(np.int32))
        loss = train_network(data, label)
        losses.append(loss)

    _ = np.ones(shape=[1, 10]) * 1.0e-6

    return losses
Code example #30
File: test_cpu_type.py  Project: yrpang/mindspore
def test_momentum():
    epoch = 1
    net = MomentumNet()
    learning_rate = (0.1, 0.2)
    momentum = 0.9

    optimizer = Momentum(
        filter(lambda x: x.requires_grad, net.get_parameters()), learning_rate,
        momentum)
    criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    net_with_criterion = WithLossCell(net, criterion)
    train_network = TrainOneStepCell(net_with_criterion,
                                     optimizer)  # optimizer
    train_network.set_train()
    losses = []
    for _ in range(epoch):
        data = Tensor(
            np.arange(0, 16).reshape(1, 1, 4, 4).astype(np.float32) * 0.01)
        label = Tensor(np.array([0]).astype(np.int32))
        loss = train_network(data, label)
        losses.append(loss)
    print("================================")
    print(losses)

    return losses