Example #1
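These snippets come from MindSpore's test suite and omit their shared imports and helper definitions. A minimal preamble the examples below assume might look like the following (module paths per MindSpore 1.x; LeNet, resnet50, CrossEntropyLoss, create_dataset, Net, NetFP16, and GradWrap are test-suite helpers, some of which are sketched further down):

import time

import numpy as np
import pytest

import mindspore.nn as nn
from mindspore import Tensor, ParameterTuple, context
from mindspore import dtype as mstype
from mindspore.nn import WithLossCell, TrainOneStepWithLossScaleCell, MSELoss
from mindspore.nn.optim import Adam, Lamb, Momentum
from mindspore.train.loss_scale_manager import DynamicLossScaleManager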
def test_pynative_lenet_with_new_interface():
    """Train LeNet in PyNative mode using the nn.ForwardValueAndGrad interface."""
    context.set_context(mode=context.PYNATIVE_MODE)

    epoch_size = 20
    batch_size = 32
    inputs = Tensor(np.ones([batch_size, 1, 32, 32]).astype(np.float32))
    labels = Tensor(np.ones([batch_size]).astype(np.int32))

    net = LeNet()
    criterion = CrossEntropyLoss()
    net_with_criterion = WithLossCell(net, criterion)
    net_with_criterion.set_train()

    weights = ParameterTuple(
        filter(lambda x: x.requires_grad, net.get_parameters()))
    optimizer = Momentum(weights, 0.1, 0.9)

    forward_value_and_grad = nn.ForwardValueAndGrad(network=net_with_criterion,
                                                    weights=weights,
                                                    get_by_list=True)
    total_time = 0
    for epoch in range(0, epoch_size):
        start_time = time.time()
        loss_output, grads = forward_value_and_grad(inputs, labels)
        optimizer(grads)
        end_time = time.time()
        cost_time = end_time - start_time
        total_time = total_time + cost_time

        print("======epoch: ", epoch, " loss: ", loss_output.asnumpy(),
              " cost time: ", cost_time)
    # With constant inputs the loss should settle into a narrow band.
    assert 0.003 < loss_output.asnumpy() < 0.005
Example #2
def test_pynative_resnet50():
    """Train ResNet-50 for a few steps in PyNative mode and track per-step latency."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")

    batch_size = 32
    num_classes = 10
    net = resnet50(batch_size, num_classes)
    criterion = CrossEntropyLoss()
    optimizer = Momentum(learning_rate=0.01, momentum=0.9,
                         params=filter(lambda x: x.requires_grad, net.get_parameters()))

    net_with_criterion = WithLossCell(net, criterion)
    net_with_criterion.set_grad()
    train_network = GradWrap(net_with_criterion)
    train_network.set_train()

    step = 0
    max_step = 21
    exceed_num = 0
    data_set = create_dataset(repeat_num=1, training=True, batch_size=batch_size)
    for element in data_set.create_dict_iterator(num_epochs=1):
        step = step + 1
        if step > max_step:
            break
        start_time = time.time()
        input_data = element["image"]
        input_label = element["label"]
        # One forward pass for the loss value, then a separate
        # forward + backward pass for the gradients.
        loss_output = net_with_criterion(input_data, input_label)
        grads = train_network(input_data, input_label)
        optimizer(grads)
        end_time = time.time()
        cost_time = end_time - start_time
        print("======step: ", step, " loss: ", loss_output.asnumpy(), " cost time: ", cost_time)
        # Step 1 includes graph and kernel compilation, so exclude it
        # from the slow-step count.
        if step > 1 and cost_time > 0.25:
            exceed_num = exceed_num + 1
    assert exceed_num < 20
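GradWrap is not a public MindSpore API; it is a small wrapper defined alongside these tests. A minimal sketch, assuming gradients are taken with respect to the network's trainable parameters as the calls above suggest:

import mindspore.nn as nn
import mindspore.ops.composite as C
from mindspore import ParameterTuple

class GradWrap(nn.Cell):
    """Call the wrapped network and return gradients w.r.t. its weights."""
    def __init__(self, network):
        super(GradWrap, self).__init__(auto_prefix=False)
        self.network = network
        self.weights = ParameterTuple(
            filter(lambda x: x.requires_grad, network.get_parameters()))
        self.grad = C.GradOperation(get_by_list=True)

    def construct(self, x, label):
        return self.grad(self.network, self.weights)(x, label)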
Example #3
def test_ascend_pynative_lenet():
    """Train LeNet in PyNative mode on Ascend with the GradWrap helper."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")

    epoch_size = 20
    batch_size = 32
    inputs = Tensor(np.ones([batch_size, 1, 32, 32]).astype(np.float32))
    labels = Tensor(np.ones([batch_size]).astype(np.int32))

    net = LeNet()
    criterion = CrossEntropyLoss()
    optimizer = Momentum(
        filter(lambda x: x.requires_grad, net.get_parameters()), 0.1, 0.9)

    net_with_criterion = WithLossCell(net, criterion)
    train_network = GradWrap(net_with_criterion)
    train_network.set_train()
    total_time = 0

    for epoch in range(0, epoch_size):
        start_time = time.time()
        # Compute the loss with a plain forward pass, then the gradients
        # with the grad-wrapped network.
        fw_output = net(inputs)
        loss_output = criterion(fw_output, labels)
        grads = train_network(inputs, labels)
        optimizer(grads)
        end_time = time.time()
        cost_time = end_time - start_time
        total_time = total_time + cost_time

        print("======epoch: ", epoch, " loss: ", loss_output.asnumpy(),
              " cost time: ", cost_time)
    assert loss_output.asnumpy() < 0.1
Example #4
def test_loss_scale_fp16_lr_overflow_set_sense_scale():
    """Resetting the loss scale via set_sense_scale should not change the overflow result."""
    inputs = Tensor(np.ones([16, 16]).astype(np.float32))
    label = Tensor(np.zeros([16, 16]).astype(np.float32))
    lr = Tensor(np.ones([1], np.float32) * 0.1)
    net = NetFP16(16, 16)
    net.set_train()

    loss = MSELoss()
    optimizer = Momentum(net.trainable_params(),
                         learning_rate=lr,
                         momentum=0.9)

    net_with_loss = WithLossCell(net, loss)
    train_network = TrainOneStepWithLossScaleCell(
        net_with_loss,
        optimizer,
        scale_sense=Tensor(np.full((1),
                                   np.finfo(np.float32).max),
                           dtype=mstype.float32))
    output_1 = train_network(inputs, label)

    train_network.set_sense_scale(
        Tensor(np.full((1),
                       np.finfo(np.float32).max), dtype=mstype.float32))
    output_2 = train_network(inputs, label)
    # output[0] is the loss and output[1] the overflow flag; a float32-max
    # scale_sense forces an overflow on both steps, so the outputs must match.
    assert output_1[0].asnumpy() == output_2[0].asnumpy()
    assert output_1[1].asnumpy() == output_2[1].asnumpy() == True
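Net and NetFP16 are simple helper networks from the test files, not public APIs. A plausible sketch, assuming a single linear layer where NetFP16 computes in float16 (which is what lets the float32-max loss scale overflow):

import numpy as np
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor, Parameter, dtype as mstype

class Net(nn.Cell):
    """A single linear layer computed in float32."""
    def __init__(self, in_features, out_features):
        super(Net, self).__init__()
        self.weight = Parameter(
            Tensor(np.ones([in_features, out_features]).astype(np.float32)), name="weight")
        self.bias = Parameter(
            Tensor(np.ones([out_features]).astype(np.float32)), name="bias")
        self.matmul = ops.MatMul()

    def construct(self, x):
        return self.matmul(x, self.weight) + self.bias

class NetFP16(Net):
    """The same layer computed in float16, so gradients can overflow."""
    def __init__(self, in_features, out_features):
        super(NetFP16, self).__init__(in_features, out_features)
        self.cast = ops.Cast()

    def construct(self, x):
        out = self.matmul(self.cast(x, mstype.float16),
                          self.cast(self.weight, mstype.float16))
        out = out + self.cast(self.bias, mstype.float16)
        return self.cast(out, mstype.float32)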
Example #5
def adam_compile(loss_scale=1.0):
    """Compile and run one loss-scaled training step with the Adam optimizer."""
    inputs = Tensor(np.ones([15, 1]).astype(np.float32))
    label = Tensor(np.zeros([15, 1]).astype(np.float32))
    net = Net(1, 1)

    loss = MSELoss()
    optimizer = Adam(net.trainable_params(),
                     learning_rate=1e-3,
                     beta1=0.9,
                     beta2=0.999,
                     eps=1e-8,
                     use_locking=False,
                     use_nesterov=False,
                     weight_decay=0.0,
                     loss_scale=loss_scale)

    net_with_loss = WithLossCell(net, loss)
    train_network = TrainOneStepWithLossScaleCell(net_with_loss,
                                                  optimizer,
                                                  scale_sense=Tensor(
                                                      np.full((1), 1.0),
                                                      dtype=mstype.float32))
    train_network.set_train()
    output = train_network(inputs, label)
    print("the result is ", output)
Example #6
def test_loss_scale_fp16_overflow():
    """Force an fp16 overflow with a float32-max loss scale and check the two steps agree."""
    inputs = Tensor(np.ones([16, 16]).astype(np.float32))
    label = Tensor(np.zeros([16, 16]).astype(np.float32))
    scaling_sens = Tensor(np.full((1),
                                  np.finfo(np.float32).max),
                          dtype=mstype.float32)
    net = NetFP16(16, 16)
    net.set_train()

    loss = MSELoss()
    optimizer = Lamb(net.trainable_params(), learning_rate=0.01)
    net_with_loss = WithLossCell(net, loss)
    net_with_loss.set_grad()
    train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer)
    output_1 = train_network(inputs, label, scaling_sens)
    output_2 = train_network(inputs, label, scaling_sens)
    # Both steps overflow under the float32-max scale, so the loss and
    # the overflow flag should be identical across runs.
    assert output_1[0].asnumpy() == output_2[0].asnumpy()
    assert output_1[1].asnumpy() == output_2[1].asnumpy() == True
Example #7
def test_compile_fp16_overflow():
    """Compile one loss-scaled step with the Lamb optimizer under a float32-max scale."""
    inputs = Tensor(np.ones([16, 16]).astype(np.float32))
    label = Tensor(np.zeros([16, 16]).astype(np.float32))
    scaling_sens = Tensor(np.full((1), np.finfo(np.float32).max), dtype=mstype.float32)
    net = NetFP16(16, 16)

    loss = MSELoss()
    optimizer = Lamb(net.trainable_params(), decay_steps=10, warmup_steps=5)
    net_with_loss = WithLossCell(net, loss)
    train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer)
    train_network.set_train()
    output = train_network(inputs, label, scaling_sens)
    print("the result is ", output)
Example #8
def test_momentum_compile():
    """Compile one loss-scaled step with the Momentum optimizer."""
    inputs = Tensor(np.ones([15, 1]).astype(np.float32))
    label = Tensor(np.zeros([15, 1]).astype(np.float32))
    scaling_sens = Tensor(np.full((1), 1.0), dtype=mstype.float32)
    net = Net(1, 1)

    loss = MSELoss()
    optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)

    net_with_loss = WithLossCell(net, loss)
    train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer)
    train_network.set_train()
    output = train_network(inputs, label, scaling_sens)
    print("the result is ", output)
Example #9
def test_compile_fp16_lr_overflow():
    """Compile one loss-scaled step with a tensor learning rate under a float32-max scale."""
    inputs = Tensor(np.ones([16, 16]).astype(np.float32))
    label = Tensor(np.zeros([16, 16]).astype(np.float32))
    scaling_sens = Tensor(np.full((1), np.finfo(np.float32).max), dtype=mstype.float32)
    lr = Tensor(np.ones([1], np.float32) * 0.1)
    net = NetFP16(16, 16)
    loss = MSELoss()
    optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=0.9)

    net_with_loss = WithLossCell(net, loss)
    train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer)
    train_network.set_train()
    output = train_network(inputs, label, scaling_sens)
    print("the result is ", output)
Example #10
def test_compile_fp16_lr_overflow_dynamic_graph():
    """Run one step where a DynamicLossScaleManager update cell drives the loss scale."""
    inputs = Tensor(np.ones([16, 16]).astype(np.float32))
    label = Tensor(np.zeros([16, 16]).astype(np.float32))
    lr = Tensor(np.ones([1], np.float32) * 0.1)
    net = NetFP16(16, 16)
    loss = MSELoss()
    optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=0.9)

    net_with_loss = WithLossCell(net, loss)
    scale_manager = DynamicLossScaleManager()
    update_cell = scale_manager.get_update_cell()
    train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell=update_cell)
    train_network.set_train()
    output = train_network(inputs, label)
    print("the result is ", output)
Example #11
def test_compile_grad_error():
    """Expect a TypeError to be raised during the loss-scaled training step."""
    inputs = Tensor(np.ones([16, 16]).astype(np.float32))
    label = Tensor(np.zeros([16, 16]).astype(np.float32))
    lr = Tensor(np.ones([1], np.float32) * 0.1)
    net = NetFP16(16, 16)
    loss = MSELoss()
    optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=0.9)

    net_with_loss = WithLossCell(net, loss)
    scale_manager = DynamicLossScaleManager()
    update_cell = scale_manager.get_update_cell()
    train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell=update_cell)
    train_network.set_train()
    with pytest.raises(TypeError) as e:
        train_network(inputs, label)
    # Print outside the with-block; a line after the raising call inside it would never run.
    print(e)