Example #1
    def _run_network(self, dataset_sink_mode=False, num_samples=2, **kwargs):
        """Train and evaluate LeNet-5 on MNIST with a SummaryCollector attached."""
        lenet = LeNet5()
        loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
        optim = Momentum(lenet.trainable_params(),
                         learning_rate=0.1,
                         momentum=0.9)
        model = Model(lenet,
                      loss_fn=loss,
                      optimizer=optim,
                      metrics={'loss': Loss()})
        summary_dir = tempfile.mkdtemp(dir=self.base_summary_dir)
        summary_collector = SummaryCollector(summary_dir=summary_dir,
                                             collect_freq=2,
                                             **kwargs)

        ds_train = create_dataset(os.path.join(self.mnist_path, "train"),
                                  num_samples=num_samples)
        model.train(1,
                    ds_train,
                    callbacks=[summary_collector],
                    dataset_sink_mode=dataset_sink_mode)

        ds_eval = create_dataset(os.path.join(self.mnist_path, "test"))
        model.eval(ds_eval,
                   dataset_sink_mode=dataset_sink_mode,
                   callbacks=[summary_collector])
        return summary_dir
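A hedged sketch of how a test might drive this helper; the test name and the check on the returned directory are illustrative assumptions, not taken from the original suite, and rely on the class context (and the os import) already present in the file above.

    def test_summary_collector_writes_output(self):
        """Hypothetical caller: run train + eval once and check that output was written."""
        summary_dir = self._run_network(dataset_sink_mode=False, num_samples=2)
        # The exact summary file names are version dependent, so only assert
        # that SummaryCollector wrote something into the collected directory.
        assert os.listdir(summary_dir)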
Example #2
    def test_summary_ops(self):
        """Test summary operators."""
        ds_train = create_mnist_dataset('train', num_samples=1, batch_size=1)
        ds_train_iter = ds_train.create_dict_iterator()
        expected_data = next(ds_train_iter)['image'].asnumpy()

        net = LeNet5()
        loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
        optim = Momentum(net.trainable_params(),
                         learning_rate=0.1,
                         momentum=0.9)
        model = Model(net,
                      loss_fn=loss,
                      optimizer=optim,
                      metrics={'loss': Loss()})
        model.train(1, ds_train, dataset_sink_mode=False)

        summary_data = _get_summary_tensor_data()
        image_data = summary_data['x[:Image]'].asnumpy()
        tensor_data = summary_data['x[:Tensor]'].asnumpy()
        x_fc3 = summary_data['x_fc3[:Scalar]'].asnumpy()

        assert np.allclose(expected_data, image_data)
        assert np.allclose(expected_data, tensor_data)
        assert not np.allclose(0, x_fc3)
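The tags 'x[:Image]', 'x[:Tensor]' and 'x_fc3[:Scalar]' come from summary operators placed inside the network's construct. Below is a minimal sketch of a network that records such data, as an illustration only: it is a simplified stand-in for the LeNet5 used by the test and assumes a single-channel 32x32 input.

import mindspore.nn as nn
import mindspore.ops as ops

class TinyNetWithSummary(nn.Cell):
    """Hypothetical network that records summary data during the forward pass."""
    def __init__(self, num_classes=10):
        super().__init__()
        self.image_summary = ops.ImageSummary()
        self.tensor_summary = ops.TensorSummary()
        self.scalar_summary = ops.ScalarSummary()
        self.mean = ops.ReduceMean()
        self.flatten = nn.Flatten()
        self.fc = nn.Dense(32 * 32, num_classes)  # assumes 1x32x32 input images

    def construct(self, x):
        self.image_summary("x", x)    # collected under the tag 'x[:Image]'
        self.tensor_summary("x", x)   # collected under the tag 'x[:Tensor]'
        out = self.fc(self.flatten(x))
        # ScalarSummary needs a scalar tensor, so record the mean of the logits.
        self.scalar_summary("x_fc3", self.mean(out))  # collected as 'x_fc3[:Scalar]'
        return out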
Example #3
# Imports needed by this and the following Loss-metric snippets (MindSpore 1.x layout).
import numpy as np
import pytest
from mindspore import Tensor
from mindspore.nn.metrics import Loss


def test_loss():
    """Test that the Loss metric averages the updated values and that clear() resets it."""
    num = 5
    inputs = np.random.rand(num)

    loss = Loss()
    for k in range(num):
        loss.update(Tensor(np.array([inputs[k]])))

    assert inputs.mean() == loss.eval()

    loss.clear()
    with pytest.raises(RuntimeError):
        loss.eval()
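For reference, the behaviour these tests exercise is a simple running average over the values passed to update(). A hypothetical pure-NumPy stand-in is sketched below; the exact validation rules of the real metric are an assumption here.

class RunningLoss:
    """Hypothetical stand-in for the Loss metric: averages scalar loss values."""
    def __init__(self):
        self.clear()

    def clear(self):
        self._sum_loss = 0.0
        self._total_num = 0

    def update(self, loss):
        loss = np.asarray(loss, dtype=np.float64)
        if loss.size != 1:
            raise ValueError("loss must be a scalar or a single-element array")
        self._sum_loss += float(loss.reshape(-1)[0])
        self._total_num += 1

    def eval(self):
        if self._total_num == 0:
            raise RuntimeError("the loss can not be calculated before any update")
        return self._sum_loss / self._total_num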
Example #4
                config.label_smooth_factor = 0.0
            loss = CrossEntropySmooth(sparse=True,
                                      reduction="mean",
                                      smooth_factor=config.label_smooth_factor,
                                      num_classes=config.class_num)
        else:
            loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
        loss_scale = FixedLossScaleManager(config.loss_scale,
                                           drop_overflow_update=False)
        model = Model(net,
                      loss_fn=loss,
                      optimizer=opt,
                      loss_scale_manager=loss_scale,
                      metrics={
                          "top1": Top1CategoricalAccuracy(),
                          "loss": Loss()
                      },
                      amp_level="O2",
                      keep_batchnorm_fp32=False)
    else:
        # GPU target
        if args.dataset == "imagenet2012":
            if not config.use_label_smooth:
                config.label_smooth_factor = 0.0
            loss = CrossEntropySmooth(sparse=True,
                                      reduction="mean",
                                      smooth_factor=config.label_smooth_factor,
                                      num_classes=config.class_num)
        else:
            loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
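CrossEntropySmooth here is the model's own label-smoothing loss rather than a MindSpore built-in. A minimal sketch of such a loss is shown below as an illustration of the idea, not the exact source; the class name and details are assumptions.

import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor
from mindspore.common import dtype as mstype

class CrossEntropySmoothSketch(nn.Cell):
    """Hypothetical label-smoothing cross-entropy for sparse integer labels."""
    def __init__(self, sparse=True, reduction='mean', smooth_factor=0.0, num_classes=1000):
        super().__init__()
        self.sparse = sparse
        self.num_classes = num_classes
        self.onehot = ops.OneHot()
        # Smoothed targets: 1 - e for the true class, e / (num_classes - 1) elsewhere.
        self.on_value = Tensor(1.0 - smooth_factor, mstype.float32)
        self.off_value = Tensor(smooth_factor / (num_classes - 1), mstype.float32)
        self.ce = nn.SoftmaxCrossEntropyWithLogits(reduction=reduction)

    def construct(self, logits, label):
        if self.sparse:
            # Convert integer labels to smoothed one-hot targets.
            label = self.onehot(label, self.num_classes, self.on_value, self.off_value)
        return self.ce(logits, label)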
Example #5
def test_loss_shape_error():
    """A loss value with more than one element should be rejected by update()."""
    loss = Loss()
    inp = np.ones(shape=[2, 2])
    with pytest.raises(ValueError):
        loss.update(inp)
Example #6
def test_loss_inputs_error():
    """The metric takes exactly one input; passing two should raise ValueError."""
    loss = Loss()
    with pytest.raises(ValueError):
        loss(np.array(1), np.array(2))
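For contrast with the two error cases above, a hypothetical companion test showing the input the metric does accept (one scalar or single-element loss value per update), using the same imports as the earlier Loss snippets:

def test_loss_valid_input():
    """Hypothetical positive case: one scalar loss value per update is accepted."""
    loss = Loss()
    loss.update(Tensor(np.array([0.25])))
    loss.update(Tensor(np.array(0.75)))
    # (0.25 + 0.75) / 2 is exactly representable, so exact equality is safe here.
    assert loss.eval() == 0.5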