Example #1
from mxnet import nd

# LARGE_X is a large vector length defined by the surrounding test module.
def test_exponent_logarithm_operators():
    a = 2 * nd.ones(shape=LARGE_X)
    # exponent
    result = nd.exp(a)
    assert result[-1] == 7.389056
    assert result.shape == a.shape

    # exponent minus 1
    result = nd.expm1(a)
    assert result[-1] == 6.389056
    assert result.shape == a.shape

    # log2
    result = nd.log2(a)
    assert result[-1] == 1
    assert result.shape == a.shape

    # log10
    result = nd.log10(a)
    assert result[-1] == 0.30103
    assert result.shape == a.shape

    # log1p
    result = nd.log1p(a)
    assert result[-1] == 1.0986123
    assert result.shape == a.shape

    # log
    result = nd.log(a)
    assert result[-1] == 0.6931472
    assert result.shape == a.shape
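The values asserted above are just the standard math-library results for x = 2, rounded to float32 precision. A quick standalone check (a sketch using only Python's math module, no MXNet required):

import math

x = 2.0
print(math.exp(x))     # 7.389056...  -> matches nd.exp
print(math.expm1(x))   # 6.389056...  -> matches nd.expm1
print(math.log2(x))    # 1.0          -> matches nd.log2
print(math.log10(x))   # 0.30103...   -> matches nd.log10
print(math.log1p(x))   # 1.0986123... -> matches nd.log1p (ln 3)
print(math.log(x))     # 0.6931472... -> matches nd.log (ln 2)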
Example #2
    # Relies on mxnet (mx, nd), gluon, and module-level helpers such as
    # BackGradDataGenerator and EPS defined by the surrounding project.
    def generate_backgrad_data_constraint(
            net,
            data,
            label,
            max_iters=60,
            lr=0.1,
            iter_log=False,
            clip=False,
            loss_f=gluon.loss.SoftmaxCrossEntropyLoss(),
            bn_control=None,
            post_deal=None,
            sgd=None,
            threshold=None):
        backgrad_data = data.as_in_context(
            mx.cpu()) if post_deal is not None else data
        backgrad_data, (_loss, _bloss) = BackGradDataGenerator.generate_backgrad_data(
            net, backgrad_data, label, max_iters, lr, iter_log, clip, loss_f,
            bn_control, sgd, None)

        if post_deal is not None:
            tmp = (backgrad_data - data)**2
            diff = nd.sqrt(nd.sum(tmp, axis=0, exclude=True))
            MSE = nd.mean(tmp, axis=0, exclude=True)
            Savg = nd.mean((data)**2, axis=0, exclude=True)
            # EPS avoids division by zero when the perturbation is zero
            SNR = 10 * nd.log10(Savg / (MSE + EPS))
            post_deal(backgrad_data, data, diff, _loss, MSE, SNR)

        if threshold is not None:
            # clip each of the three channels to its own [low, high] range
            for i in range(3):
                backgrad_data[:, i, :, :] = backgrad_data[:, i, :, :].clip(
                    threshold[0, i].asscalar(), threshold[1, i].asscalar())

        return backgrad_data, (_loss, _bloss)
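For reference, the MSE/SNR bookkeeping used in the post_deal branch above can be isolated into a small standalone sketch. The value of EPS and the function name are assumptions; the original module defines its own helpers.

from mxnet import nd

EPS = 1e-12  # assumed small constant; the original module defines its own EPS

def perturbation_stats(backgrad_data, data):
    # reduce over every axis except the batch axis (axis 0)
    tmp = (backgrad_data - data) ** 2
    diff = nd.sqrt(nd.sum(tmp, axis=0, exclude=True))  # per-sample L2 distance
    mse = nd.mean(tmp, axis=0, exclude=True)            # per-sample mean squared error
    savg = nd.mean(data ** 2, axis=0, exclude=True)     # per-sample signal power
    snr = 10 * nd.log10(savg / (mse + EPS))             # signal-to-noise ratio in dB
    return diff, mse, snr

a = nd.random.normal(shape=(4, 3, 8, 8))
b = a + 0.01 * nd.random.normal(shape=(4, 3, 8, 8))
diff, mse, snr = perturbation_stats(b, a)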
Example #3
def log10(x):
    return nd.log10(x)
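nd.log10 operates elementwise on an NDArray, so the wrapper above simply forwards to it; a minimal usage sketch:

from mxnet import nd

a = nd.array([1.0, 10.0, 100.0])
print(nd.log10(a))   # [0. 1. 2.]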
Example #4
    # Relies on mxnet (mx, nd), gluon, numpy (np), and module-level helpers
    # (BackGradDataGenerator, BNControl, inv_normalize, show_images, statistic,
    # EPS, ctx) defined by the surrounding project.
    def generate_data_for_out(net,
                              origin_data,
                              max_iters=10,
                              lr=0.1,
                              use_batch_mean_std=False,
                              use_statistic=True,
                              show_per_iters=None,
                              loss_f=gluon.loss.SoftmaxCrossEntropyLoss(),
                              out_data=None,
                              begin_index=0,
                              bn_control=None,
                              post_deal=None,
                              sgd=None,
                              threshold=None):
        out_idx, iters = begin_index, 0
        diffs, losses, MSEs, SNRs, bloss = None, None, None, None, 0

        if bn_control is None:
            # freeze batch-norm statistics so the moving mean/std are not
            # updated while images are being generated
            bn_backup = BNControl(net.net, use_batch_mean_std)
            bn_backup.store()
        for data, label in origin_data:
            backgrad_data, (
                _loss,
                _bloss,
            ) = BackGradDataGenerator.generate_backgrad_data_constraint(
                net,
                data,
                label,
                max_iters=max_iters,
                lr=lr,
                iter_log=False,
                clip=False,
                loss_f=loss_f,
                bn_control=bn_control,
                post_deal=post_deal,
                sgd=sgd,
                threshold=threshold)

            tmp = (backgrad_data - data)**2
            diff = nd.sqrt(nd.sum(tmp, axis=0, exclude=True))
            MSE = nd.mean(tmp, axis=0, exclude=True)
            Savg = nd.mean((data)**2, axis=0, exclude=True)
            # EPS avoids division by zero when the perturbation is zero
            SNR = 10 * nd.log10(Savg / (MSE + EPS))
            if sgd is not None or post_deal is not None:
                # recompute the loss on the final perturbed data; ctx is the
                # compute context defined by the surrounding module
                _loss = loss_f(net(backgrad_data.as_in_context(ctx)),
                               label.as_in_context(ctx))
                _bloss = nd.mean(_loss).asscalar()
            bloss += _bloss

            if diffs is None:
                diffs, losses, MSEs, SNRs = diff, _loss, MSE, SNR
            else:
                diffs = nd.concat(diffs, diff, dim=0)
                losses = nd.concat(losses, _loss, dim=0)
                MSEs = nd.concat(MSEs, MSE, dim=0)
                SNRs = nd.concat(SNRs, SNR, dim=0)

            # copy back to CPU memory; otherwise the GPU memory is not released
            backgrad_data = inv_normalize(backgrad_data,
                                          clip=False,
                                          asnumpy=False)
            out_data[out_idx:out_idx +
                     data.shape[0], :, :, :] = backgrad_data.transpose(
                         (0, 2, 3, 1)).as_in_context(mx.cpu())
            out_idx += data.shape[0]

            if show_per_iters is not None and iters % show_per_iters == 0:
                show_images(inv_normalize(data[np.array(range(0, 25, 5)) %
                                               data.shape[0]],
                                          clip=False),
                            clip=False)
                show_images(backgrad_data[np.array(range(0, 25, 5)) %
                                          data.shape[0]].asnumpy(),
                            clip=False)
            iters += 1
        if bn_control is None:
            bn_backup.load()

        diffs, losses, MSEs, SNRs = (diffs.asnumpy(), losses.asnumpy(),
                                     MSEs.asnumpy(), SNRs.asnumpy())
        if use_statistic:
            return (statistic(diffs), statistic(losses),
                    statistic(MSEs), statistic(SNRs))
        else:
            return diffs, losses, MSEs, SNRs
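The per-batch accumulation pattern above (start with None, nd.concat each batch's metrics along the batch dimension, then summarize once at the end) can be sketched on its own. statistic() here is an assumed placeholder for the project's own summary helper:

from mxnet import nd

def statistic(values):
    # assumed placeholder; the original project defines its own statistic()
    return values.min(), values.mean(), values.max()

metrics = None
for _ in range(3):  # stands in for iterating over the origin_data batches
    batch_metric = nd.random.uniform(shape=(8,))
    metrics = batch_metric if metrics is None else nd.concat(metrics, batch_metric, dim=0)

print(statistic(metrics.asnumpy()))  # one summary over all batches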