Example #1
def local_lrnGrad_mkl(node):
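    # Rewrite an AbstractLRNGrad node into the MKL implementation: convert the
    # input to MKL's internal layout (U2ILRN), rebuild the LRN forward pass on
    # it, map the incoming gradient into the internal layout (I2UGrad), run
    # mkl_lrn.LRNGrad, and convert the result back to the user layout (U2IGrad).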
    if not mkl_available():
        return

    if not isinstance(node.op, mkl_lrn.AbstractLRNGrad):
        return

    if node.inputs[0].type.ndim != 4:
        return

    try:
        x, gz = node.inputs
        x_u2i = U2ILRN(alpha=node.op.alpha,
                       beta=node.op.beta,
                       k=node.op.k,
                       n=node.op.n)(x)
        lrnOut = mkl_lrn.LRN(alpha=node.op.alpha,
                             beta=node.op.beta,
                             k=node.op.k,
                             n=node.op.n)(x_u2i)
        gz_u2i = I2UGrad()(lrnOut, gz)
        lrnGradOut = mkl_lrn.LRNGrad(alpha=node.op.alpha,
                                     beta=node.op.beta,
                                     k=node.op.k,
                                     n=node.op.n)(x_u2i, gz_u2i)
        gx_i2u = U2IGrad()(x, lrnGradOut)
        rval = gx_i2u
        return [rval]
    except Exception as e:
        msg = ('Failed to apply local opt to Op %s. '
               'Exception message: %s\n') % (node.op, str(e))
        _logger.warning(msg)
        return
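A rewrite like this only takes effect once it is registered with Theano's optimizer database. A minimal sketch of how that registration could look, using Theano's generic local_optimizer/TopoOptimizer/optdb machinery (the optimizer name, database position, and tags below are illustrative assumptions, not the MKL branch's actual registration):

from theano.compile import optdb
from theano.gof.opt import local_optimizer, TopoOptimizer

# Track only AbstractLRNGrad nodes, wrap the local rewrite in a whole-graph
# pass, and register it so it runs during function compilation.
lrnGrad_to_mkl = local_optimizer([mkl_lrn.AbstractLRNGrad])(local_lrnGrad_mkl)
optdb.register('local_lrnGrad_mkl_sketch',       # illustrative name
               TopoOptimizer(lrnGrad_to_mkl),
               48.5, 'fast_run', 'mkl')          # assumed position and tags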
Example #2
    def test_lrn_float64(self):
        old_floatX = theano.config.floatX
        theano.config.floatX = 'float64'

        x = tensor.dtensor4('x')
        x_internal = U2ILRN()(x)
        z_internal = mkl_lrn.LRN()(x_internal)
        z = I2U()(z_internal)

        f = theano.function([x], z, mode=mode_with_mkl)
        imval = numpy.random.rand(4, 2, 4, 4).astype(theano.config.floatX)

        f(imval)
        assert f(imval).dtype == 'float64'

        theano.config.floatX = old_floatX
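The tests compile with a `mode_with_mkl` defined elsewhere in the test module. A plausible sketch of that definition, assuming the MKL rewrites are registered under an 'mkl' tag (the tag name is an assumption):

import theano

# Default compilation mode with the MKL-tagged graph rewrites enabled.
mode_with_mkl = theano.compile.mode.get_default_mode().including('mkl')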
Example #3
    def test_lrn_grad_float32(self):
        old_floatX = theano.config.floatX
        theano.config.floatX = 'float32'

        x = tensor.ftensor4('x')
        x_internal = U2ILRN()(x)
        z_internal = mkl_lrn.LRN()(x_internal)
        z = I2U()(z_internal)
        z_sum = tensor.sum(z)
        g = tensor.grad(z_sum, [x])

        f = theano.function([x], g, mode=mode_with_mkl)
        imval = numpy.random.rand(4, 2, 4, 4).astype(theano.config.floatX)

        f(imval)
        assert f(imval)[0].dtype == 'float32'

        theano.config.floatX = old_floatX
Example #4
    def test_mkl_lrn_value(self):
        shape = [(2, 15, 3, 4), (256, 256, 27, 27)]  # NCHW
        n = 5
        k = 2
        alpha = 0.0001
        beta = 0.75

        x = tensor.dtensor4('x')
        x_internal = U2ILRN()(x)
        z_internal = mkl_lrn.LRN(alpha, beta, k, n)(x_internal)
        z = I2U()(z_internal)

        fz = theano.function([x], z, mode=mode_with_mkl)
        # for shape[0]
        input_data = numpy.random.rand(*shape[0]).astype(theano.config.floatX)
        t = self.ground_truth_normalizer(input_data,
                                         k=k,
                                         n=n,
                                         alpha=alpha,
                                         beta=beta)
        assert (fz(input_data)).shape == t.shape
        assert numpy.allclose(fz(input_data), t)
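The `ground_truth_normalizer` helper is defined elsewhere in the test class. As a minimal standalone NumPy sketch of the cross-channel LRN reference it is assumed to compute (assuming the raw-alpha convention, i.e. alpha not divided by the window size n; adjust if the op under test uses the other scaling):

import numpy

def ground_truth_normalizer(x, k, n, alpha, beta):
    # x is NCHW; each value is divided by
    # (k + alpha * sum of squares over a window of n neighbouring channels
    #  centred on it) ** beta.
    half = n // 2
    scale = numpy.full_like(x, k)
    for c in range(x.shape[1]):
        lo = max(0, c - half)
        hi = min(x.shape[1], c + half + 1)
        scale[:, c, :, :] += alpha * numpy.square(x[:, lo:hi, :, :]).sum(axis=1)
    return x / scale ** beta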