Example #1
    def test_optimization_max(self):
        data = np.asarray(np.random.rand(2, 3), dtype=config.floatX)
        n = matrix()

        for axis in [0, 1, -1]:
            f = function([n], tt_max(n, axis), mode=self.mode)
            topo = f.maker.fgraph.toposort()
            assert len(topo) == 1
            assert isinstance(topo[0].op, CAReduce)
            f(data)

            f = function([n], tt_max(-n, axis), mode=self.mode)
            topo = f.maker.fgraph.toposort()
            assert len(topo) == 2
            assert isinstance(topo[0].op, Elemwise)
            assert isinstance(topo[0].op.scalar_op, aes.Neg)
            assert isinstance(topo[1].op, CAReduce)
            f(data)

            f = function([n], -tt_max(n, axis), mode=self.mode)
            topo = f.maker.fgraph.toposort()
            assert len(topo) == 2
            assert isinstance(topo[0].op, CAReduce)
            assert isinstance(topo[1].op, Elemwise)
            assert isinstance(topo[1].op.scalar_op, aes.Neg)
            f(data)

            f = function([n], -tt_max(-n, axis), mode=self.mode)
            topo = f.maker.fgraph.toposort()
            assert len(topo) == 1
            assert isinstance(topo[0].op, CAReduce)  # min
            f(data)
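
The last assertion relies on the identity min(x) == -max(-x): negation reverses ordering, so the rewriter can collapse the negate/max/negate chain into a single min reduction. A quick check of that identity in plain NumPy (separate from the Aesara graph above):

import numpy as np

x = np.random.rand(2, 3)
for axis in [0, 1, -1]:
    # Negation flips the ordering, so the max of -x is the negated min of x.
    assert np.allclose(np.min(x, axis=axis), -np.max(-x, axis=axis))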
Example #2
    def test_max(self):
        # If we call max directly, we will get a CAReduce object
        # which doesn't have R_op implemented!
        # self.check_mat_rop_lop(tt_max(self.mx, axis=[0, 1])[0], ())
        self.check_mat_rop_lop(tt_max(self.mx, axis=0),
                               (self.mat_in_shape[1],))
        self.check_mat_rop_lop(tt_max(self.mx, axis=1),
                               (self.mat_in_shape[0],))
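
The expected output shapes follow from how a reduction collapses one axis: for a matrix of shape (m, n), max over axis 0 has shape (n,) and max over axis 1 has shape (m,). A minimal NumPy sketch, assuming a (2, 3) input analogous to self.mat_in_shape:

import numpy as np

mx = np.random.rand(2, 3)                  # mat_in_shape == (2, 3)
assert np.max(mx, axis=0).shape == (3,)    # (mat_in_shape[1],)
assert np.max(mx, axis=1).shape == (2,)    # (mat_in_shape[0],)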
Example #3
def test_tensor_basics():
    y = vector("y")
    y.tag.test_value = np.r_[1.0, 2.0].astype(config.floatX)
    x = vector("x")
    x.tag.test_value = np.r_[3.0, 4.0].astype(config.floatX)
    A = matrix("A")
    A.tag.test_value = np.empty((2, 2), dtype=config.floatX)
    alpha = scalar("alpha")
    alpha.tag.test_value = np.array(3.0, dtype=config.floatX)
    beta = scalar("beta")
    beta.tag.test_value = np.array(5.0, dtype=config.floatX)

    # This should be converted into a `Gemv` `Op` when the non-JAX compatible
    # optimizations are turned on; however, when using JAX mode, it should
    # leave the expression alone.
    out = y.dot(alpha * A).dot(x) + beta * y
    fgraph = FunctionGraph([y, x, A, alpha, beta], [out])
    compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])

    out = maximum(y, x)
    fgraph = FunctionGraph([y, x], [out])
    compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])

    out = tt_max(y)
    fgraph = FunctionGraph([y], [out])
    compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])
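
compare_jax_and_py compiles the same graph with the JAX backend and with the reference Python backend and checks that the outputs agree. A hedged sketch of the same equivalence in plain JAX/NumPy (jnp.maximum is the elementwise op, jnp.max the reduction, mirroring maximum and tt_max above):

import numpy as np
import jax.numpy as jnp

y = np.array([1.0, 2.0])
x = np.array([3.0, 4.0])
assert np.allclose(jnp.maximum(y, x), np.maximum(y, x))  # elementwise
assert np.allclose(jnp.max(y), np.max(y))                # reduction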
Example #4
def max_pool(images, imgshp, maxpoolshp):
    """Implements a max pooling layer

    Takes as input a 2D tensor of shape batch_size x img_size and
    performs max pooling.  Max pooling downsamples by taking the max
    value in a given area, here defined by maxpoolshp. Outputs a 2D
    tensor of shape batch_size x output_size.

    :param images: 2D tensor containing the images on which to apply max
                   pooling. Assumed to be of shape batch_size x img_size
    :param imgshp: tuple containing image dimensions
    :param maxpoolshp: tuple containing shape of area to max pool over

    :return: out1, symbolic result (2D tensor)
    :return: out2, logical shape of the output
    """
    poolsize = np.int64(np.prod(maxpoolshp))

    # imgshp contains either 2 entries (height,width) or 3 (nfeatures,h,w)
    # in the first case, default nfeatures to 1
    if np.size(imgshp) == 2:
        imgshp = (1, ) + imgshp

    # construct indices and index pointers for sparse matrix, which,
    # when multiplied with input images will generate a stack of image
    # patches
    indices, indptr, spmat_shape, sptype, outshp = convolution_indices.conv_eval(
        imgshp, maxpoolshp, maxpoolshp, mode="valid")

    # build sparse matrix, then generate stack of image patches
    csc = aesara.sparse.CSM(sptype)(np.ones(indices.size), indices, indptr,
                                    spmat_shape)
    patches = sparse.structured_dot(csc, images.T).T

    pshape = aet.stack([
        images.shape[0] * aet.as_tensor(np.prod(outshp)),
        aet.as_tensor(imgshp[0]),
        aet.as_tensor(poolsize),
    ])
    patch_stack = reshape(patches, pshape, ndim=3)

    out1 = tt_max(patch_stack, axis=2)

    pshape = aet.stack([
        images.shape[0],
        aet.as_tensor(np.prod(outshp)),
        aet.as_tensor(imgshp[0]),
    ])
    out2 = reshape(out1, pshape, ndim=3)

    out3 = DimShuffle(out2.broadcastable, (0, 2, 1))(out2)

    return aet.flatten(out3, 2), outshp
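
A hedged usage sketch: the 28x28 image shape and 2x2 pooling window are illustrative assumptions, and convolution_indices must be importable for this to run:

images = matrix("images")  # shape: (batch_size, 28 * 28)
pooled, out_shape = max_pool(images, imgshp=(28, 28), maxpoolshp=(2, 2))
# pooled is the symbolic 2D result (batch_size x output_size);
# out_shape is the logical shape of each pooled feature map.
f = aesara.function([images], pooled)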
Example #5
    def compute_gpu(self, test_gpu_tensor, test_host_tensor, axis):
        M = self.get_gpu_tensor()
        f = aesara.function(
            [M],
            [tt_max(M, axis=axis), argmax(M, axis=axis)],
            name=f"shape:{test_gpu_tensor.shape}/axis:{axis}/GPU",
            mode=mode_with_gpu,
        )
        check_if_gpu_reduce_in_graph(f)
        aesara_max, aesara_argmax = f(test_gpu_tensor)
        ref_max, ref_argmax = numpy_maxandargmax(test_host_tensor, axis=axis)
        utt.assert_allclose(ref_max, aesara_max)
        utt.assert_allclose(ref_argmax, aesara_argmax)
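
numpy_maxandargmax is a host-side reference helper not shown here; a minimal sketch of what it presumably computes (the name and signature match the call above, but the body is an assumption):

def numpy_maxandargmax(X, axis=None):
    # NumPy reference for the GPU kernel: the max values and the indices
    # of those values along `axis`.
    return np.max(X, axis=axis), np.argmax(X, axis=axis)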
Example #6
def f_compute(op):
    def result(inp):
        dtype = inp.dtype
        ctx_name = inp.context.name
        key = (dtype, ctx_name)
        f = result.cache.get(key, None)
        if f is None:
            guard_in = GpuArrayType(str(dtype), (False,),
                                    context_name=ctx_name)()
            mode = get_mode("FAST_RUN").including("gpuarray")
            f = aesara.function([guard_in],
                                op(guard_in),
                                mode=mode,
                                profile=False)
            result.cache[key] = f
        return f(inp)

    result.cache = dict()
    return result


f_gpua_min = f_compute(tt_min)
f_gpua_max = f_compute(tt_max)
f_gpua_absmax = f_compute(lambda x: tt_max(abs_(x)))
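
These cached reducers let value checks run on the GPU without copying whole arrays back to the host. A hedged sketch of how a NaN test might use them (the helper name is illustrative; the trick is that NaN propagates through min, so one NaN anywhere makes the reduced minimum NaN):

def _gpu_contains_nan(arr):
    # Sketch (assumption): flatten to 1-d, reduce with the cached GPU min,
    # then test the single scalar on the host.
    return np.isnan(f_gpua_min(arr.reshape(arr.size)))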


class NanGuardMode(Mode):
    """
    An Aesara compilation Mode that makes the compiled function automatically
    detect NaNs and Infs and raise an error when they occur.

    Parameters
    ----------
    nan_is_error : bool
        If True, raise an error anytime a NaN is encountered.
    inf_is_error : bool
        If True, raise an error anytime an Inf is encountered.  Note that some
        pylearn2 modules currently use np.inf as a default value (e.g.
        mlp.max_pool) and these will cause an error if inf_is_error is True.