def test_optimization_max(self):
        data = np.asarray(np.random.rand(2, 3), dtype=config.floatX)
        n = tensor.matrix()

        for axis in [0, 1, -1]:
            f = function([n], tensor.max(n, axis), mode=self.mode)
            topo = f.maker.fgraph.toposort()
            assert len(topo) == 1
            assert isinstance(topo[0].op, CAReduce)
            f(data)

            f = function([n], tensor.max(-n, axis), mode=self.mode)
            topo = f.maker.fgraph.toposort()
            assert len(topo) == 2
            assert isinstance(topo[0].op, Elemwise)
            assert isinstance(topo[0].op.scalar_op, scalar.Neg)
            assert isinstance(topo[1].op, CAReduce)
            f(data)

            f = function([n], -tensor.max(n, axis), mode=self.mode)
            topo = f.maker.fgraph.toposort()
            assert len(topo) == 2
            assert isinstance(topo[0].op, CAReduce)
            assert isinstance(topo[1].op, Elemwise)
            assert isinstance(topo[1].op.scalar_op, scalar.Neg)
            f(data)

            f = function([n], -tensor.max(-n, axis), mode=self.mode)
            topo = f.maker.fgraph.toposort()
            assert len(topo) == 1
            assert isinstance(topo[0].op, CAReduce)  # min
            f(data)
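
A quick way to see these rewrites outside the test harness is to print the compiled graph. Below is a minimal sketch, assuming Aesara is installed; the variable names are illustrative only and the printed graph should contain a single CAReduce (min) node, matching the last assertion above.

import aesara
import aesara.tensor as aet

n = aet.matrix("n")
f = aesara.function([n], -aet.max(-n, axis=0))
# Print the optimized graph; the -max(-x) pattern should have been
# rewritten to a single min reduction.
aesara.printing.debugprint(f)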
Example #2
 def backward(self, y_):
     y = y_.T
     y = aet.concatenate([y, -aet.sum(y, 0, keepdims=True)])
     # "softmax" with vector support and no deprication warning:
     e_y = aet.exp(y - aet.max(y, 0, keepdims=True))
     x = e_y / aet.sum(e_y, 0, keepdims=True)
     return floatX(x.T)
Example #3
def logsumexp(x, axis=None, keepdims=True):
    # Adapted from https://github.com/Theano/Theano/issues/1563
    x_max = aet.max(x, axis=axis, keepdims=True)
    x_max = aet.switch(aet.isinf(x_max), 0, x_max)
    res = aet.log(aet.sum(aet.exp(x - x_max), axis=axis,
                          keepdims=True)) + x_max
    return res if keepdims else res.squeeze()
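
A minimal usage sketch for the helper above; the imports, shapes, and test values here are assumptions for illustration, not part of the original snippet.

import aesara
import aesara.tensor as aet
import numpy as np

x = aet.matrix("x")
f = aesara.function([x], logsumexp(x, axis=1))
data = np.random.rand(3, 4).astype(x.dtype)
# Naive reference; logsumexp keeps the reduced axis by default.
reference = np.log(np.exp(data).sum(axis=1, keepdims=True))
np.testing.assert_allclose(f(data), reference, rtol=1e-5)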
Example #4
    def backward(self, rv_var, rv_value):
        if rv_var.broadcastable[-1]:
            # If this variable is just a bunch of scalars/degenerate
            # Dirichlets, we can't transform it
            return rv_value

        y = rv_value.T
        y = at.concatenate([y, -at.sum(y, 0, keepdims=True)])
        # "softmax" with vector support and no deprication warning:
        e_y = at.exp(y - at.max(y, 0, keepdims=True))
        x = e_y / at.sum(e_y, 0, keepdims=True)
        return floatX(x.T)
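
For intuition, here is a NumPy-only mirror of the graph built above (a hypothetical helper, not part of the transform class): append the negated row sums, apply a numerically stabilised softmax over the augmented axis, and every output row lands on the probability simplex.

import numpy as np

def backward_np(y):
    # NumPy translation of the symbolic expression above (illustrative only).
    y = y.T
    y = np.concatenate([y, -y.sum(axis=0, keepdims=True)])
    e_y = np.exp(y - y.max(axis=0, keepdims=True))
    x = e_y / e_y.sum(axis=0, keepdims=True)
    return x.T

samples = np.random.randn(5, 3)
assert np.allclose(backward_np(samples).sum(axis=1), 1.0)  # rows sum to one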
Example #5
 def compute_gpu(self, test_gpu_tensor, test_host_tensor, axis):
     M = self.get_gpu_tensor()
     f = aesara.function(
         [M],
         [tt.max(M, axis=axis), tt.argmax(M, axis=axis)],
         name="shape:" + str(test_gpu_tensor.shape) + "/axis:" + str(axis) + "/GPU",
         mode=mode_with_gpu,
     )
     check_if_gpu_reduce_in_graph(f)
     f(test_gpu_tensor)
     aesara_max, aesara_argmax = f(test_gpu_tensor)
     ref_max, ref_argmax = numpy_maxandargmax(test_host_tensor, axis=axis)
     utt.assert_allclose(ref_max, aesara_max)
     utt.assert_allclose(ref_argmax, aesara_argmax)
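
The reference helper numpy_maxandargmax is not shown in this excerpt; for a single integer axis it presumably behaves like the following sketch (an assumption, not the original implementation).

import numpy as np

def numpy_maxandargmax(x, axis=None):
    # Plain NumPy reference for the GPU results above (illustrative only).
    return np.max(x, axis=axis), np.argmax(x, axis=axis)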
Example #6
def tt_logsumexp(x, axis=None, keepdims=False):
    """Construct a Theano graph for a log-sum-exp calculation."""
    x_max_ = at.max(x, axis=axis, keepdims=True)

    if x_max_.ndim > 0:
        x_max_ = at.set_subtensor(x_max_[at.isinf(x_max_)], 0.0)
    elif at.isinf(x_max_):
        x_max_ = at.as_tensor(0.0)

    res = at.sum(at.exp(x - x_max_), axis=axis, keepdims=keepdims)
    res = at.log(res)

    if not keepdims:
        # SciPy uses the `axis` keyword here, but Theano doesn't support that.
        # x_max_ = tt.squeeze(x_max_, axis=axis)
        axis = np.atleast_1d(axis) if axis is not None else range(x_max_.ndim)
        x_max_ = x_max_.dimshuffle([
            i for i in range(x_max_.ndim)
            if not x_max_.broadcastable[i] or i not in axis
        ])

    return res + x_max_
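
A sanity check against SciPy's reference implementation; the imports and shapes below are assumptions added for illustration.

import aesara
import aesara.tensor as at
import numpy as np
from scipy.special import logsumexp as sp_logsumexp

x = at.matrix("x")
f = aesara.function([x], tt_logsumexp(x, axis=0))
data = np.random.rand(4, 5).astype(x.dtype)
# With keepdims=False the reduced axis is dropped, matching SciPy's output.
np.testing.assert_allclose(f(data), sp_logsumexp(data, axis=0), rtol=1e-5)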
Example #7
def max_pool(images, imgshp, maxpoolshp):
    """Implements a max pooling layer

    Takes as input a 2D tensor of shape batch_size x img_size and
    performs max pooling.  Max pooling downsamples by taking the max
    value in a given area, here defined by maxpoolshp. Outputs a 2D
    tensor of shape batch_size x output_size.

    :param images: 2D tensor containing images on which to apply max pooling.
                   Assumed to be of shape batch_size x img_size
    :param imgshp: tuple containing image dimensions
    :param maxpoolshp: tuple containing shape of area to max pool over

    :return: out1, symbolic result (2D tensor)
    :return: out2, logical shape of the output
    """
    poolsize = np.int64(np.prod(maxpoolshp))

    # imgshp contains either 2 entries (height,width) or 3 (nfeatures,h,w)
    # in the first case, default nfeatures to 1
    if np.size(imgshp) == 2:
        imgshp = (1,) + imgshp

    # construct indices and index pointers for sparse matrix, which,
    # when multiplied with input images will generate a stack of image
    # patches
    indices, indptr, spmat_shape, sptype, outshp = convolution_indices.conv_eval(
        imgshp, maxpoolshp, maxpoolshp, mode="valid"
    )

    #    print 'XXXXXXXXXXXXXXXX MAX POOLING LAYER XXXXXXXXXXXXXXXXXXXX'
    #    print 'imgshp = ', imgshp
    #    print 'maxpoolshp = ', maxpoolshp
    #    print 'outshp = ', outshp

    # build sparse matrix, then generate stack of image patches
    csc = aesara.sparse.CSM(sptype)(np.ones(indices.size), indices, indptr, spmat_shape)
    patches = sparse.structured_dot(csc, images.T).T

    pshape = tensor.stack(
        [
            images.shape[0] * tensor.as_tensor(np.prod(outshp)),
            tensor.as_tensor(imgshp[0]),
            tensor.as_tensor(poolsize),
        ]
    )
    patch_stack = tensor.reshape(patches, pshape, ndim=3)

    out1 = tensor.max(patch_stack, axis=2)

    pshape = tensor.stack(
        [
            images.shape[0],
            tensor.as_tensor(np.prod(outshp)),
            tensor.as_tensor(imgshp[0]),
        ]
    )
    out2 = tensor.reshape(out1, pshape, ndim=3)

    out3 = tensor.DimShuffle(out2.broadcastable, (0, 2, 1))(out2)

    return tensor.flatten(out3, 2), outshp
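
A hypothetical invocation sketch (the shapes are illustrative and assume the surrounding module's imports, e.g. aesara and its tensor module bound to `tensor`): pool non-overlapping 2x2 regions of 8x8 single-channel images that arrive flattened to rows.

images = tensor.matrix("images")  # batch_size x (8 * 8)
pooled, logical_shape = max_pool(images, imgshp=(8, 8), maxpoolshp=(2, 2))
f = aesara.function([images], pooled)  # pooled is batch_size x output_size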
Example #8
 def test_max(self):
     # If we call max directly, we will return a CAReduce object
     # which doesn't have R_op implemented!
     # self.check_mat_rop_lop(tensor.max(self.mx, axis=[0,1])[0], ())
     self.check_mat_rop_lop(tensor.max(self.mx, axis=0), (self.mat_in_shape[1],))
     self.check_mat_rop_lop(tensor.max(self.mx, axis=1), (self.mat_in_shape[0],))
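
check_mat_rop_lop belongs to the surrounding test class and is not shown here; a minimal standalone sketch of the L-operator being exercised (the names and imports are assumptions) might look like this.

import aesara
import aesara.tensor as tensor

mx = tensor.matrix("mx")
v = tensor.vector("v")  # same shape as max(mx, axis=0)
# L-operator: left-multiply the Jacobian of max(mx, axis=0) by v.
lop = aesara.gradient.Lop(tensor.max(mx, axis=0), mx, v)
f = aesara.function([mx, v], lop)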
Example #9
                                    context_name=ctx_name)()
            mode = get_mode("FAST_RUN").including("gpuarray")
            f = aesara.function([guard_in],
                                op(guard_in),
                                mode=mode,
                                profile=False)
            result.cache[key] = f
        return f(inp)

    result.cache = dict()
    return result


f_gpua_min = f_compute(tt.min)
f_gpua_max = f_compute(tt.max)
f_gpua_absmax = f_compute(lambda x: tt.max(tt.abs_(x)))


class NanGuardMode(Mode):
    """
    An Aesara compilation Mode that makes the compiled function automatically
    detect NaNs and Infs and raise an error if they occur.

    Parameters
    ----------
    nan_is_error : bool
        If True, raise an error anytime a NaN is encountered.
    inf_is_error : bool
        If True, raise an error anytime an Inf is encountered.  Note that some
        pylearn2 modules currently use np.inf as a default value (e.g.
        mlp.max_pool) and these will cause an error if inf_is_error is True.