def local_max_and_argmax(node):
    """
    If we don't use the argmax, change it to a max only.
    """
    if isinstance(node.op, T.MaxAndArgmax):
        axis = node.op.get_params(node)
        if len(node.outputs[1].clients) == 0:
            new = CAReduce(scal.maximum, axis)(node.inputs[0])
            return [new, None]

        if len(node.outputs[0].clients) == 0:
            return [None, T._argmax(node.inputs[0], axis)]
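For context, a small NumPy sketch (not from the source) of the identity this rewrite relies on: the value at the argmax position is the max itself, so when nothing consumes the argmax output, a plain maximum reduction (what CAReduce(scal.maximum, axis) computes) is all that is needed.

import numpy

x = numpy.random.rand(3, 4)
# The max output of max_and_argmax equals a plain maximum reduction:
assert numpy.allclose(numpy.max(x, axis=1),
                      x[numpy.arange(3), numpy.argmax(x, axis=1)])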
Example #2
    def test_infer_shape(self):
        for xsh, tosum in [((5, 6), None), ((5, 6), (0, 1)), ((5, 6), (0, )),
                           ((5, 6), (1, )), ((5, 6), (-1, )), ((5, 6), (-2, )),
                           ((2, 3, 4, 5), (0, 1, 3)), ((2, 3, 4, 5), (-2, -3)),
                           ((5, 0), None), ((5, 0), (0, )), ((5, 0), (1, )),
                           ((5, 6), ()), ((5, 0), ()), ((), None), ((), ())]:
            dtype = theano.config.floatX
            x = TensorType(dtype, [(entry == 1) for entry in xsh])('x')
            if tosum is None:
                tosum = list(range(len(xsh)))
            xv = numpy.asarray(numpy.random.rand(*xsh), dtype=dtype)
            self._compile_and_check([x], [CAReduce(scalar.add, axis=tosum)(x)],
                                    [xv], CAReduce,
                                    ["local_cut_useless_reduce"])
Example #3
def local_max_and_argmax(node):
    """
    If we don't use the argmax, change it to a max only.
    """
    if node.op == T._max_and_argmax:
        if len(node.outputs[1].clients) == 0:
            # MaxAndArgmax supports a variable axis,
            # but CAReduce supports only a constant axis.
            try:
                axis = get_scalar_constant_value(node.inputs[1])
            except NotScalarConstantError:
                return False

            new = CAReduce(scal.maximum, axis)(node.inputs[0])
            return [new, None]
def local_max_to_min(node):
    """
    change -(max(-x)) to min

    This is tested in tensor/tests/test_basic.py:test_min_max

    :note: we don't need an opt that does the reverse, because by default
           the interface puts only MaxAndArgmax into the graph.
    """
    if node.op == T.neg and node.inputs[0].owner:
        max = node.inputs[0]
        if (max.owner and
            isinstance(max.owner.op, CAReduce)
            and max.owner.op.scalar_op == scal.maximum):
            neg = max.owner.inputs[0]
            if neg.owner and neg.owner.op == T.neg:
                return [CAReduce(scal.minimum,
                                 max.owner.op.axis)(neg.owner.inputs[0])]

    return False
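A NumPy sketch (not from the source) of the identity behind local_max_to_min:

import numpy

x = numpy.random.randn(4, 5)
# -(max(-x)) == min(x) for any reduction axis, which justifies replacing
# the neg/CAReduce(maximum)/neg chain with a single CAReduce(minimum).
assert numpy.allclose(-numpy.max(-x, axis=0), numpy.min(x, axis=0))
assert numpy.allclose(-numpy.max(-x), numpy.min(x))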
    def apply(self, fgraph):
        did_something = True
        while did_something:
            nodelist = fgraph.toposort()
            did_something = False
            for node in nodelist:
                if node.op == T._max_and_argmax:
                    if len(node.outputs[1].clients) == 0:
                        try:
                            axis = get_scalar_constant_value(node.inputs[1])
                        except NotScalarConstantError:
                            # A variable axis can't be turned into a
                            # CAReduce; skip this node and keep scanning.
                            continue

                        new = CAReduce(scal.maximum, axis)(node.inputs[0])
                        try:
                            fgraph.replace_all_validate(
                                ((node.outputs[0], new),),
                                reason=self.__class__.__name__)
                            did_something = True
                            break
                        except InconsistencyError:
                            pass
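The apply method above follows a common fixed-point pattern: rescan the graph until one full pass makes no replacement, and restart the scan after every successful rewrite because the toposort may be stale. A library-agnostic sketch of that control flow, with try_rewrite as a hypothetical callback:

# Generic fixed-point rewrite loop (sketch; try_rewrite is hypothetical
# and should return True after mutating the graph).
def rewrite_to_fixed_point(fgraph, try_rewrite):
    did_something = True
    while did_something:
        did_something = False
        for node in fgraph.toposort():
            if try_rewrite(fgraph, node):
                did_something = True
                break  # the node list is now stale; rescan from scratch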
def local_max_and_argmax(node):
    """
    If we don't use the argmax, change it to a max only.
    """
    if node.op == T._max_and_argmax:
        if len(node.outputs[1].clients) == 0:
            # MaxAndArgmax supports a variable axis,
            # but CAReduce supports only a constant axis.
            if node.inputs[1].data is None:
                axis = None
            else:
                try:
                    axis = get_scalar_constant_value(node.inputs[1])
                except NotScalarConstantError:
                    axis = node.inputs[1]
                    if not isinstance(axis, T.TensorConstant):
                        return False
                    axis = axis.data

            new = CAReduce(scal.maximum, axis)(node.inputs[0])
            return [new, None]

        if len(node.outputs[0].clients) == 0:
            return [None, T._argmax(node.inputs[0], node.inputs[1])]
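A hedged usage sketch (this assumes the public theano.tensor.max_and_argmax API, which is not shown in these excerpts): when only the max output of the node is consumed, the rewrite lets the compiled function contain a single CAReduce(maximum) instead of a MaxAndArgmax.

import theano
import theano.tensor as T

x = T.matrix('x')
mx, amx = T.max_and_argmax(x, axis=0)  # one node, two outputs
f = theano.function([x], mx)           # amx has no clients here, so the
                                       # optimizer can swap in CAReduce(maximum)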
Example #7
    def with_linker(self, linker, scalar_op=scalar.add, dtype="floatX",
                    test_nan=False, tensor_op=None):
        for xsh, tosum in [((5, 6), None),
                           ((5, 6), (0, 1)),
                           ((5, 6), (0, )),
                           ((5, 6), (1, )),
                           ((5, 6), (-1, )),
                           ((5, 6), (-2, )),
                           ((5, 6), ()),
                           ((2, 3, 4, 5), (0, 1, 3)),
                           ((2, 3, 4, 5), (-2, -3)),
                           ((5, 0), None),
                           ((5, 0), (0, )),
                           ((5, 0), (1, )),
                           ((5, 0), ()),
                           ((), None),
                           ((), ())]:
            if dtype == "floatX":
                dtype = theano.config.floatX
            x = TensorType(dtype, [(entry == 1) for entry in xsh])('x')
            if tensor_op is None:
                e = CAReduce(scalar_op, axis=tosum)(x)
            else:
                e = tensor_op(x, axis=tosum)

            if tosum is None:
                tosum = list(range(len(xsh)))

            f = copy(linker).accept(FunctionGraph([x], [e])).make_function()
            xv = numpy.asarray(numpy.random.rand(*xsh))

            if not "int" in dtype:
                xv = numpy.asarray(xv, dtype=dtype)
            else:
                xv = numpy.asarray(xv < 0.5, dtype=dtype)

            if test_nan and xv.size > 0:
                if len(xsh) > 0:
                    xv = xv.flatten()
                    xv[0] = numpy.nan
                    xv = xv.reshape(*xsh)
                else:
                    xv = numpy.asarray(numpy.nan, dtype=dtype)
            zv = xv
            numpy_raised = False
            if len(tosum) > 1 and any(a < 0 for a in tosum):
                # Normalize negative axes so the reductions below are
                # applied in the correct order.
                axis2 = []
                for a in tosum:
                    if a < 0:
                        axis2.append(a + len(xsh))
                    else:
                        axis2.append(a)
                assert len(axis2) == len(tosum)
                tosum = tuple(axis2)
            if tensor_op == tensor.all:
                for axis in reversed(sorted(tosum)):
                    zv = numpy.all(zv, axis)
                if len(tosum) == 0:
                    zv = zv != 0
            elif tensor_op == tensor.any:
                for axis in reversed(sorted(tosum)):
                    zv = numpy.any(zv, axis)
                if len(tosum) == 0:
                    zv = zv != 0
            elif scalar_op == scalar.add:
                for axis in reversed(sorted(tosum)):
                    zv = numpy.add.reduce(zv, axis)
            elif scalar_op == scalar.mul:
                for axis in reversed(sorted(tosum)):
                    zv = numpy.multiply.reduce(zv, axis)
            elif scalar_op == scalar.maximum:
                try:
                    for axis in reversed(sorted(tosum)):
                        zv = numpy.maximum.reduce(zv, axis)
                except ValueError:
                    numpy_raised = True
            elif scalar_op == scalar.minimum:
                try:
                    for axis in reversed(sorted(tosum)):
                        zv = numpy.minimum.reduce(zv, axis)
                except ValueError:
                    numpy_raised = True
            elif scalar_op == scalar.or_:
                for axis in reversed(sorted(tosum)):
                    zv = numpy.bitwise_or.reduce(zv, axis)
            elif scalar_op == scalar.and_:
                for axis in reversed(sorted(tosum)):
                    zv = numpy.bitwise_and.reduce(zv, axis)
            elif scalar_op == scalar.xor:
                # There is no identity value for the xor function,
                # so we can't support shapes with a dimension of 0.
                if numpy.prod(zv.shape) == 0:
                    continue
                for axis in reversed(sorted(tosum)):
                    zv = numpy.bitwise_xor.reduce(zv, axis)
            else:
                raise Exception(
                    "Test for CAReduce with scalar_op %s not implemented" %
                    str(scalar_op))
            if scalar_op in [scalar.maximum, scalar.minimum] and numpy_raised:
                try:
                    out = f(xv)
                    assert out.dtype == dtype
                except ValueError:
                    pass
                else:
                    self.fail()
            else:
                # numpy.{all,any} return arrays of bool dtype,
                # but the theano ops return an int8 array instead.
                if scalar_op in [scalar.and_, scalar.or_]:
                    zv = numpy.asarray(zv, dtype='int8')
                if test_nan:
                    self.assertTrue(theano.tensor.TensorType.values_eq(f(xv),
                                                                       zv),
                                    (f(xv), zv))
                else:
                    f_xv = f(xv)
                    self.assertTrue((f_xv.shape == zv.shape), (f_xv, zv))
                    self.assertTrue(numpy.allclose(f_xv, zv), (f_xv, zv))

            # Test CAReduce.infer_shape.
            # The Shape op doesn't implement c_code!
            if isinstance(linker, gof.PerformLinker):
                x = TensorType(dtype, [(entry == 1) for entry in xsh])('x')
                if tensor_op is None:
                    e = CAReduce(scalar_op, axis=tosum)(x)
                else:
                    e = tensor_op(x, axis=tosum)
                if tosum is None:
                    tosum = list(range(len(xsh)))
                f = copy(linker).accept(FunctionGraph([x],
                     [e.shape])).make_function()
                if not (scalar_op in [scalar.maximum, scalar.minimum] and
                        (xsh == () or numpy.prod(xsh) == 0)):
                    assert all(f(xv) == zv.shape)
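Hypothetical call sites (not part of the original test file) showing how concrete tests would drive this helper:

# Illustrative only: method names and arguments below are assumptions.
def test_perform(self):
    self.with_linker(gof.PerformLinker(), scalar.add, dtype='floatX')

def test_perform_nan(self):
    self.with_linker(gof.PerformLinker(), scalar.maximum,
                     dtype='floatX', test_nan=True)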