Example #1
def test_argmax_pushdown_bias():
    x = matrix()
    b = vector()

    out = argmax(softmax_with_bias(x, b), axis=-1)
    fgraph = FunctionGraph([x, b], [out])

    optdb.query(OPT_FAST_RUN).optimize(fgraph)

    types_to_check = (DimShuffle, Elemwise, Argmax)
    assert len(fgraph.toposort()) == 3

    for i, expected_type in enumerate(types_to_check):
        assert isinstance(fgraph.toposort()[i].op, expected_type)
    assert check_stack_trace(fgraph, ops_to_check=types_to_check)

    x = matrix()
    b = vector()
    out = max_and_argmax(softmax_with_bias(x, b), axis=-1)[0]
    fgraph = FunctionGraph([x, b], [out])

    optdb.query(OPT_FAST_RUN).optimize(fgraph)

    assert len(fgraph.toposort()) == 2
    assert isinstance(fgraph.toposort()[0].op, SoftmaxWithBias)
    assert isinstance(fgraph.toposort()[1].op, CAReduce)
    assert isinstance(fgraph.toposort()[1].op.scalar_op,
                      aesara.scalar.ScalarMaximum)
    assert check_stack_trace(fgraph, ops_to_check=(SoftmaxWithBias, CAReduce))
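
The rewrite being checked relies on softmax being strictly increasing within each row, so it can be dropped under argmax once the bias has been added. A minimal NumPy sketch of that identity (the array names are illustrative, not part of the test):

import numpy as np

x = np.random.rand(3, 5)
b = np.random.rand(5)
logits = x + b
sm = np.exp(logits) / np.exp(logits).sum(axis=-1, keepdims=True)
# softmax preserves the per-row ordering, so the argmax is unchanged
assert (sm.argmax(axis=-1) == logits.argmax(axis=-1)).all()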
Example #2
    def __init__(self, input, n_in, n_out, name_prefix=""):
        """Initialize the parameters of the logistic regression

        :type input: TensorType
        :param input: symbolic variable that describes the input of the
                      architecture (one minibatch)

        :type n_in: int
        :param n_in: number of input units, the dimension of the space in
                     which the datapoints lie

        :type n_out: int
        :param n_out: number of output units, the dimension of the space in
                      which the labels lie

        """

        # initialize with 0 the weights W as a matrix of shape (n_in, n_out)
        self.W = aesara.shared(
            value=np.zeros((n_in, n_out), dtype=aesara.config.floatX),
            name=name_prefix + "W",
        )

        # compute vector of class-membership probabilities in symbolic form
        self.p_y_given_x = softmax(dot(input, self.W))

        # compute prediction as class whose probability is maximal in
        # symbolic form
        self.y_pred = argmax(self.p_y_given_x, axis=1)

        # parameters of the model
        self.params = [self.W]
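
Assuming the __init__ above belongs to a class named LogisticRegression (the class name is not shown in the excerpt), a usage sketch would look like:

x = matrix("x")
# hypothetical instantiation: 784 input features, 10 classes
classifier = LogisticRegression(input=x, n_in=784, n_out=10, name_prefix="lr_")
# compile a predictor that returns the index of the most probable class
predict = aesara.function([x], classifier.y_pred)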
Example #3
def test_none_Constant():
    # FIXME: This is a poor test.

    # Tests equals
    # We had an error in the past with unpickling

    o1 = Constant(NoneTypeT(), None, name="NoneConst")
    o2 = Constant(NoneTypeT(), None, name="NoneConst")
    assert o1.equals(o2)
    assert NoneConst.equals(o1)
    assert o1.equals(NoneConst)
    assert NoneConst.equals(o2)
    assert o2.equals(NoneConst)

    # This triggers the `equals` code path that returned the wrong answer in the past.
    import pickle

    import aesara

    x = vector("x")
    y = argmax(x)
    kwargs = {}
    # We can't pickle DebugMode
    if aesara.config.mode in ["DebugMode", "DEBUG_MODE"]:
        kwargs = {"mode": "FAST_RUN"}
    f = aesara.function([x], [y], **kwargs)
    pickle.loads(pickle.dumps(f))
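
The test only checks that unpickling succeeds; a slightly stronger variant (a sketch, not part of the original test) would also compare outputs across the round trip:

import numpy as np

f2 = pickle.loads(pickle.dumps(f))
data = np.random.rand(4).astype(aesara.config.floatX)
# argmax is deterministic, so both functions must agree exactly
assert np.array_equal(f(data)[0], f2(data)[0])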
Example #4
def test_GpuCrossentropySoftmaxArgmax1HotWithBias():
    # This is a basic test for GpuCrossentropySoftmaxArgmax1HotWithBias.
    # We check that the kernel loops when there are too many threads.

    n_in = 1000
    batch_size = 4097
    n_out = 1250

    if not isinstance(mode_with_gpu, aesara.compile.debugmode.DebugMode):
        n_in = 4098
        n_out = 4099

    y = lvector("y")

    b = fvector("b")

    # We precompute the dot product with a big shape beforehand so that the
    # test of GpuCrossentropySoftmax1HotWithBiasDx does not fail with "the
    # launch timed out and was terminated" on GPU cards that are not powerful
    # enough. The big shape is needed to exercise the corner case.
    dot_result = fmatrix("dot_result")

    xx = np.asarray(np.random.rand(batch_size, n_in), dtype=np.float32)
    yy = np.ones((batch_size, ), dtype="int32")
    b_values = np.zeros((n_out, ), dtype="float32")
    W_values = np.asarray(np.random.rand(n_in, n_out), dtype="float32")

    dot_value = np.asarray(np.dot(xx, W_values), dtype="float32")
    del W_values
    p_y_given_x = aesara.tensor.nnet.softmax(dot_result + b)
    y_pred = argmax(p_y_given_x, axis=-1)
    loss = -mean(log(p_y_given_x)[aet.arange(y.shape[0]), y])
    dW = grad(loss, dot_result)
    classify = aesara.function(inputs=[y, b, dot_result],
                               outputs=[loss, y_pred, dW],
                               mode=mode_without_gpu)
    classify_gpu = aesara.function(inputs=[y, b, dot_result],
                                   outputs=[loss, y_pred, dW],
                                   mode=mode_with_gpu)

    assert any(
        isinstance(node.op,
                   aesara.tensor.nnet.CrossentropySoftmaxArgmax1HotWithBias)
        for node in classify.maker.fgraph.toposort()
    )
    assert any(
        isinstance(node.op, GpuCrossentropySoftmaxArgmax1HotWithBias)
        for node in classify_gpu.maker.fgraph.toposort()
    )

    out = classify(yy, b_values, dot_value)
    gout = classify_gpu(yy, b_values, dot_value)

    assert len(out) == len(gout) == 3
    utt.assert_allclose(out[0], gout[0])
    utt.assert_allclose(out[2], gout[2], atol=3e-6)
    utt.assert_allclose(out[1], gout[1])
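
For reference, the symbolic loss above corresponds to the following NumPy computation (a sketch reusing dot_value, b_values, and yy from the test; the row-max subtraction mirrors the numerical stabilization that softmax performs internally):

z = (dot_value + b_values).astype(np.float64)
z -= z.max(axis=1, keepdims=True)          # stabilize before exponentiating
sm = np.exp(z) / np.exp(z).sum(axis=1, keepdims=True)
ref_loss = -np.mean(np.log(sm[np.arange(yy.shape[0]), yy]))
# ref_loss should match out[0] up to float32 precision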
Example #5
    def compute_gpu(self, test_gpu_tensor, test_host_tensor, axis):
        M = self.get_gpu_tensor()
        f = aesara.function(
            [M],
            [aet_max(M, axis=axis), argmax(M, axis=axis)],
            name="shape:" + str(test_gpu_tensor.shape) + "/axis:" + str(axis) + "/GPU",
            mode=mode_with_gpu,
        )
        check_if_gpu_reduce_in_graph(f)
        aesara_max, aesara_argmax = f(test_gpu_tensor)
        ref_max, ref_argmax = numpy_maxandargmax(test_host_tensor, axis=axis)
        utt.assert_allclose(ref_max, aesara_max)
        utt.assert_allclose(ref_argmax, aesara_argmax)
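
numpy_maxandargmax is a helper defined elsewhere in the test module; a plausible reference implementation (an assumption, mirroring max_and_argmax on the host) would be:

def numpy_maxandargmax(x, axis=None):
    # host-side reference for the GPU max/argmax pair
    return np.max(x, axis=axis), np.argmax(x, axis=axis)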
Example #6
    def test_argmax(self):
        self.check_nondiff_rop(argmax(self.mx, axis=1))
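
argmax is piecewise constant in its input, so it has no meaningful derivative; check_nondiff_rop (defined on the test class) is expected to assert that building the R-op of this expression fails rather than silently producing a gradient.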