Code example #1
def test_none_Constant():
    # Tests `equals`; we had a bug with unpickling in the past.
    # (Import locations assumed from aesara's module layout.)
    from aesara.graph.basic import Constant
    from aesara.tensor.type_other import NoneConst, NoneTypeT

    o1 = Constant(NoneTypeT(), None, name="NoneConst")
    o2 = Constant(NoneTypeT(), None, name="NoneConst")
    assert o1.equals(o2)
    assert NoneConst.equals(o1)
    assert o1.equals(NoneConst)
    assert NoneConst.equals(o2)
    assert o2.equals(NoneConst)

    # This triggers an `equals` call that returned the wrong answer in the past.
    import pickle

    import aesara
    from aesara import tensor

    x = tensor.vector("x")
    y = tensor.argmax(x)
    kwargs = {}
    # We can't pickle DebugMode
    if aesara.config.mode in ["DebugMode", "DEBUG_MODE"]:
        kwargs = {"mode": "FAST_RUN"}
    f = aesara.function([x], [y], **kwargs)
    pickle.loads(pickle.dumps(f))
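
A natural follow-up (not part of the original test) is to check that the unpickled copy computes the same result; a minimal sketch, assuming the `f` built above:

import numpy as np

f2 = pickle.loads(pickle.dumps(f))
data = np.random.rand(5).astype(aesara.config.floatX)
assert f(data)[0] == f2(data)[0]  # both return the same argmax index
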
Code example #2
    def __init__(self, input, n_in, n_out, name_prefix=""):
        """Initialize the parameters of the logistic regression

        :type input: aesara.tensor.TensorType
        :param input: symbolic variable that describes the input of the
                      architecture (one minibatch)

        :type n_in: int
        :param n_in: number of input units, the dimension of the space in
                     which the datapoints lie

        :type n_out: int
        :param n_out: number of output units, the dimension of the space in
                      which the labels lie

        """

        # initialize the weights W as a zero matrix of shape (n_in, n_out)
        self.W = aesara.shared(
            value=np.zeros((n_in, n_out), dtype=aesara.config.floatX),
            name=name_prefix + "W",
        )

        # compute vector of class-membership probabilities in symbolic form
        self.p_y_given_x = tt.nnet.softmax(tt.dot(input, self.W))

        # compute prediction as class whose probability is maximal in
        # symbolic form
        self.y_pred = tt.argmax(self.p_y_given_x, axis=1)

        # parameters of the model
        self.params = [self.W]
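
A hypothetical usage of this layer (the class name `LogisticRegression` and the imports are assumptions; the excerpt only shows `__init__`):

import aesara
import aesara.tensor as tt
import numpy as np

x = tt.matrix("x")
layer = LogisticRegression(input=x, n_in=784, n_out=10, name_prefix="lr_")
predict = aesara.function([x], layer.y_pred)
batch = np.random.rand(2, 784).astype(aesara.config.floatX)
print(predict(batch))  # predicted class index for each of the 2 rows
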
Code example #3
def test_GpuCrossentropySoftmaxArgmax1HotWithBias():
    # Basic test for GpuCrossentropySoftmaxArgmax1HotWithBias.
    # We check that the kernel loops when there are too many threads.

    n_in = 1000
    batch_size = 4097
    n_out = 1250

    # Use bigger shapes except under DebugMode, where they would be too slow.
    if not isinstance(mode_with_gpu, aesara.compile.DebugMode):
        n_in = 4098
        n_out = 4099

    y = tt.lvector("y")

    b = tt.fvector("b")

    # We precompute the dot product with a big shape so that the
    # GpuCrossentropySoftmax1HotWithBiasDx test does not fail with
    # "the launch timed out and was terminated" on GPU cards that are
    # not powerful enough. The big shape is needed to hit the corner
    # case.
    dot_result = tt.fmatrix("dot_result")

    # Seed numpy.random with config.unittests.rseed
    utt.seed_rng()

    xx = np.asarray(np.random.rand(batch_size, n_in), dtype=np.float32)
    yy = np.ones((batch_size,), dtype="int32")
    b_values = np.zeros((n_out,), dtype="float32")
    W_values = np.asarray(np.random.rand(n_in, n_out), dtype="float32")

    dot_value = np.asarray(np.dot(xx, W_values), dtype="float32")
    del W_values
    p_y_given_x = tt.nnet.softmax(dot_result + b)
    y_pred = tt.argmax(p_y_given_x, axis=-1)
    loss = -tt.mean(tt.log(p_y_given_x)[tt.arange(y.shape[0]), y])
    dW = tt.grad(loss, dot_result)
    classify = aesara.function(inputs=[y, b, dot_result],
                               outputs=[loss, y_pred, dW],
                               mode=mode_without_gpu)
    classify_gpu = aesara.function(inputs=[y, b, dot_result],
                                   outputs=[loss, y_pred, dW],
                                   mode=mode_with_gpu)

    assert any(
        isinstance(node.op, tt.nnet.CrossentropySoftmaxArgmax1HotWithBias)
        for node in classify.maker.fgraph.toposort()
    )
    assert any(
        isinstance(node.op, GpuCrossentropySoftmaxArgmax1HotWithBias)
        for node in classify_gpu.maker.fgraph.toposort()
    )

    out = classify(yy, b_values, dot_value)
    gout = classify_gpu(yy, b_values, dot_value)

    assert len(out) == len(gout) == 3
    utt.assert_allclose(out[0], gout[0])
    utt.assert_allclose(out[2], gout[2], atol=3e-6)
    utt.assert_allclose(out[1], gout[1])
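
For reference, a plain-NumPy sketch of what the fused crossentropy-softmax-argmax computation does (a hypothetical helper, not part of the test):

import numpy as np

def softmax_xent_argmax(dot_result, b, y):
    # Row-wise softmax of (dot_result + b), shifted for numerical stability.
    z = dot_result + b
    z = z - z.max(axis=1, keepdims=True)
    p = np.exp(z)
    p /= p.sum(axis=1, keepdims=True)
    # Mean negative log-likelihood of the true labels, plus the argmax.
    loss = -np.mean(np.log(p[np.arange(y.shape[0]), y]))
    return loss, np.argmax(p, axis=1)
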
Code example #4
    def __init__(self, w, comp_dists, *args, **kwargs):
        # comp_dists type checking
        if not (
            isinstance(comp_dists, Distribution)
            or (
                isinstance(comp_dists, Iterable)
                and all(isinstance(c, Distribution) for c in comp_dists)
            )
        ):
            raise TypeError(
                "Supplied Mixture comp_dists must be a "
                "Distribution or an iterable of "
                "Distributions. Got {} instead.".format(
                    type(comp_dists)
                    if not isinstance(comp_dists, Iterable)
                    else [type(c) for c in comp_dists]
                )
            )
        shape = kwargs.pop("shape", ())

        self.w = w = at.as_tensor_variable(w)
        self.comp_dists = comp_dists

        defaults = kwargs.pop("defaults", [])

        if all_discrete(comp_dists):
            # All components are discrete: use the matching integer dtype.
            default_dtype = _conversion_map[aesara.config.floatX]
        else:
            default_dtype = aesara.config.floatX

            # A mean is only defined for continuous mixtures.
            try:
                self.mean = (w * self._comp_means()).sum(axis=-1)

                if "mean" not in defaults:
                    defaults.append("mean")
            except AttributeError:
                pass
        dtype = kwargs.pop("dtype", default_dtype)

        try:
            if isinstance(comp_dists, Distribution):
                comp_mode_logps = comp_dists.logp(comp_dists.mode)
            else:
                comp_mode_logps = at.stack([cd.logp(cd.mode) for cd in comp_dists])

            mode_idx = at.argmax(at.log(w) + comp_mode_logps, axis=-1)
            self.mode = self._comp_modes()[mode_idx]

            if "mode" not in defaults:
                defaults.append("mode")
        except (AttributeError, ValueError, IndexError):
            pass

        super().__init__(shape, dtype, defaults=defaults, *args, **kwargs)
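
A hypothetical usage of this constructor through the public PyMC3 API (the data and shapes here are made up):

import numpy as np
import pymc3 as pm

data = np.concatenate([np.random.normal(-2, 1, 200),
                       np.random.normal(2, 1, 200)])
with pm.Model():
    w = pm.Dirichlet("w", a=np.ones(2))
    # A single Distribution with a trailing component axis of length 2.
    components = pm.Normal.dist(mu=np.array([-2.0, 2.0]), sigma=1.0, shape=(2,))
    obs = pm.Mixture("obs", w=w, comp_dists=components, observed=data)
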
Code example #5
    def compute_gpu(self, test_gpu_tensor, test_host_tensor, axis):
        M = self.get_gpu_tensor()
        f = aesara.function(
            [M],
            [tt.max(M, axis=axis), tt.argmax(M, axis=axis)],
            name="shape:" + str(test_gpu_tensor.shape) + "/axis:" + str(axis) + "/GPU",
            mode=mode_with_gpu,
        )
        check_if_gpu_reduce_in_graph(f)
        aesara_max, aesara_argmax = f(test_gpu_tensor)
        ref_max, ref_argmax = numpy_maxandargmax(test_host_tensor, axis=axis)
        utt.assert_allclose(ref_max, aesara_max)
        utt.assert_allclose(ref_argmax, aesara_argmax)
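
The `numpy_maxandargmax` reference helper is not shown in the excerpt; a minimal sketch of what it presumably computes:

import numpy as np

def numpy_maxandargmax(a, axis=None):
    # NumPy reference for the (max, argmax) pair computed on the GPU.
    return np.max(a, axis=axis), np.argmax(a, axis=axis)
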
Code example #6
File: mixture.py  Project: AlexAndorra/pymc3
    def __init__(self, w, comp_dists, mixture_axis=-1, *args, **kwargs):
        self.w = at.as_tensor_variable(w)
        if not isinstance(comp_dists, Distribution):
            raise TypeError(
                "The MixtureSameFamily distribution only accepts Distribution "
                f"instances as its components. Got {type(comp_dists)} instead."
            )
        self.comp_dists = comp_dists
        if mixture_axis < 0:
            mixture_axis = len(comp_dists.shape) + mixture_axis
            if mixture_axis < 0:
                raise ValueError(
                    "`mixture_axis` must refer to an axis of the components' "
                    "distribution shape. Got out-of-bounds axis "
                    f"{mixture_axis - len(comp_dists.shape)}."
                )
        comp_shape = to_tuple(comp_dists.shape)
        self.shape = comp_shape[:mixture_axis] + comp_shape[mixture_axis + 1:]
        self.mixture_axis = mixture_axis
        kwargs.setdefault("dtype", self.comp_dists.dtype)

        # Compute the mode so we don't always have to pass an initval
        defaults = kwargs.pop("defaults", [])
        event_shape = self.comp_dists.shape[mixture_axis + 1:]
        _w = at.shape_padleft(
            at.shape_padright(w, len(event_shape)),
            len(self.comp_dists.shape) - w.ndim - len(event_shape),
        )
        mode = take_along_axis(
            self.comp_dists.mode,
            at.argmax(_w, keepdims=True),
            axis=mixture_axis,
        )
        self.mode = mode[(..., 0) + (slice(None),) * len(event_shape)]

        if not all_discrete(comp_dists):
            mean = at.as_tensor_variable(self.comp_dists.mean)
            self.mean = (_w * mean).sum(axis=mixture_axis)
            if "mean" not in defaults:
                defaults.append("mean")
        defaults.append("mode")

        super().__init__(defaults=defaults, *args, **kwargs)
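
A hypothetical usage through the public API (the data, shapes, and choice of MvNormal components are made up):

import numpy as np
import pymc3 as pm

data = np.random.normal(0, 1, size=(500, 2))
with pm.Model():
    w = pm.Dirichlet("w", a=np.ones(3))
    # 3 mixture components over a 2-dimensional event, stacked on axis 0.
    comp = pm.MvNormal.dist(mu=np.zeros((3, 2)), cov=np.eye(2), shape=(3, 2))
    mix = pm.MixtureSameFamily("mix", w=w, comp_dists=comp,
                               mixture_axis=0, observed=data)
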
Code example #7
    def test_argmax(self):
        self.check_nondiff_rop(tensor.argmax(self.mx, axis=1))
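
The test asserts that `argmax` has no R-operator. Relatedly, an ordinary gradient through `argmax` is undefined because the output is a discrete index; a minimal standalone sketch (assumes aesara; the exact exception type may vary):

import aesara
import aesara.tensor as tt

x = tt.vector("x")
idx = tt.argmax(x)
try:
    aesara.grad(idx, x)
except Exception as err:
    # Expected: the gradient of an integer-valued output is disconnected/undefined.
    print(type(err).__name__)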