Example #1
    def make_node(self, value, *shape):
        value = as_gpuarray_variable(value, context_name=self.context_name)
        sh, bcast = alloc_validate_shape(shape)
        # Reject values that cannot be broadcast into the requested shape.
        if value.ndim > len(sh):
            raise TypeError("The GpuAlloc value to use has more dimensions "
                            "than the specified shape", value.ndim, len(sh))
        otype = value.type.clone(broadcastable=bcast)
        return Apply(self, [value] + sh, [otype()])
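Every example here leans on `alloc_validate_shape` to turn the raw shape arguments into a list of scalar shape tensors plus a broadcastable pattern. As a rough, illustrative sketch of that contract (not the library's actual implementation; the name `alloc_validate_shape_sketch` and the constant-folding details are assumptions), a dimension is marked broadcastable only when it is statically known to be 1:

    from theano.tensor import basic

    def alloc_validate_shape_sketch(shape):
        """Hypothetical stand-in for `alloc_validate_shape`."""
        sh = [basic.as_tensor_variable(s) for s in shape]
        bcast = []
        for s in sh:
            if not str(s.dtype).startswith(("int", "uint")):
                raise TypeError("Shape arguments must be integers", s)
            # Broadcastable only when the dimension is a compile-time 1.
            try:
                const = basic.get_scalar_constant_value(s)
            except basic.NotScalarConstantError:
                const = None
            bcast.append(const is not None and int(const) == 1)
        return sh, bcast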
Example #2
    def make_node(self, a, *shape):
        a = basic.as_tensor_variable(a)

        # `alloc_validate_shape` converts each shape entry to a scalar
        # tensor itself, so the raw shape tuple is passed straight through;
        # collapsing it into a single vector first would make it
        # un-iterable symbolically.
        shape, bcast = basic.alloc_validate_shape(shape)

        out = type(a.type)(dtype=a.type.dtype, broadcastable=bcast)()

        return theano.Apply(self, [a] + shape, [out])
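At the graph level, a `make_node` like this one produces an output whose static broadcast pattern is derived from the shape arguments. A minimal usage sketch, assuming this is the `BroadcastTo` Op exposed as `broadcast_to` in `theano.tensor.extra_ops`:

    import theano
    import theano.tensor as tt
    from theano.tensor.extra_ops import broadcast_to  # assumed location

    x = tt.matrix("x")
    y = broadcast_to(x, (5, x.shape[0], x.shape[1]))

    f = theano.function([x], y.shape)
    print(f([[1.0, 2.0]]))  # -> [5 1 2]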
Example #3
    def make_node(self, *shape):
        sh, bcast = alloc_validate_shape(shape)
        output = GpuArrayType(dtype=self.dtype, broadcastable=bcast,
                              context_name=self.context_name)()
        output.tag.values_eq_approx = tensor.type.values_eq_approx_always_true
        # The output can contain nan/inf.
        output.type.filter_checks_isfinite = False
        output.tag.nan_guard_mode_check = False
        return Apply(self, sh, [output])
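The three flags above opt an uninitialized allocation out of every value check: approximate comparisons always pass, the type filter skips its isfinite test, and NanGuardMode ignores the output. A quick sketch of the behavior the first tag requests, using the helper the code itself references:

    import numpy as np
    from theano.tensor.type import values_eq_approx_always_true

    # Uninitialized buffers may legitimately hold nan/inf, so
    # debug-mode comparisons against them are forced to succeed.
    print(values_eq_approx_always_true(np.array([np.nan]), np.array([0.0])))
    # -> True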
Example #4
    def grad(self, inputs, outputs_gradients):
        a, *shape = inputs
        (dout,) = outputs_gradients

        # Determine the dimensions that were added by broadcasting
        new_dims = list(range(dout.ndim - a.ndim))

        d_wrt_a = broadcast_to(dout, shape).sum(axis=new_dims)

        # Determine the dimensions that were broadcast
        _, shape_bcast = basic.alloc_validate_shape(shape)
        bcast_sums = [
            i for i, (a_b, s_b) in enumerate(
                zip(a.broadcastable, shape_bcast[-a.ndim:]))
            if a_b and not s_b
        ]

        if bcast_sums:
            d_wrt_a = d_wrt_a.sum(axis=bcast_sums, keepdims=True)

        return [d_wrt_a] + [
            grad_undefined(self, i, shp) for i, shp in enumerate(shape, 1)
        ]
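Since the gradient sums `dout` over every axis that broadcasting added or expanded, `d_wrt_a` comes back with `a`'s shape, while the integer-valued shape inputs get `grad_undefined`. A small sketch of exercising it (again assuming `broadcast_to` lives in `theano.tensor.extra_ops`):

    import theano
    import theano.tensor as tt
    from theano.tensor.extra_ops import broadcast_to  # assumed location

    a = tt.vector("a")
    y = broadcast_to(a, (4, a.shape[0]))

    # Summing y and differentiating collapses the broadcast axis,
    # so every element of `a` receives a gradient of 4.
    g = theano.grad(y.sum(), a)
    f = theano.function([a], g)
    print(f([1.0, 2.0, 3.0]))  # -> [4. 4. 4.]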