def compile(self, network):
        if self.output is not None:
            raise Exception("Layer " + self.name + " is already compiled!")

        def _invert_out_of_bounds_gradient(op, grad):
            is_above_upper_bound = tf.greater(
                op.inputs[0], tf.constant(self.upper_bound, dtype=tf.float32))
            is_under_lower_bound = tf.less(
                op.inputs[0], tf.constant(self.lower_bound, dtype=tf.float32))
            is_gradient_positive = tf.greater(grad,
                                              tf.constant(0, dtype=tf.float32))
            is_gradient_negative = tf.less(grad,
                                           tf.constant(0, dtype=tf.float32))

            invert_gradient = tf.logical_or(
                tf.logical_and(is_above_upper_bound, is_gradient_negative),
                tf.logical_and(is_under_lower_bound, is_gradient_positive))

            return tf.where(invert_gradient, -grad, grad)

        gradient_op_name = network.name + "_" + self.name + "_gradient_op_" + str(
            uuid.uuid4())[:8]
        tf.RegisterGradient(gradient_op_name)(
            _invert_out_of_bounds_gradient
        )  # see _MySquareGrad for grad example

        with tf.get_default_graph().gradient_override_map(
            {"Identity": gradient_op_name}):
            self.output = tf.identity(self.input_layers[0].get_output(),
                                      name=(network.name + "_" + self.name))

        self.parameters = []
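Gradient functions registered this way follow TensorFlow 1.x's (op, grad) contract: they receive the forward op and the incoming gradient, and return one gradient per input. A minimal self-contained sketch of the same Identity-override pattern (illustrative names, not from the original source):

import tensorflow as tf

# Toy variant of the pattern above: clip, rather than invert, the gradient
# of an identity op.
def _clip_grad(op, grad):
    return tf.clip_by_value(grad, -1.0, 1.0)

tf.RegisterGradient("ClipGradDemo")(_clip_grad)

x = tf.placeholder(tf.float32, [None])
with tf.get_default_graph().gradient_override_map({"Identity": "ClipGradDemo"}):
    y = tf.identity(x)
# tf.gradients(y, x)[0] now flows through _clip_grad.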
Example #2
        def gradient_wrapped_op(*args, Tout=None, shape_out=None, **kwargs):
            if Tout is None:
                Tout = self.Tout
            if isinstance(Tout, str):
                Tout = tf.as_dtype(Tout)
            try:
                tf.RegisterGradient(self.name)(self.grad)
            except KeyError:
                # A gradient with this name is already registered.
                pass
            with tf.get_default_graph().gradient_override_map(
                    self.op_type_map):
                res = tf.py_func(numpy_op, args, Tout, name=self.name)

                def _set_shapes(tensors, shapes):
                    if isinstance(tensors, (list, tuple)):
                        tensors = [
                            _set_shapes(t, s) for t, s in zip(tensors, shapes)
                        ]

                    elif isinstance(shapes, tf.Tensor):
                        tensors = tf.reshape(tensors, shapes)
                    else:
                        tensors.set_shape(shapes)
                    return tensors

                if shape_out is not None:
                    res = _set_shapes(res, shape_out)
                return res
Example #3
def my_op(func, inp, grad, name=None, victim_op='Identity'):
    # Need to generate a unique name to avoid duplicates.
    rnd_name = 'my_gradient' + str(np.random.randint(0, 1E+8))
    tf.RegisterGradient(rnd_name)(grad)
    g = tf.get_default_graph()
    with g.gradient_override_map({victim_op: rnd_name}):
        return func(inp, name=name)
Example #4
def my_py_func(func, inp, Tout, stateful=False, name=None, my_grad_func=None):
    # Need to generate a unique name to avoid duplicates:
    random_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))
    tf.RegisterGradient(random_name)(my_grad_func)  # see _my_sigmoid_grad for grad example
    g = tf.get_default_graph()
    with g.gradient_override_map({"PyFunc": random_name, "PyFuncStateless": random_name}):
        return tf.py_func(func, inp, Tout, stateful=stateful, name=name)
Example #5
def _py_function(py_func,
                 incoming,
                 out_types,
                 stateful=True,
                 name=None,
                 grad=None):
    """
    Define custom python function which takes also a gradient op as argument.

    Args:
        py_func (function): python function to run
        incoming (list): list of `Tensor` objects
        out_types (list): list or tuple of TensorFlow data types
        stateful (Boolean): If True, the function should be considered stateful.
        name (string): variable scope (optional)
        grad (function): gradient policy to apply

    Returns:
        The result of applying the function py_func() during the forward pass and the gradient policy grad()
        during the backward pass.

    """
    # generate a unique name to avoid duplicates
    rnd_name = 'PyFuncGrad' + str(np.random.randint(0, int(1E+8)))
    tf.RegisterGradient(rnd_name)(grad)

    g = tf.get_default_graph()
    with g.gradient_override_map({"PyFunc": rnd_name}):
        res = tf.py_func(py_func,
                         incoming,
                         out_types,
                         stateful=stateful,
                         name=name)
        res[0].set_shape(incoming[0].get_shape())
        return res
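A hypothetical use of _py_function: a numpy rounding forward pass with a straight-through gradient (all names below are illustrative):

def _round_forward(x):
    return np.round(x)

def _straight_through_grad(op, grad):
    return grad  # treat rounding as the identity in the backward pass

x = tf.placeholder(tf.float32, [None, 4])
y = _py_function(_round_forward, [x], [tf.float32],
                 grad=_straight_through_grad)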
Example #6
def _py_func_with_gradient(func,
                           inp,
                           Tout,
                           stateful=True,
                           name=None,
                           grad_func=None):
    """
  PyFunc defined as given by Tensorflow
  :param func: Custom Function
  :param inp: Function Inputs
  :param Tout: Ouput Type of out Custom Function
  :param stateful: Calculate Gradients when stateful is True
  :param name: Name of the PyFunction
  :param grad: Custom Gradient Function
  :return:
  """
    # Generate a random name to avoid conflicts with built-in names
    # (getrandbits comes from the standard library: from random import getrandbits)
    rnd_name = 'PyFuncGrad-' + '%0x' % getrandbits(30 * 4)

    # Register Tensorflow Gradient
    tf.RegisterGradient(rnd_name)(grad_func)

    # Get current graph
    g = tf.get_default_graph()

    # Add gradient override map
    with g.gradient_override_map({
            "PyFunc": rnd_name,
            "PyFuncStateless": rnd_name
    }):
        return tf.py_func(func, inp, Tout, stateful=stateful, name=name)
Example #7
def scatter_add(src, indices, size=None, fill_value=0):
    def _scatter_add(src, indices, size, fill_value):
        assert len(indices.shape) == 1
        assert src.shape[0] == indices.shape[0]
        n = src.shape[0]

        out_shape = list(src.shape)
        out_shape[0] = size
        out = np.full(out_shape, fill_value, dtype=src.dtype)
        for i in range(n):
            out[indices[i]] += src[i]
        return out

    def _scatter_add_grad_op(op, grad):
        grads = [None for _ in op.inputs]
        src = op.inputs[0]
        indices = op.inputs[1]
        grads[0] = tf.gather(grad, indices)
        return grads

    grad_name = 'ScatterAddGrad_' + str(uuid.uuid4())
    tf.RegisterGradient(grad_name)(_scatter_add_grad_op)

    if size is None:
        size = tf.reduce_max(indices) + 1

    g = tf.get_default_graph()
    with g.gradient_override_map({"PyFunc": grad_name}):
        out = tf.py_func(_scatter_add, [src, indices, size, fill_value],
                         src.dtype)
    out.set_shape([None, src.shape[1]])
    return out
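For example, rows that map to the same output index are accumulated, and unused output rows keep the fill value (a usage sketch):

src = tf.constant([[1., 2.], [3., 4.], [5., 6.]])
idx = tf.constant([0, 2, 0])
out = scatter_add(src, idx)
# out evaluates to [[6., 8.], [0., 0.], [3., 4.]]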
Example #8
    def py_func(self,
                func,
                inputs,
                Tout,
                shape_out,
                stateful=True,
                name=None,
                grad=None):
        if grad is None:
            result = tf.py_func(func,
                                inputs,
                                Tout,
                                stateful=stateful,
                                name=name)
        else:
            # Need to generate a unique name to avoid duplicates:
            rnd_name = 'PyFuncGrad' + str(uuid.uuid4())

            tf.RegisterGradient(rnd_name)(
                grad)  # see _MySquareGrad for grad example
            g = tf.get_default_graph()
            with g.gradient_override_map({"PyFunc": rnd_name}):
                result = tf.py_func(func,
                                    inputs,
                                    Tout,
                                    stateful=stateful,
                                    name=name)
        if shape_out is not None:
            result.set_shape(shape_out)
        return result
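Several snippets here reference _MySquareGrad without showing it; it comes from the widely circulated py_func-gradient example whose forward pass computes x**2. A minimal sketch (assumed, not the original source):

def _MySquareGrad(op, grad):
    x = op.inputs[0]
    return grad * 2 * x  # chain rule: d(x^2)/dx = 2x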
Example #9
def py_func(func, inp, Tout, stateful=True, name=None, grad=None):
    # Need to generate a unique name to avoid duplicates:
    rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))
    tf.RegisterGradient(rnd_name)(grad)  # see _MySquareGrad for grad example
    g = tf.get_default_graph()
    with g.gradient_override_map({"PyFunc": rnd_name}):
        return tf.py_func(func, inp, Tout, stateful=stateful, name=name)
def py_func(func, inp, Tout, stateful=True, name=None, grad=None):
    # Generate a unique name to avoid duplicates (a range of 1E+2 gives only
    # 100 possible names and risks collisions, so use 1E+8)
    rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))
    tf.RegisterGradient(rnd_name)(grad)
    g = tf.get_default_graph()
    with g.gradient_override_map({"PyFunc": rnd_name}):
        return tf.py_func(func, inp, Tout, stateful=stateful, name=name)
    def py_func(self, func, inp, Tout, stateful=True, name=None, grad=None):
        """
        PyFunc defined as given by Tensorflow

        :param func:        Custom Function
        :param inp:         Function Inputs
        :param Tout:        Ouput Type of out Custom Function
        :param stateful:    Calculate Gradients when stateful is True
        :param name:        Name of the PyFunction
        :param grad:        Custom Gradient Function
        :return:
        """
        # Generate Random Gradient name to avoid conflicts with inbuilt names
        rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 2**32 - 1))

        # Register Tensorflow Gradient
        tf.RegisterGradient(rnd_name)(grad)

        # Get current graph
        g = tf.get_default_graph()

        # Add gradient override map
        with g.gradient_override_map({
                'PyFunc': rnd_name,
                'PyFuncStateless': rnd_name
        }):
            return tf.py_func(func, inp, Tout, stateful=stateful, name=name)
Example #12
    def neural_renderer_texture(self,
                                verts,
                                nverts,
                                tris,
                                ntris,
                                textures,
                                name=None,
                                stateful=True):
        with ops.name_scope(name, "NeuralRendererTexture") as name:
            rnd_name = 'NeuralRendererTextureGrad' + str(
                np.random.randint(0, 1E+8))
            tf.RegisterGradient(rnd_name)(
                self._neural_renderer_texture_grad
            )  # see _MySquareGrad for grad example
            g = tf.get_default_graph()
            self.to_gpu()
            self.verts_size = verts.shape
            self.textures_size = textures.shape
            with g.gradient_override_map({
                    "PyFunc": rnd_name,
                    "PyFuncStateless": rnd_name
            }):
                img = tf.py_func(self.forward_img,
                                 [verts, nverts, tris, ntris, textures],
                                 [tf.float32],
                                 stateful=stateful,
                                 name=name)[0]
                img.set_shape([
                    verts.shape[0], 3, self.renderer.image_size,
                    self.renderer.image_size
                ])
                return img
Example #13
def SigJoin(x, y, m, fixedLast=None):
    rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))
    if fixedLast is None:
        tf.RegisterGradient(rnd_name)(_sigJoinGrad)
        g = tf.get_default_graph()
        with g.gradient_override_map({"PyFunc": rnd_name}):
            return tf.py_func(_sigJoinImp, [x, y, m],
                              tf.float32,
                              name="SigJoin")
    else:
        tf.RegisterGradient(rnd_name)(_sigJoinGradFixed)
        g = tf.get_default_graph()
        with g.gradient_override_map({"PyFunc": rnd_name}):
            return tf.py_func(_sigJoinFixedImp, [x, y, m, fixedLast],
                              tf.float32,
                              name="SigJoin")
Example #14
def py_func(func, inp, Tout, stateful=True, name=None, grad=None):
    rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))

    tf.RegisterGradient(rnd_name)(grad)
    g = tf.get_default_graph()
    with g.gradient_override_map({"PyFunc": rnd_name}):
        return tf.py_func(func, inp, Tout, stateful=stateful, name=name)
def create_adjustable_model(input_node, weights, thresholds):

    with input_node.graph.as_default():

        def _clip_grad_op(op, grad):
            x = op.inputs[0]
            x_min = op.inputs[1]
            x_max = op.inputs[2]
            cond = tf.logical_or(tf.less(x, x_min), tf.greater(x, x_max))
            return_grad = tf.where(cond, tf.zeros_like(grad, name="zero_grad"),
                                   grad)
            # The min/max bounds receive no gradient; use float constants so
            # the returned gradients match grad's dtype.
            return return_grad, tf.constant(
                0.0, name="constant_min_grad"), tf.constant(
                    0.0, name="constant_max_grad")

        # Register the gradient with a unique id
        grad_name = "MyClipGrad_" + str(uuid.uuid4())
        tf.RegisterGradient(grad_name)(_clip_grad_op)

        with input_node.graph.gradient_override_map({
                "Round": "Identity",
                "ClipByValue": grad_name
        }):
            mnasnet_model_quantized = MNasNetModelAdjustable(
                input_node, weights, thresholds)

        with tf.name_scope("float_model"):
            mnasnet_model_float = MNasNetModelFloat(input_node, weights)

    return mnasnet_model_float, mnasnet_model_quantized
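The {"Round": "Identity"} entry above is the straight-through-estimator trick: rounding stays in the forward pass but backpropagates as the identity. A minimal standalone sketch (hypothetical variable names):

x = tf.placeholder(tf.float32, [None])
g = tf.get_default_graph()
with g.gradient_override_map({"Round": "Identity"}):
    q = tf.round(x)  # forward: rounded values; backward: identity gradient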
Example #16
    def with_custom_gradient(self,
                             function,
                             inputs,
                             gradient,
                             input_index=0,
                             output_index=None,
                             name_base="custom_gradient_func"):
        # Setup custom gradient
        gradient_name = name_base + "_" + str(uuid.uuid4())
        tf.RegisterGradient(gradient_name)(gradient)

        g = tf.get_default_graph()
        with g.gradient_override_map({"Identity": gradient_name}):
            fake_function = tf.identity(inputs[input_index])

        outputs = function(*inputs)
        output = outputs if output_index is None else outputs[output_index]
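        # The sum below evaluates to `output` in the forward pass, while
        # gradients flow only through `fake_function`, whose Identity op
        # carries the overridden gradient.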
        output_with_gradient = fake_function + tf.stop_gradient(output -
                                                                fake_function)
        if output_index is None:
            return output_with_gradient
        else:
            outputs = list(outputs)
            outputs[output_index] = output_with_gradient
            return outputs
def tf_func(func, grad=None):
    rnd_name = 'TfFuncGrad' + str(np.random.randint(0, int(1e4)))

    tf.RegisterGradient(rnd_name)(grad)

    def wrapped(*args, **kwargs):
        # gradient_override_map only affects ops created inside the block,
        # so func must build its ops here rather than at definition time.
        g = tf.get_default_graph()
        with g.gradient_override_map({"TfFunc": rnd_name}):
            return func(*args, **kwargs)

    return wrapped
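As rewritten, the override map is entered each time the wrapper runs, so the ops func creates pick up the registered gradient; usage would look like y = tf_func(build_ops, grad=my_grad)(x) (hypothetical names).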
def _py_func(func, inp, Tout, stateful=False, name=None, grad=None):
    """
    Copied from random internet forum. It seems to be important to give
    PyFunc to give an random name in override map to properly register gradients

    PyFunc defined as given by Tensorflow
    :param func: Custom Function
    :param inp: Function Inputs
    :param Tout: Output Type of out Custom Function
    :param stateful: Calculate Gradients when stateful is True
    :param name: Name of the PyFunction
    :param grad: Custom Gradient Function
    :return:
    """
    import tensorflow as tf
    # Generate Random Gradient name in order to avoid conflicts with inbuilt names
    global _num_generated_ops
    rnd_name = 'PyFuncGrad' + str(_num_generated_ops) + 'ABC@a1b2c3'
    _num_generated_ops += 1

    # Register Tensorflow Gradient
    tf.RegisterGradient(rnd_name)(grad)

    # Get current graph
    g = tf.get_default_graph()

    # Add gradient override map
    with g.gradient_override_map({"PyFunc": rnd_name, "PyFuncStateless": rnd_name}):
        return tf.py_func(func, inp, Tout, stateful=stateful, name=name)
Example #19
def custom_op(op: Union[CustomOp, CompilableOp, TFCompiledOp], stateful=True, name=None,
              use_autodiff=False, compile_only=False, return_handle=False):
    """
        Registers a custom Tensorflow operator from `CustomOp`, 
        `CompilableOp`, or `TFCompiledOp` objects.
        @param op The custom operator. If numpy is not used, automatic 
                    differentiation via Tensorflow applies.
        @param stateful True if the operation is not a pure function (enables
                        sub-expression elimination optimizations if False).
        @param name Specify a custom name for this operation.
        @param use_autodiff If true, uses tensorflow tensors, otherwise 
                            assumes numpy arrays.
        @param compile_only If true, returns a TFCompiledOp instead of an instantiated op
        @param return_handle (for C++ ops) If true, also returns a direct handle
                             to the operator object and library as a 3-tuple:
                             (operator, library, handle).
        @return A tf.Operation object (or a function) that calls the custom operator.
    """
    if isinstance(op, CompilableOp):
        result = _custom_cpp_op(op, stateful, name)
        if compile_only:
            return result
        else:
            op = result
    if isinstance(op, TFCompiledOp):
        result = _create_op_handle(op)
        if return_handle:
            return result
        else:
            return result[0]
    elif isinstance(op, CustomOp):
        if use_autodiff:
            return op.forward

        def _fwd(*inputs):
            return op.forward(*inputs)
        def _bwd(tfop, *grads):
            def _actual_bwd(*args):
                return op.backward(args[:len(grads)], 
                                     args[len(grads):(len(grads)+len(tfop.inputs))], 
                                     args[(len(grads)+len(tfop.inputs)):])
            return tf.py_func(_actual_bwd, 
                              (list(grads) + list(tfop.inputs) + list(tfop.outputs)), 
                              [inp.dtype for inp in op.input_descriptors], 
                              stateful=stateful)

        # Gradient replacement adapted from https://gist.github.com/harpone/3453185b41d8d985356cbe5e57d67342

        # Generate a unique name to avoid duplicates
        rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))
        tf.RegisterGradient(rnd_name)(_bwd)

        def result(*inputs):
            g = tf.get_default_graph()
            with g.gradient_override_map({"PyFunc": rnd_name, "PyFuncStateless": rnd_name}):
                return tf.py_func(_fwd, inputs, 
                                  [out.dtype for out in op.output_descriptors],
                                  stateful=stateful, name=name)
        return result
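Note that for the CustomOp path, custom_op returns the inner result function rather than a tensor: every call builds a fresh tf.py_func node under the override map, so the registered backward function applies to each instance.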
def py_func(func, inp, Tout, stateful=True, name=None, grad=None):
    rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))  # generate a unique name to avoid duplicates
    tf.RegisterGradient(rnd_name)(grad)
    g = tf.get_default_graph()
    with g.gradient_override_map({"PyFunc": rnd_name}):
        res = tf.py_func(func, inp, Tout, stateful=stateful, name=name)
        res[0].set_shape(inp[0].get_shape())
        return res
def py_func(func, inp, Tout, name=None, grad=None):
    """Redfine tf.py_func to include gradients"""
    temp_name = next(tempfile._get_candidate_names())
    _name = 'PyFuncGrad%s' % temp_name
    tf.RegisterGradient(_name)(grad)
    g = tf.get_default_graph()
    with g.gradient_override_map({"PyFunc": _name}):
        return tf.py_func(func, inp, Tout, name=name)
Example #22
def py_func(func, inp, Tout, graph, stateful=True, name=None, grad=None):
    rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))

    tf.RegisterGradient(rnd_name)(grad)
    with graph.gradient_override_map({"PyFunc": rnd_name}):
        loss = tf.py_func(func, inp, Tout, stateful=stateful, name=name)[0]
        loss.set_shape([])
        return loss
Example #23
def py_func(func, inp, Tout, stateful=True, name=None, grad_func=None):
    rand_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))
    tf.RegisterGradient(rand_name)(grad_func)
    g = tf.get_default_graph()
    # Override the gradient used in the backward pass with the custom one.
    with g.gradient_override_map({'PyFunc': rand_name}):
        # tf.py_func wraps a Python function 'func' whose inputs and outputs
        # are numpy arrays and inserts it into the TensorFlow graph as an op.
        return tf.py_func(func, inp, Tout, stateful=stateful, name=name)
Example #24
def LogSig(x, s, method=""):
    rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))
    tf.RegisterGradient(rnd_name)(_logSigGrad(s, method))
    g = tf.get_default_graph()
    with g.gradient_override_map({"PyFunc": rnd_name}):
        return tf.py_func(_logSigImp(s, method), [x],
                          tf.float64,
                          name="LogSig")
Example #25
def py_func_with_grad(func, inp, Tout, stateful=True, name=None, grad=None):
    # Build a 100-digit random string so the name is effectively unique.
    rnd_name = 'PyFuncGrad' + ''.join(
        str(np.random.randint(0, 10)) for _ in range(100))
    tf.RegisterGradient(rnd_name)(grad)
    g = tf.get_default_graph()
    with g.gradient_override_map({"PyFunc": rnd_name}):
        return tf.py_func(func, inp, Tout, stateful=stateful, name=name)
Example #26
def pnp(inputs):
    def _pnp(objPts, imgPts, cmat, distcoeffs):
        global fff, e2
        if fff:
            e2 = time.time()
            fff = False
        res = np.zeros((objPts.shape[0], 6), np.float64)
        NumericLayers.solvePnP(objPts, imgPts, cmat, res, 2)
        return res

    def _pnp_grad(objPts, imgPts, cmat, distcoeffs, grad):
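        # Central-difference Jacobian of the 6-DoF pose w.r.t. the four
        # object points: perturb each of the 12 coordinates by +/-eps and
        # re-solve PnP in one batched call.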
        (h_len, m, n) = objPts.shape
        jacobean = np.zeros((h_len, m, n), np.float64)
        eps = 1
        obj_0 = np.tile(objPts, (12, 1, 1))
        obj_1 = copy.deepcopy(obj_0)

        for i in range(4):
            for j in range(3):
                obj_0[(i * 3 + j) * h_len:(i * 3 + j + 1) * h_len, i, j] -= eps
                obj_1[(i * 3 + j) * h_len:(i * 3 + j + 1) * h_len, i, j] += eps

        tmp_obj = np.concatenate((obj_0, obj_1))
        tmp_img = np.tile(imgPts, (24, 1, 1))
        tmp_hyp = np.zeros((tmp_obj.shape[0], 6), np.float64)

        NumericLayers.solvePnP(tmp_obj, tmp_img, cmat, tmp_hyp, 2)

        res_0 = tmp_hyp[0:h_len * 12, :]
        res_1 = tmp_hyp[h_len * 12:h_len * 24, :]

        for i in range(4):
            for j in range(3):
                jacobean[:, i, j] = np.sum(
                    grad *
                    (res_1[(i * 3 + j) * h_len:(i * 3 + j + 1) * h_len, :] -
                     res_0[(i * 3 + j) * h_len:(i * 3 + j + 1) * h_len, :]) /
                    (2 * eps),
                    axis=1)

        return jacobean

    def _pnp_grad_op(op, grad):
        aa = op.inputs[0]
        bb = op.inputs[1]
        cc = op.inputs[2]
        dd = op.inputs[3]
        p_grad = tf.py_func(_pnp_grad, [aa, bb, cc, dd, grad], tf.float64)
        return [p_grad, None, None, None]

    grad_name = "PnPGrad_" + str(uuid.uuid4())
    tf.RegisterGradient(grad_name)(_pnp_grad_op)
    g = tf.get_default_graph()
    with g.gradient_override_map({"PyFunc": grad_name}):
        output = tf.py_func(_pnp, inputs, tf.float64)
    return output
Example #27
    def call(self, inp):
        rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))

        tf.RegisterGradient(rnd_name)(
            self.identity_grad)  # see _MySquareGrad for grad example
        g = tf.get_default_graph()
        with g.gradient_override_map({"PyFunc": rnd_name}):
            return tf.py_func(self.add_noise, [inp], [tf.float32],
                              stateful=True,
                              name="Noise")
Example #28
def py_func(func, inp, Tout, stateful=True, name=None, grad=None):
    global _py_func_id

    rnd_name = 'PyFuncGrad' + '%08d' % _py_func_id
    _py_func_id += 1

    tf.RegisterGradient(rnd_name)(grad)
    g = tf.get_default_graph()
    with g.gradient_override_map({"PyFunc": rnd_name, "PyFuncStateless": rnd_name}):
        return tf.py_func(func, inp, Tout, stateful=stateful, name=name)
Example #29
def roi_pooling2d(x, y, name=None):
    # ops.op_scope is deprecated; name_scope(name, default_name, values)
    # is the modern equivalent.
    with ops.name_scope(name, "Mysquare", [x]) as name:
        # NOTE: the fixed name "roi" raises a KeyError if this function is
        # called twice in the same process; a unique suffix would avoid that.
        tf.RegisterGradient("roi")(
            _MySquareGrad)  # see _MySquareGrad for grad example
        g = tf.get_default_graph()
        with g.gradient_override_map({"PyFunc": "roi"}):
            sqr_x = tf.py_func(
                forward, [x, y], [tf.float32], name=name,
                stateful=True)  # forward call; the override attaches the gradient
        return sqr_x[0]
Example #30
    def call(self, x, **kwargs):
        def pre_logpolar(window):
            n, m = window.shape[:2]
            #radius = m / 2
            xc = n / 2
            yc = m / 2
            #radius, xc, yc = util.aplicasift(window)
            #radius, xc, yc = util.aplicaCAMshift(window)
            pre = util.logpolar_naive(window, xc, yc)
            return pre

        def process_filters(img):
            filters = pre_logpolar(img)
            return filters

        # Define Actual gradient
        def _logpolarGrad(op, grad):
            return grad

        def process_samples(inp):
            # Apply the log-polar transform channel-wise to every sample.
            num, n, m, d = inp.shape
            res = np.zeros((num, n, m, d))
            for i in range(num):
                for c in range(d):
                    img = inp[i, ..., c]
                    lpimg = process_filters(img)
                    res[i, ..., c] = lpimg
            return res.astype('float32')

        name = 'process_logpolar'
        stateful = True
        _, n, m, d = x.get_shape().as_list()
        # Need to generate a unique name to avoid duplicates:
        rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))

        tf.RegisterGradient(rnd_name)(
            _logpolarGrad)  # identity gradient defined above
        g = tf.get_default_graph()
        with g.gradient_override_map({"PyFunc": rnd_name}):
            outputs = tf.reshape(
                tf.py_func(process_samples, [x],
                           tf.float32,
                           stateful=stateful,
                           name=name), (-1, n, m, d))

        return outputs