Example #1
    def __init__(self,
                 approx_func="leaky_relu",
                 degrees=(5, 4),
                 cuda=False,
                 version="A",
                 trainable=True,
                 train_numerator=True,
                 train_denominator=True):
        """
        Rational activation function inherited from the TensorFlow Keras ``Layer``

        Arguments:
                approx_func (str):
                    The name of the approximated function used for initialisation. \
                    The functions available for initialisation are listed in \
                    `rational.rationals_config.json`. \n
                    Default ``leaky_relu``.
                degrees (tuple of int):
                    The degrees of the numerator (P) and denominator (Q).\n
                    Default ``(5, 4)``
                cuda (bool):
                    Use the GPU CUDA version. \n
                    If ``None``, use CUDA if it is available on the machine\n
                    Default ``False``
                version (str):
                    Version of Rational to use. Rational(x) = P(x)/Q(x)\n
                    `A`: Q(x) = 1 + \|b_1.x\| + \|b_2.x^2\| + ... + \|b_n.x^n\|\n
                    `B`: Q(x) = 1 + \|b_1.x + b_2.x^2 + ... + b_n.x^n\|\n
                    `C`: Q(x) = 0.1 + \|b_1.x + b_2.x^2 + ... + b_n.x^n\|\n
                    `D`: like `B` with noise\n
                    Default ``A``
                trainable (bool):
                    If the weights are trainable, i.e., if they are updated during \
                    the backward pass\n
                    Default ``True``
                train_numerator (bool):
                    If the numerator coefficients are trainable\n
                    Default ``True``
                train_denominator (bool):
                    If the denominator coefficients are trainable\n
                    Default ``True``
        Returns:
            Module: Rational module
        """
        super(Rational, self).__init__()

        w_numerator, w_denominator = get_parameters(version, degrees,
                                                    approx_func)
        self.numerator = tf.Variable(initial_value=w_numerator,
                                     trainable=trainable and train_numerator)
        self.denominator = tf.Variable(initial_value=w_denominator,
                                       trainable=trainable
                                       and train_denominator)

        if version == "A":
            rational_func = Rational_PYTORCH_A_F
        elif version == "B":
            rational_func = Rational_PYTORCH_B_F
        elif version == "C":
            rational_func = Rational_PYTORCH_C_F
        elif version == "D":
            rational_func = Rational_PYTORCH_D_F
        else:
            raise ValueError("version %s not implemented" % version)

        self.rational_func = rational_func
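
Only the constructor is shown above. A minimal usage sketch, assuming the layer also defines a `call()` method that applies `self.rational_func` to its input (that method is not part of the example):

# Hypothetical usage sketch, not part of the example above.
import tensorflow as tf

rational = Rational(approx_func="leaky_relu", degrees=(5, 4), version="A")
x = tf.random.normal((8, 16))
y = rational(x)   # assumes call() applies self.rational_func to x
print(y.shape)    # expected to match the input shape, (8, 16)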
Example #2
    def __init__(self, approx_func='leaky_relu', degrees=(5, 4), cuda=False,
                 version="A", trainable=True, train_numerator=True,
                 train_denominator=True):
        super(Rational, self).__init__()
        w_numerator, w_denominator = get_parameters(version, degrees, approx_func)
        self.device = gpu() if cuda else cpu()

        with self.name_scope():
            self.numerator = self.params.get(name='w_numerator', shape=(len(w_numerator),),
                                             init=initializer.Constant(w_numerator),
                                             grad_req='write' if train_numerator and trainable else 'null')
            self.denominator = self.params.get(name='w_denominator', shape=(len(w_denominator),),
                                               init=initializer.Constant(w_denominator),
                                               grad_req='write' if train_denominator and trainable else 'null')

        self.degrees = degrees
        self.version = version
        self.training = trainable

        self.init_approximation = approx_func

        if version == "A":
            rational_func = Rational_MXNET_A_F
        elif version == "B":
            rational_func = Rational_MXNET_B_F
        elif version == "C":
            rational_func = Rational_MXNET_C_F
        elif version == "D":
            rational_func = Rational_MXNET_D_F
        else:
            raise ValueError("version %s not implemented" % version)

        self.activation_function = rational_func
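
For this Gluon variant, the parameters created through `self.params.get` must be initialised before the block is called. A minimal sketch, assuming the block also implements `hybrid_forward` using `self.activation_function` (not shown above):

# Hypothetical usage sketch, not part of the example above.
import mxnet as mx

act = Rational(version="B", cuda=False)
act.initialize()                          # materialises w_numerator / w_denominator
x = mx.nd.random.normal(shape=(8, 16))
y = act(x)                                # assumes hybrid_forward applies activation_function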
Example #3
    def __init__(self, approx_func="leaky_relu", degrees=(5, 4), cuda=None,
                 version="A", trainable=True, train_numerator=True,
                 train_denominator=True):
        super(Rational, self).__init__()

        if cuda is None:
            cuda = torch_cuda_available()
        if cuda is True:
            device = "cuda"
        elif cuda is False:
            device = "cpu"
        else:
            device = cuda

        w_numerator, w_denominator = get_parameters(version, degrees,
                                                    approx_func)

        self.numerator = nn.Parameter(torch.FloatTensor(w_numerator).to(device),
                                      requires_grad=trainable and train_numerator)
        self.denominator = nn.Parameter(torch.FloatTensor(w_denominator).to(device),
                                        requires_grad=trainable and train_denominator)
        self.register_parameter("numerator", self.numerator)
        self.register_parameter("denominator", self.denominator)
        self.device = device
        self.degrees = degrees
        self.version = version
        self.training = trainable

        self.init_approximation = approx_func

        if "cuda" in str(device):
            if version == "A":
                rational_func = Rational_CUDA_A_F
            elif version == "B":
                rational_func = Rational_CUDA_B_F
            elif version == "C":
                rational_func = Rational_CUDA_C_F
            elif version == "D":
                rational_func = Rational_CUDA_D_F
            else:
                raise ValueError("version %s not implemented" % version)

            self.activation_function = rational_func.apply
        else:
            if version == "A":
                rational_func = Rational_PYTORCH_A_F
            elif version == "B":
                rational_func = Rational_PYTORCH_B_F
            elif version == "C":
                rational_func = Rational_PYTORCH_C_F
            elif version == "D":
                rational_func = Rational_PYTORCH_D_F
            else:
                raise ValueError("version %s not implemented" % version)

            self.activation_function = rational_func
        self._handle_retrieve_mode = None
        self.distribution = None
        self.best_fitted_function = None
        self.best_fitted_function_params = None
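
Because the coefficients are registered as `nn.Parameter`s, this PyTorch module composes with the rest of `torch.nn` and with optimizers out of the box. A minimal sketch, assuming a `forward()` method (not shown) that calls `self.activation_function`:

# Hypothetical usage sketch, not part of the example above.
import torch
import torch.nn as nn

model = nn.Sequential(
    nn.Linear(16, 32),
    Rational(version="A", cuda=False),    # drop-in replacement for a fixed activation such as nn.LeakyReLU()
    nn.Linear(32, 1),
)
y = model(torch.randn(8, 16))
n_params = sum(p.numel() for p in model.parameters())  # includes the rational coefficients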
Example #4
    def __init__(self,
                 approx_func='leaky_relu',
                 degrees=(5, 4),
                 cuda=False,
                 version='A',
                 trainable=True,
                 **kwargs):
        super(Rational, self).__init__(**kwargs)

        # read initial parameter configuration from external files
        w_numerator, w_denominator = get_parameters(version, degrees,
                                                    approx_func)

        # convert w_numerator and w_denominator to mxnet arrays
        w_numerator = mx.nd.array(w_numerator)
        w_denominator = mx.nd.array(w_denominator)

        # register the number of weights in the numerator and denominator, since they are needed
        # during symbolic execution but cannot be retrieved at later stages
        self.numerator_length = len(w_numerator)
        self.denominator_length = len(w_denominator)
        # register whether the function is trainable, since this information needs to be passed
        # to version D
        self.training = trainable
        self.degrees = degrees
        self.version = version
        self.init_approximation = approx_func

        # set the specified context (currently not done, since it is unclear how this would help)
        # self.device = gpu() if cuda else cpu()

        # register and configure weights (numerator and denominator coefficients)
        with self.name_scope():
            self.numerator = self.params.get(
                name='w_numerator',
                shape=(len(w_numerator), ),
                init=initializer.Constant(w_numerator),
                grad_req='write' if trainable else 'null',
                differentiable=trainable)
            self.denominator = self.params.get(
                name='w_denominator',
                shape=(len(w_denominator), ),
                init=initializer.Constant(w_denominator),
                grad_req='write' if trainable else 'null',
                differentiable=trainable)

        # set rational activation function version
        self.rational_func = {'A': _version_a, 'B': _version_b, 'C': _version_c, 'D': _version_d} \
            .get(version)
        if self.rational_func is None:
            raise ValueError(
                "rational activation function version %s not implemented" %
                version)
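
The stored `numerator_length`/`denominator_length` matter because `hybrid_forward` may receive symbolic arrays whose length cannot be queried during hybridisation. A hypothetical sketch of how a forward pass could consume them (the real `hybrid_forward` and `_version_*` signatures are not shown above, so this is an assumption):

    # Hypothetical sketch of a forward pass, for illustration only.
    def hybrid_forward(self, F, x, numerator, denominator):
        # the lengths registered in __init__ are needed here, because the symbolic
        # 'numerator'/'denominator' arrays expose no usable __len__ at this point
        return self.rational_func(F, x, numerator, denominator, self.training,
                                  self.numerator_length, self.denominator_length)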
Example #5
    def __init__(self, approx_func="leaky_relu", degrees=(5, 4), version="A"):
        w_numerator, w_denominator = get_parameters(version, degrees,
                                                    approx_func)
        self.numerator = w_numerator
        self.denominator = w_denominator
        self.init_approximation = approx_func
        self.degrees = degrees
        self.version = version

        if version == "A":
            rational_func = Rational_version_A
        elif version == "B":
            rational_func = Rational_version_B
        elif version == "C":
            rational_func = Rational_version_C
        else:
            raise ValueError("version %s not implemented" % version)
        self.activation_function = rational_func
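
This framework-agnostic variant keeps the raw coefficient lists and delegates evaluation to `Rational_version_A/B/C`. As a rough sketch of what version `A` computes, namely P(x)/Q(x) with Q(x) = 1 + |b_1.x| + |b_2.x^2| + ... (this re-implementation is an assumption for illustration, not the library's own code):

# Hypothetical re-implementation of the version-A rational function, for illustration only.
import numpy as np

def rational_version_a_sketch(x, w_numerator, w_denominator):
    x = np.asarray(x, dtype=float)
    # P(x) = a_0 + a_1*x + ... + a_n*x^n
    p = sum(a * x**i for i, a in enumerate(w_numerator))
    # Q(x) = 1 + |b_1*x| + |b_2*x^2| + ... + |b_m*x^m|
    q = 1.0 + sum(abs(b * x**(j + 1)) for j, b in enumerate(w_denominator))
    return p / q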
Example #6
    def __init__(self, approx_func="leaky_relu", degrees=(5, 4), cuda=False,
                 version="A", trainable=True, train_numerator=True, train_denominator=True):
        super(Rational, self).__init__()

        w_numerator, w_denominator = get_parameters(version, degrees, approx_func)
        self.numerator = tf.Variable(initial_value=w_numerator, trainable=trainable and train_numerator)
        self.denominator = tf.Variable(initial_value=w_denominator, trainable=trainable and train_denominator)

        if version == "A":
            rational_func = Rational_PYTORCH_A_F
        elif version == "B":
            rational_func = Rational_PYTORCH_B_F
        elif version == "C":
            rational_func = Rational_PYTORCH_C_F
        elif version == "D":
            rational_func = Rational_PYTORCH_D_F
        else:
            raise ValueError("version %s not implemented" % version)

        self.rational_func = rational_func
Example #7
    def __init__(self,
                 approx_func="leaky_relu",
                 degrees=(5, 4),
                 cuda=False,
                 version="A",
                 trainable=True):
        super().__init__()

        w_numerator, w_denominator = get_parameters(version, degrees,
                                                    approx_func)

        # add trainable weight vectors for numerator (a_0, ... a_n) and denominator (b_0, ... b_m)
        self.numerator = self.add_weight(
            shape=(len(w_numerator), ),
            name='w_numerator',
            trainable=trainable,
            initializer=tf.keras.initializers.Constant(w_numerator))

        self.denominator = self.add_weight(
            shape=(len(w_denominator), ),
            name='w_denominator',
            trainable=trainable,
            initializer=tf.keras.initializers.Constant(w_denominator))

        # record whether weights are trainable. Used later by call() method
        self.training = trainable
        self.degrees = degrees
        self.version = version
        self.init_approximation = approx_func

        # set rational activation function version
        self.rational_func = {'A': _version_a, 'B': _version_b, 'C': _version_c, 'D': _version_d}\
            .get(version)
        if self.rational_func is None:
            raise ValueError(
                "rational activation function version %s not implemented" %
                version)
Example #8
    def __init__(self,
                 approx_func="leaky_relu",
                 degrees=(5, 4),
                 cuda=False,
                 version="A",
                 trainable=True,
                 train_numerator=True,
                 train_denominator=True):
        """
        Inherited from tensorflow.keras.layers.Layer

        Defines custom layer attributes, and creates layer state variables that do not depend on
        input shapes, using ``add_weight()``

        :param approx_func: The name of the approximated function for initialisation.
        The different functions are available in `rational.rationals_config.json`.
        Default ``leaky_relu``.
        :param degrees: The degrees of the numerator (P) and denominator (Q).
        Default ``(5, 4)``
        :param cuda: whether to execute on cuda device. NOTE: CURRENTLY NOT USED, i.e.
        function always executes on cuda device if available.
        :param version: Version of Rational to use. Rational(x) = P(x)/Q(x)
        `A`: Q(x) = 1 + |b_1.x| + |b_2.x^2| + ... + |b_n.x^n|
        `B`: Q(x) = 1 + |b_1.x + b_2.x^2 + ... + b_n.x^n|
        `C`: Q(x) = 0.1 + |b_1.x + b_2.x^2 + ... + b_n.x^n|
        `D`: like `B` with noise
        Default ``A``
        :param trainable: If the weights are trainable, i.e., if they are updated during
        the backward pass.
        Default ``True``
        :param train_numerator: whether numerator coefficients are trainable
        :param train_denominator: whether denominator coefficients are trainable
        """
        super(Rational, self).__init__()

        w_numerator, w_denominator = get_parameters(version, degrees,
                                                    approx_func)

        # add trainable weight vectors for numerator (a_0, ... a_n) and denominator (b_0, ... b_m)
        self.numerator = self.add_weight(
            shape=(len(w_numerator), ),
            name='w_numerator',
            trainable=trainable and train_numerator,
            initializer=tf.keras.initializers.Constant(w_numerator))

        self.denominator = self.add_weight(
            shape=(len(w_denominator), ),
            name='w_denominator',
            trainable=trainable and train_denominator,
            initializer=tf.keras.initializers.Constant(w_denominator))

        # record whether weights are trainable. Used later by call() method
        self.training = trainable

        # set rational activation function version
        self.rational_func = {'A': _version_a, 'B': _version_b, 'C': _version_c, 'D': _version_d}\
            .get(version)
        if self.rational_func is None:
            raise ValueError(
                "rational activation function version %s not implemented" %
                version)
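
As with the other Keras variants, the layer built in Examples #7 and #8 slots in wherever a fixed activation would go. A minimal sketch of embedding it in a model, assuming the layer also implements `call()` via `self.rational_func` (not shown above):

# Hypothetical usage sketch, not part of the example above.
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(32, input_shape=(16,)),
    Rational(version="B"),                # replaces a fixed activation such as LeakyReLU
    tf.keras.layers.Dense(1),
])
model.compile(optimizer="adam", loss="mse")
model.summary()   # w_numerator / w_denominator appear among the trainable weights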