Example No. 1
    def torch(self,
              cuda=None,
              trainable=True,
              train_numerator=True,
              train_denominator=True):
        """
        Returns a torch version of this activation function.

        Arguments:
                cuda (bool):
                    Use GPU CUDA version. If None, use cuda if available on \
                    the machine\n
                    Default ``None``
                trainable (bool):
                    If the weights are trainable, i.e, if they are updated \
                    during backward pass\n
                    Default ``True``

        Returns:
            function: Rational torch function
        """
        from rational.torch import Rational as Rational_torch
        import torch.nn as nn
        import torch
        rtorch = Rational_torch(self.init_approximation, self.degrees, cuda,
                                self.version, trainable, train_numerator,
                                train_denominator)
        rtorch.numerator = nn.Parameter(
            torch.FloatTensor(self.numerator).to(rtorch.device),
            requires_grad=trainable and train_numerator)
        rtorch.denominator = nn.Parameter(
            torch.FloatTensor(self.denominator).to(rtorch.device),
            requires_grad=trainable and train_denominator)
        return rtorch
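
For context, a minimal usage sketch of the conversion above. The import path of the NumPy-backed Rational is an assumption here; only the `.torch()` call itself comes from the method shown.

# Sketch only: the NumPy-backed Rational import path below is assumed.
from rational.numpy import Rational as Rational_numpy

r_np = Rational_numpy("leaky_relu")   # coefficients fitted to Leaky ReLU
r_torch = r_np.torch(cuda=False)      # torch module with the same coefficients
print(r_torch.numerator, r_torch.denominator)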
Example No. 2
def test_conversion_gpu_to_cpuC():
    rational = Rational(version='C', cuda=True)
    rational.cpu()
    params = np.all([str(para.device) == 'cpu' for para in rational.parameters()])
    cpu_f = "PYTORCH_C" in rational.activation_function.__qualname__
    new_res = rational(inp).detach().numpy()
    coherent_compute = np.all(np.isclose(new_res, expected_res, atol=5e-02))
    assert params and cpu_f and coherent_compute
Example No. 3
def test_conversion_cpu_to_gpuD():
    rational = Rational(version='D', cuda=False, trainable=False)
    rational.cuda()
    params = np.all(['cuda' in str(para.device) for para in rational.parameters()])
    cpu_f = "CUDA_D" in rational.activation_function.__qualname__
    new_res = rational(cuda_inp).clone().detach().cpu().numpy()
    coherent_compute = np.all(np.isclose(new_res, expected_res, atol=5e-02))
    assert params and cpu_f and coherent_compute
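
Note: `inp`, `cuda_inp`, and `expected_res` used in the two conversion tests above are module-level fixtures; Example No. 6 below shows how they are built.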
Example No. 4
    def __init__(self,
                 input_shape,
                 output_shape,
                 recurrent=False,
                 cuda=False,
                 **kwargs):
        super().__init__()

        n_input = input_shape[0]
        n_output = output_shape[0]

        self._h1 = nn.Conv2d(n_input, 32, kernel_size=8, stride=4)
        self._h2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
        self._h3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
        self._h4 = nn.Linear(3136, self.n_features)
        self._h5 = nn.Linear(self.n_features, n_output)

        nn.init.xavier_uniform_(self._h1.weight,
                                gain=nn.init.calculate_gain('relu'))
        nn.init.xavier_uniform_(self._h2.weight,
                                gain=nn.init.calculate_gain('relu'))
        nn.init.xavier_uniform_(self._h3.weight,
                                gain=nn.init.calculate_gain('relu'))
        nn.init.xavier_uniform_(self._h4.weight,
                                gain=nn.init.calculate_gain('relu'))
        nn.init.xavier_uniform_(self._h5.weight,
                                gain=nn.init.calculate_gain('linear'))

        if recurrent:
            self.act_func1 = Rational(cuda=cuda)
            self.act_func2 = self.act_func1
            self.act_func3 = self.act_func1
            self.act_func4 = self.act_func1
        else:
            self.act_func1 = Rational(cuda=cuda)
            self.act_func2 = Rational(cuda=cuda)
            self.act_func3 = Rational(cuda=cuda)
            self.act_func4 = Rational(cuda=cuda)

        if cuda:
            self.cuda()
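
The excerpt above only defines the layers. A hedged sketch of a matching `forward` pass, not part of the original snippet; the flattened size follows from the `nn.Linear(3136, ...)` layer defined above.

    def forward(self, x, **kwargs):
        # Hypothetical forward pass: conv stack with rational activations,
        # flatten, then the two linear layers defined in __init__.
        h = self.act_func1(self._h1(x))
        h = self.act_func2(self._h2(h))
        h = self.act_func3(self._h3(h))
        h = h.view(h.size(0), -1)            # flatten to (batch, 3136)
        h = self.act_func4(self._h4(h))
        return self._h5(h)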
Example No. 5
from rational.torch import Rational
rational_function = Rational("tanh")  # Initialized closed to tanh

rational_function.show()

import numpy as np
rational_function.show(np.arange(-6, 12, 2))

rational_function.input_retrieve_mode()
# Retrieving input from now on.

import torch
means = torch.ones((50, 50)) * 2.
stds = torch.ones((50, 50)) * 3.
for _ in range(1500):
    inp = torch.normal(means, stds).to(rational_function.device)
    rational_function(inp)

# Training mode, no longer retrieving the input.

rational_function.show()
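
Since `Rational` is a regular `torch.nn.Module`, the same instance can also be dropped into a larger model; a minimal sketch (layer sizes are arbitrary, not from the original snippet):

import torch.nn as nn

# Minimal sketch: reuse the Rational instance above as an activation layer.
model = nn.Sequential(
    nn.Linear(50, 50),
    rational_function,
    nn.Linear(50, 10),
)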
Example No. 6
import torch
from torch.nn.functional import leaky_relu
from rational.torch import Rational
import numpy as np


t = torch.tensor([-2., -1, 0., 1., 2.])
expected_res = np.array(leaky_relu(t))
inp = torch.from_numpy(np.array(t)).reshape(-1)
cuda_inp = torch.tensor(np.array(t), dtype=torch.float, device="cuda").reshape(-1)


rationalA_lrelu_cpu = Rational(version='A', cuda=False)(inp).detach().numpy()
rationalB_lrelu_cpu = Rational(version='B', cuda=False)(inp).detach().numpy()
rationalC_lrelu_cpu = Rational(version='C', cuda=False)(inp).detach().numpy()
rationalD_lrelu_cpu = Rational(version='D', cuda=False, trainable=False)(inp).detach().numpy()

rationalA_lrelu_gpu = Rational(version='A', cuda=True)(cuda_inp).clone().detach().cpu().numpy()
rationalB_lrelu_gpu = Rational(version='B', cuda=True)(cuda_inp).clone().detach().cpu().numpy()
rationalC_lrelu_gpu = Rational(version='C', cuda=True)(cuda_inp).clone().detach().cpu().numpy()
rationalD_lrelu_gpu = Rational(version='D', cuda=True, trainable=False)(cuda_inp).clone().detach().cpu().numpy()


#  Tests on cpu
def test_rationalA_cpu_lrelu():
    assert np.all(np.isclose(rationalA_lrelu_cpu, expected_res, atol=5e-02))


def test_rationalB_cpu_lrelu():
    assert np.all(np.isclose(rationalB_lrelu_cpu, expected_res, atol=5e-02))
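
The remaining checks follow the same pattern; a sketch of one of the GPU tests, assuming the same tolerance:

def test_rationalA_gpu_lrelu():
    # Same pattern as the CPU tests above, applied to the GPU result.
    assert np.all(np.isclose(rationalA_lrelu_gpu, expected_res, atol=5e-02))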
Example No. 7
from rational.torch import Rational

rational_function = Rational()  # Initialized close to Leaky ReLU
print(rational_function)
#    Pade Activation Unit (version A) of degrees (5, 4) running on cuda:0
# or Pade Activation Unit (version A) of degrees (5, 4) running on cpu

rational_function.cpu()
rational_function.cuda()

print(rational_function.degrees)
# (5, 4)
print(rational_function.version)
# A
print(rational_function.training)
# True

import torch
import torch.nn as nn


class RationalNetwork(nn.Module):
    n_features = 512

    def __init__(self,
                 input_shape,
                 output_shape,
                 recurrent=False,
                 cuda=False,
                 **kwargs):
        super().__init__()
Example No. 8
import torch.nn.functional as F
from rational.utils.find_init_weights import find_weights  # import path assumed

find_weights(F.tanh)

# approximated function name: tanh
# degree of the numerator P: 5
# degree of the denominator Q: 4
# lower bound: -3
# upper bound: 3
# Rational Version: B

# Found coeffient :
# P: [2.11729498e-09 9.99994250e-01 6.27633277e-07 1.07708645e-01
#  2.94655690e-08 8.71124374e-04]
# Q: [6.37690834e-07 4.41014181e-01 2.27476614e-07 1.45810399e-02]

# Do you want a plot of the result (y/n)y

# Do you want to store them in the json file ? (y/n)y

from rational.torch import Rational

rational_tanh_B = Rational("tanh", version="B")
print(rational_tanh_B.init_approximation)
# 'tanh'
print(rational_tanh_B.numerator.cpu().detach().numpy())
# [2.1172950e-09 9.9999428e-01 6.2763326e-07 1.0770865e-01 2.9465570e-08
#  8.7112439e-04]
print(rational_tanh_B.denominator.cpu().detach().numpy())
# [6.3769085e-07 4.4101417e-01 2.2747662e-07 1.4581040e-02]
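
As a quick sanity check, not part of the original snippet, the fitted coefficients should reproduce tanh closely on the fitted interval [-3, 3]:

import torch
import numpy as np

# Sanity-check sketch: compare the fitted rational to tanh on the fitted range.
x = torch.linspace(-3, 3, steps=100)
approx = rational_tanh_B(x.to(rational_tanh_B.device)).detach().cpu().numpy()
exact = np.tanh(x.numpy())
print(np.max(np.abs(approx - exact)))  # expected to be small on [-3, 3]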