Example #1
    def __init__(self, approx_func="leaky_relu", degrees=(5, 4), cuda=None,
                 version="A", trainable=True, train_numerator=True,
                 train_denominator=True):
        super(Rational, self).__init__()

        # Resolve the target device: default to CUDA when it is available,
        # and pass an explicit device string (e.g. "cuda:1") through as-is.
        if cuda is None:
            cuda = torch_cuda_available()
        if cuda is True:
            device = "cuda"
        elif cuda is False:
            device = "cpu"
        else:
            device = cuda

        # Fetch initial coefficients that make the rational function
        # approximate approx_func for this version and these degrees.
        w_numerator, w_denominator = get_parameters(version, degrees,
                                                    approx_func)

        # Numerator and denominator coefficients are learnable parameters;
        # each can be frozen independently through the train_* flags.
        self.numerator = nn.Parameter(torch.FloatTensor(w_numerator).to(device),
                                      requires_grad=trainable and train_numerator)
        self.denominator = nn.Parameter(torch.FloatTensor(w_denominator).to(device),
                                        requires_grad=trainable and train_denominator)
        self.register_parameter("numerator", self.numerator)
        self.register_parameter("denominator", self.denominator)
        self.device = device
        self.degrees = degrees
        self.version = version
        self.training = trainable

        self.init_approximation = approx_func

        if "cuda" in str(device):
            if version == "A":
                rational_func = Rational_CUDA_A_F
            elif version == "B":
                rational_func = Rational_CUDA_B_F
            elif version == "C":
                rational_func = Rational_CUDA_C_F
            elif version == "D":
                rational_func = Rational_CUDA_D_F
            else:
                raise ValueError("version %s not implemented" % version)

            self.activation_function = rational_func.apply
        else:
            if version == "A":
                rational_func = Rational_PYTORCH_A_F
            elif version == "B":
                rational_func = Rational_PYTORCH_B_F
            elif version == "C":
                rational_func = Rational_PYTORCH_C_F
            elif version == "D":
                rational_func = Rational_PYTORCH_D_F
            else:
                raise ValueError("version %s not implemented" % version)

            self.activation_function = rational_func
        self._handle_retrieve_mode = None
        self.distribution = None
        self.best_fitted_function = None
        self.best_fitted_function_params = None
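
A minimal usage sketch for the constructor above (a sketch only: it assumes the
package's public entry point rational.torch.Rational and that the module's
forward applies self.activation_function to its input):

import torch
import torch.nn as nn
from rational.torch import Rational

net = nn.Sequential(
    nn.Linear(16, 32),
    # Learnable rational activation initialised to approximate leaky_relu.
    Rational(approx_func="leaky_relu", version="A"),
    nn.Linear(32, 1),
)
out = net(torch.randn(8, 16))  # used like any other nn.Module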
Example #2
import os

from torch.cuda import is_available as torch_cuda_available


def is_torch_cuda_available():
    """Wrapper around the torch CUDA availability check
    (torch.cuda.is_available) that also honours the environment variable
    FORCE_CUDA: the function returns True whenever FORCE_CUDA=1.

    This is necessary when building rational from a Dockerfile, since the
    docker build step has no access to CUDA and torch.cuda.is_available
    therefore always returns False, even when the image being built will
    in fact have CUDA.
    """
    force_cuda = os.getenv("FORCE_CUDA", "0") == "1"
    return force_cuda or torch_cuda_available()
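
A quick sketch of the override's behaviour; the environment variable is read on
every call, so it can also be toggled at runtime. In a Dockerfile, the
equivalent is ENV FORCE_CUDA=1 before the build step.

import os

os.environ["FORCE_CUDA"] = "1"
assert is_torch_cuda_available()  # True even on a CUDA-less build machine

del os.environ["FORCE_CUDA"]
# Without the override, the result is just torch.cuda.is_available().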
Example #3
    def __init__(self,
                 w_numerator=init_w_numerator,
                 w_denominator=init_w_denominator,
                 center=center,
                 cuda=None,
                 version="A",
                 trainable=True,
                 train_center=True,
                 train_numerator=True,
                 train_denominator=True):
        super(PAU, self).__init__()

        # Default to CUDA whenever it is available.
        if cuda is None:
            cuda = torch_cuda_available()

        # The center and both coefficient vectors are learnable parameters;
        # each can be frozen independently through the train_* flags.
        self.center = nn.Parameter(torch.FloatTensor([center]),
                                   requires_grad=trainable and train_center)
        self.numerator = nn.Parameter(torch.FloatTensor(w_numerator),
                                      requires_grad=trainable
                                      and train_numerator)
        self.denominator = nn.Parameter(torch.FloatTensor(w_denominator),
                                        requires_grad=trainable
                                        and train_denominator)

        # Pick the forward implementation: the CUDA autograd Function
        # (used through .apply) on GPU, the pure-PyTorch function otherwise.
        if cuda:
            if version == "A":
                pau_func = PAU_CUDA_A_F
            elif version == "B":
                pau_func = PAU_CUDA_B_F
            elif version == "C":
                pau_func = PAU_CUDA_C_F
            elif version == "D":
                pau_func = PAU_CUDA_D_F
            else:
                raise ValueError("version %s not implemented" % version)

            self.activation_function = pau_func.apply
        else:
            if version == "A":
                pau_func = PAU_PYTORCH_A_F
            elif version == "B":
                pau_func = PAU_PYTORCH_B_F
            elif version == "C":
                pau_func = PAU_PYTORCH_C_F
            elif version == "D":
                pau_func = PAU_PYTORCH_D_F
            else:
                raise ValueError("version %s not implemented" % version)

            self.activation_function = pau_func
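
The four-way if/elif ladder appears twice in this constructor (and again in
Example #1); a hypothetical refactor sketches it as a table lookup, using the
same function names:

        # Hypothetical refactor: map version letters to implementations once.
        cuda_funcs = {"A": PAU_CUDA_A_F, "B": PAU_CUDA_B_F,
                      "C": PAU_CUDA_C_F, "D": PAU_CUDA_D_F}
        pytorch_funcs = {"A": PAU_PYTORCH_A_F, "B": PAU_PYTORCH_B_F,
                         "C": PAU_PYTORCH_C_F, "D": PAU_PYTORCH_D_F}
        table = cuda_funcs if cuda else pytorch_funcs
        if version not in table:
            raise ValueError("version %s not implemented" % version)
        # The CUDA variants are autograd Functions, hence the .apply.
        pau_func = table[version]
        self.activation_function = pau_func.apply if cuda else pau_func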
Example #4
#include <curand.h>
#include <curand_kernel.h>
#include <curand_philox4x32_x.h>

constexpr uint32_t THREADS_PER_BLOCK = 512;
"""

    file_content = airspeed.Template(template + template_contents)

    content = file_content.merge(locals())

    with open(fname, "w") as text_file:
        text_file.write(content)


if torch_cuda_available():
    version_names = []
    template_contents = ""
    for template_fname in sorted(glob.glob("rational/_cuda/versions/*.cu")):
        version_names.append(Path(template_fname).stem)
        with open(template_fname) as infile:
            template_contents += infile.read()

    generate_cpp_module(fname='rational/_cuda/rational_cuda.cpp',
                        versions=version_names)
    generate_cpp_kernels_module(
        fname='rational/_cuda/rational_cuda_kernels.cu',
        template_contents=template_contents)

with open("README.md", "r") as fh:
    long_description = fh.read()
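
The code generation above relies on airspeed's Velocity-style $placeholder
substitution, with merge(locals()) supplying the values; a minimal
self-contained sketch (hypothetical placeholder names):

import airspeed

kernel_list = "A, B, C, D"
template = airspeed.Template("Compiling CUDA kernels: $kernel_list")
print(template.merge(locals()))  # -> Compiling CUDA kernels: A, B, C, D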
Example #5
"""
Padé Activation Units - Rational Activation Functions for pytorch
=================================================================

This module allows you to create Rational Neural Networks using Padé Activation
Units - Learnable Rational activation functions.
"""
import torch.nn as nn
from torch.cuda import is_available as torch_cuda_available
from rational.utils.get_weights import get_parameters

# Import the fused CUDA kernels only when CUDA is available; fail loudly
# if the extension itself cannot be imported.
if torch_cuda_available():
    try:
        from rational.torch.rational_cuda_functions import *
    except ImportError as ImpErr:
        print('\n\nError importing rational_cuda, is cuda not available?\n\n')
        print(ImpErr)
        exit(1)

from rational.torch.rational_pytorch_functions import *


class RecurrentRational():
    """
        Recurrent rational activation function - wrapper for Rational

        Arguments:
                approx_func (str):
                    The name of the approximated function for initialisation. \
                    The functions available for initialisation are listed in \
                    `rational.rationals_config.json`. \n
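
A usage sketch for the wrapper, assuming the instance is callable and every
call returns an activation module backed by the same shared Rational weights
(which is what "recurrent" suggests here):

import torch.nn as nn
from rational.torch import RecurrentRational

recurrent_act = RecurrentRational(approx_func="leaky_relu")
model = nn.Sequential(
    nn.Linear(16, 16),
    recurrent_act(),  # both activations share one set of rational weights
    nn.Linear(16, 16),
    recurrent_act(),
)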