Example 1
    def __init__(self):
        self.lr = 0.00042307692307692304
        self.hidden_sizes = [400, 400, 400, 400, 400, 400, 400]

        self.momentum = 0.7923
        
        self.activations = [
            nn.Hardshrink(1.1),
            nn.LeakyReLU(negative_slope=0.01),
            nn.LeakyReLU(negative_slope=0.01),
            nn.LeakyReLU(negative_slope=0.01),
            nn.LeakyReLU(negative_slope=0.01),
            nn.LeakyReLU(negative_slope=0.01),
            nn.Hardshrink(1.1),
            nn.Sigmoid()
        ]

        # NOTE: Grids

        # self.activations_grid = [
        #     [
        #         nn.Hardshrink(k),
        #         nn.LeakyReLU(negative_slope=0.01),
        #         nn.LeakyReLU(negative_slope=0.01),
        #         nn.LeakyReLU(negative_slope=0.01),
        #         nn.LeakyReLU(negative_slope=0.01),
        #         nn.LeakyReLU(negative_slope=0.01),
        #         nn.Hardshrink(k),
        #         nn.Sigmoid()
        #     ]
        #     for k in np.linspace(0.05, 2.5, 15)
        # ]

        self.lr_grid = list(np.linspace(0.0001, 0.0007, 40))

        # self.hidden_sizes_grid = [
        #     [i, j, k, l, m, o, p]
        #     for i in [400]
        #     for j in [400]
        #     for k in [400]
        #     for l in [400]
        #     for m in [400]
        #     for o in [400]
        #     for p in [400]
        # ]

        # self.momentum_grid = list(np.linspace(0.7,0.9,40))

        self.grid_search = GridSearch(self)
        self.grid_search.set_enabled(False)
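
For reference, a minimal sketch (not part of the example above) of what the Hardshrink(1.1) layers in this configuration do: every element whose absolute value is at most 1.1 is zeroed, everything else passes through unchanged.

import torch
import torch.nn as nn

shrink = nn.Hardshrink(1.1)
x = torch.tensor([-2.0, -1.0, 0.5, 1.1, 3.0])
print(shrink(x))  # tensor([-2., 0., 0., 0., 3.])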
Example 2
    def __init__(self):

        super().__init__()
        self.out = nn.Sequential(nn.Linear(7, 2), nn.PReLU(), nn.Linear(2, 6),
                                 nn.SELU(), nn.Linear(6, 2), nn.ELU(),
                                 nn.Linear(2, 5), nn.Hardshrink(),
                                 nn.Linear(5, 2), nn.Tanh(), nn.Linear(2, 2))
Example 3
def str2act(s):
    if s == 'none':
        return None
    elif s == 'hardtanh':
        return nn.Hardtanh()
    elif s == 'sigmoid':
        return nn.Sigmoid()
    elif s == 'relu6':
        return nn.ReLU6()
    elif s == 'tanh':
        return nn.Tanh()
    elif s == 'tanhshrink':
        return nn.Tanhshrink()
    elif s == 'hardshrink':
        return nn.Hardshrink()
    elif s == 'leakyrelu':
        return nn.LeakyReLU()
    elif s == 'softshrink':
        return nn.Softshrink()
    elif s == 'softsign':
        return nn.Softsign()
    elif s == 'relu':
        return nn.ReLU()
    elif s == 'prelu':
        return nn.PReLU()
    elif s == 'softplus':
        return nn.Softplus()
    elif s == 'elu':
        return nn.ELU()
    elif s == 'selu':
        return nn.SELU()
    else:
        raise ValueError("[!] Invalid activation function.")
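
A quick usage sketch for str2act (the tensor values below are illustrative, not from the original):

import torch

act = str2act('hardshrink')         # nn.Hardshrink() with the default lambd=0.5
x = torch.tensor([-1.0, 0.2, 0.8])
print(act(x))                       # tensor([-1.0000, 0.0000, 0.8000])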
Example 4
 def create_str_to_activations_converter(self):
     """Creates a dictionary which converts strings to activations"""
     str_to_activations_converter = {
         "elu": nn.ELU(),
         "hardshrink": nn.Hardshrink(),
         "hardtanh": nn.Hardtanh(),
         "leakyrelu": nn.LeakyReLU(),
         "logsigmoid": nn.LogSigmoid(),
         "prelu": nn.PReLU(),
         "relu": nn.ReLU(),
         "relu6": nn.ReLU6(),
         "rrelu": nn.RReLU(),
         "selu": nn.SELU(),
         "sigmoid": nn.Sigmoid(),
         "softplus": nn.Softplus(),
         "logsoftmax": nn.LogSoftmax(),
         "softshrink": nn.Softshrink(),
         "softsign": nn.Softsign(),
         "tanh": nn.Tanh(),
         "tanhshrink": nn.Tanhshrink(),
         "softmin": nn.Softmin(),
         "softmax": nn.Softmax(dim=1),
         "none": None
     }
     return str_to_activations_converter
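
One way the returned dictionary is typically used (a sketch, not from the original source):

converter = self.create_str_to_activations_converter()
hidden_activation = converter["hardshrink"]   # nn.Hardshrink() with lambd=0.5
output_activation = converter["none"]         # None, i.e. no output activation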
Example 5
    def __init__(self, alpha=1.0):
        super().__init__()
        self.activations = [
            nn.ELU(),
            nn.Hardshrink(),
            nn.Hardtanh(),
            nn.LeakyReLU(),
            nn.LogSigmoid(),
            nn.ReLU(),
            nn.PReLU(),
            nn.SELU(),
            nn.CELU(),
            nn.Sigmoid(),
            nn.Softplus(),
            nn.Softshrink(),
            nn.Softsign(),
            nn.Tanh(),
            nn.Tanhshrink()
        ]

        self.P = [
            torch.nn.Parameter(torch.randn(1, requires_grad=True))
            for _ in self.activations
        ]

        for activation, param in zip(self.activations, self.P):
            activation_name = str(activation).split("(")[0]
            self.add_module(name=activation_name, module=activation)
            self.register_parameter(name=activation_name + "p", param=param)
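
The snippet above only shows __init__; one plausible forward pass (an assumption, not the original code) would mix every activation with its learned weight, much like the AdaptiveActivationFunction16 method in Example 20 below:

    def forward(self, x):
        # weighted sum of every activation's output; all activations here are
        # element-wise, so the result keeps the shape of x
        return sum(p * act(x) for act, p in zip(self.activations, self.P))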
Example 6
    def __init__(self):
        self.best_step = sys.maxsize
        self.sols = {}
        self.solsSum = {}
        self.hidden_size = 50
        self.lr = 0.01
        self.activation_hidden = 'relu6'
        self.activation_output = 'sigmoid'
        self.activations = {
            'sigmoid': nn.Sigmoid(),
            'relu': nn.ReLU(),
            'relu6': nn.ReLU6(),
            'rrelu0103': nn.RReLU(0.1, 0.3),
            'rrelu0205': nn.RReLU(0.2, 0.5),
            'htang1': nn.Hardtanh(-1, 1),
            'htang2': nn.Hardtanh(-2, 2),
            'htang3': nn.Hardtanh(-3, 3),
            'tanh': nn.Tanh(),
            'elu': nn.ELU(),
            'selu': nn.SELU(),
            'hardshrink': nn.Hardshrink(),
            'leakyrelu01': nn.LeakyReLU(0.1),
            'leakyrelu001': nn.LeakyReLU(0.01),
            'logsigmoid': nn.LogSigmoid(),
            'prelu': nn.PReLU(),
        }
        self.hidden_size_grid = [16, 20, 26, 32, 36, 40, 45, 50, 54]
        self.lr_grid = [0.0001, 0.001, 0.005, 0.01, 0.1, 1]

        # self.lr_grid = [0.1, .5, 1, 1.5, 2, 3, 5, 10]

        # self.activation_hidden_grid = list(self.activations.keys())
        # self.activation_output_grid = list(self.activations.keys())
        self.grid_search = GridSearch(self)
        self.grid_search.set_enabled(False)
Example 7
 def __init__(self):
     super(NNActivationModule, self).__init__()
     self.activations = nn.ModuleList([
         nn.ELU(),
         nn.Hardshrink(),
         nn.Hardsigmoid(),
         nn.Hardtanh(),
         nn.Hardswish(),
         nn.LeakyReLU(),
         nn.LogSigmoid(),
         # nn.MultiheadAttention(),
         nn.PReLU(),
         nn.ReLU(),
         nn.ReLU6(),
         nn.RReLU(),
         nn.SELU(),
         nn.CELU(),
         nn.GELU(),
         nn.Sigmoid(),
         nn.SiLU(),
         nn.Mish(),
         nn.Softplus(),
         nn.Softshrink(),
         nn.Softsign(),
         nn.Tanh(),
         nn.Tanhshrink(),
         # nn.Threshold(0.1, 20),
         nn.GLU(),
         nn.Softmin(),
         nn.Softmax(),
         nn.Softmax2d(),
         nn.LogSoftmax(),
         # nn.AdaptiveLogSoftmaxWithLoss(),
     ])
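
A possible forward for this module (assumed; only __init__ is shown above). Most entries are element-wise, but nn.GLU halves the last dimension and nn.Softmax2d expects an (N, C, H, W) input, so the outputs are collected in a list rather than stacked into one tensor:

 def forward(self, x):
     # apply every activation to the same input and return the outputs
     return [act(x) for act in self.activations]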
Example 8
 def __init__(self, p, tr, step, phi_step, thresh, dev):
     super(Instance_block, self).__init__()
     self.p = p
     self.dev = dev
     self.training = tr
     self.step = nn.Parameter(step, requires_grad=self.training)
     self.phi_step = nn.Parameter(phi_step, requires_grad=self.training)
     self.thresh = torch.as_tensor(thresh)
     self.thresh_func = nn.Hardshrink(self.thresh)
Example 9
    def __init__(self,
                 input_shape,
                 num_layers,
                 neurons,
                 activator_id=5,
                 optimizer_id=0):
        super().__init__()
        self.num_layers = num_layers  # number of layers
        self.neurons = neurons  # number of neurons in each layer e.g. for 2 layers, neurons=[10,20]
        self.activator_id = activator_id  # activation function, can be one of the following: ELU, Hardshrink, LeakyReLU, LogSigmoid, PReLU, ReLU, ReLU6, RReLU, SELU, CELU, Sigmoid
        self.optimizer_id = optimizer_id  # optimizer id, can be one of the following: Adadelta, Adagrad, Adam, Adamax, ASGD, RMSprop, Rprop, SGD

        # set activation function
        if (activator_id == 0):
            self.activator = nn.ELU()
        elif (activator_id == 1):
            self.activator = nn.Hardshrink()
        elif (activator_id == 2):
            self.activator = nn.LeakyReLU()
        elif (activator_id == 3):
            self.activator = nn.LogSigmoid()
        elif (activator_id == 4):
            self.activator = nn.PReLU()
        elif (activator_id == 5):
            self.activator = nn.ReLU()
        elif (activator_id == 6):
            self.activator = nn.ReLU6()
        elif (activator_id == 7):
            self.activator = nn.RReLU()
        elif (activator_id == 8):
            self.activator = nn.SELU()
        elif (activator_id == 9):
            self.activator = nn.CELU()

        # network architecture
        if (num_layers == 1):
            self.layers = nn.Sequential(
                nn.Linear(input_shape, self.neurons[0]), self.activator,
                nn.Linear(self.neurons[0], 1))
        elif (num_layers == 2):
            self.layers = nn.Sequential(
                nn.Linear(input_shape, self.neurons[0]), self.activator,
                nn.Linear(self.neurons[0], self.neurons[1]), self.activator,
                nn.Linear(self.neurons[1], 1))
        elif (num_layers == 3):
            self.layers = nn.Sequential(
                nn.Linear(input_shape, self.neurons[0]), self.activator,
                nn.Linear(self.neurons[0], self.neurons[1]), self.activator,
                nn.Linear(self.neurons[1], self.neurons[2]), self.activator,
                nn.Linear(self.neurons[2], 1))
        elif (num_layers == 4):
            self.layers = nn.Sequential(
                nn.Linear(input_shape, self.neurons[0]), self.activator,
                nn.Linear(self.neurons[0], self.neurons[1]), self.activator,
                nn.Linear(self.neurons[1], self.neurons[2]), self.activator,
                nn.Linear(self.neurons[2], self.neurons[3]), self.activator,
                nn.Linear(self.neurons[3], 1))
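
The explicit branches above can also be written as a loop; a sketch of an equivalent construction (assumed, not part of the original) for num_layers from 1 to 4:

        sizes = [input_shape] + list(self.neurons[:num_layers])
        blocks = []
        for in_features, out_features in zip(sizes[:-1], sizes[1:]):
            blocks += [nn.Linear(in_features, out_features), self.activator]
        blocks.append(nn.Linear(sizes[-1], 1))   # final single-output layer
        self.layers = nn.Sequential(*blocks)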
Example 10
 def __init__(self):
     self.activations = {
         'sigmoid': nn.Sigmoid(),
         'relu': nn.ReLU(),
         'relu6': nn.ReLU6(),
         'rrelu0103': nn.RReLU(0.1, 0.3),
         'rrelu0205': nn.RReLU(0.2, 0.5),
         'htang1': nn.Hardtanh(-1, 1),
         'htang2': nn.Hardtanh(-2, 2),
         'htang3': nn.Hardtanh(-3, 3),
         'tanh': nn.Tanh(),
         'elu': nn.ELU(),
         'selu': nn.SELU(),
         'hardshrink': nn.Hardshrink(),
         'leakyrelu01': nn.LeakyReLU(0.1),
         'leakyrelu001': nn.LeakyReLU(0.01),
         'logsigmoid': nn.LogSigmoid(),
         'prelu': nn.PReLU(),
     }
     self.loss_functions = {
         'binary_cross_entropy': nn.BCELoss(),
         'binary_cross_entropy_with_logits': nn.BCEWithLogitsLoss(),
         'poisson_nll_loss': nn.PoissonNLLLoss(),
         # 'cosine_embedding_loss': nn.CosineEmbeddingLoss(),
         # 'cross_entropy': nn.CrossEntropyLoss(),
         # 'ctc_loss': nn.CTCLoss(),
         'hinge_embedding_loss': nn.HingeEmbeddingLoss(),
         'kl_div': nn.KLDivLoss(),
         'l1_loss': nn.L1Loss(),
         'mse_loss': nn.MSELoss(),
         # 'margin_ranking_loss': nn.MarginRankingLoss(),
         # 'multilabel_margin_loss': nn.MultiLabelMarginLoss(),
         'multilabel_soft_margin_loss': nn.MultiLabelSoftMarginLoss(),
         # 'multi_margin_loss': nn.MultiMarginLoss(),
         # 'nll_loss': nn.NLLLoss(),
         'smooth_l1_loss': nn.SmoothL1Loss(),
         'soft_margin_loss': nn.SoftMarginLoss(),
         # 'triplet_margin_loss': nn.TripletMarginLoss(),
     }
     self.learning_rate = 2.8
     self.momentum = 0.8
     self.hidden_size = 10
     self.activation_hidden = 'relu'
     self.loss_function = 'binary_cross_entropy'
     self.sols = {}
     self.solsSum = {}
     self.random = 3
     self.random_grid = [_ for _ in range(10)]
     # self.hidden_size_grid = [20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39]
     # self.hidden_size_grid = [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
     # self.learning_rate_grid = [0.1, 1.0, 2.0, 3.0, 5.0]
     # self.activation_hidden_grid = list(self.activations.keys())
     # self.activation_hidden_grid = list(self.activations.keys())
     # self.loss_function_grid = list(self.loss_functions.keys())
     self.grid_search = GridSearch(self)
     self.grid_search.set_enabled(False)
Example 11
 def __init__(self, size):  # ,wav,wav_inv
     super().__init__()
     # wavelet operator must be defined for a specific resolution
     self.space1 = odl.uniform_discr([-128, -128], [128, 128],
                                     [2 * size, 2 * size], dtype='float32')
     self.wav = OperatorAsModule(
         odl.trafos.WaveletTransform(self.space1, 'haar', 1))
     self.wav_inv = OperatorAsModule(
         odl.trafos.WaveletTransform(self.space1, 'haar', 1).inverse)
     self.size = size
     self.fact = 2**(np.log2(512 / self.size) - 1)  # scaling factor

     self.thresh = nn.Hardshrink(0.01)
Example 12
    def __init__(self, num_classes=17):
        super(NN_model, self).__init__()
        self.linear1 = nn.Linear(376, 100)
        self.linear2 = nn.Linear(100, 50)
        self.linear3 = nn.Linear(50, num_classes)

        # self.m = nn.SELU()
        self.hard_tanh = nn.Hardtanh(min_val=-.4, max_val=0.4)
        self.tanh = nn.Tanh()
        self.shrink = nn.Hardshrink()
        self.norm = nn.LayerNorm((1, 376))
Example 13
 def __init__(
     self,
     dim,
     num_heads=8,
     qkv_bias=False,
     qk_scale=None,
     attn_drop=0.0,
     proj_drop=0.0,
     lambd=0.0,
 ):
     super().__init__(dim, num_heads, qkv_bias, qk_scale, attn_drop, proj_drop)
     self.hard_shrink = nn.Hardshrink(lambd=lambd)
Example 14
 def __init__(self, input_size, hidden_size, output_size, dropout=0.5):
     super(Net, self).__init__()
     #self.fc1 = nn.Linear(input_size, hidden_size) 
     self.fc1 = nn.Linear(input_size, hidden_size)
     self.l1 = nn.ReLU()
     self.l2 = nn.Sigmoid()
     self.l3 = nn.Tanh()
     self.l4 = nn.ELU()
     self.l5 = nn.Hardshrink()
     self.ln = nn.Linear(hidden_size, hidden_size)
     self.fc2 = nn.Linear(hidden_size, output_size)
     self.dp = nn.Dropout(dropout)
Example 15
 def __init__(self, num_classes=10):
     super(Model, self).__init__()
     self.features = nn.Sequential(
         nn.Conv2d(1, 32, kernel_size=3, stride=1),
         nn.Hardshrink(lambd=0.3),
         #nn.Softshrink(),
         #nn.ReLU(),
         #nn.Tanh(),
         #nn.ELU(),
         nn.Conv2d(32, 32, kernel_size=3, stride=1),
         nn.Hardshrink(lambd=0.3),
         #nn.Softshrink(),
         #nn.ReLU(),
         #nn.Tanh(),
         #nn.ELU(),
         nn.Conv2d(32, 32, kernel_size=3, stride=1),
         nn.ReLU(inplace=True),
         nn.MaxPool2d(2))
     self.classifier = nn.Sequential(nn.Dropout(),
                                     nn.Linear(3872, num_classes),
                                     nn.Softmax(dim=1))
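
A quick shape check (assuming 1×28×28 inputs such as MNIST, which is what the 3872 = 32·11·11 input features of the classifier imply):

model = Model()
features = model.features(torch.randn(1, 1, 28, 28))
print(features.flatten(1).shape)   # torch.Size([1, 3872])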
Example 16
def get_activation_function(activation_name):
    if activation_name == 'elu':
        return nn.ELU(alpha=1.0, inplace=False)
    elif activation_name == 'hardshrink':
        return nn.Hardshrink(lambd=0.5)
    elif activation_name == 'hardtanh':
        return nn.Hardtanh(min_val=-1, max_val=1, inplace=False)
    elif activation_name == 'leaky_relu':
        return nn.LeakyReLU(negative_slope=0.03, inplace=False)
    elif activation_name == 'logsigmoid':
        return nn.LogSigmoid()
    elif activation_name == 'prelu':
        return nn.PReLU(num_parameters=1, init=0.25)
    elif activation_name == 'relu':
        return nn.ReLU(inplace=False)
    elif activation_name == 'relu6':
        return nn.ReLU6(inplace=False)
    elif activation_name == 'rrelu':
        return nn.RReLU(lower=0.125, upper=0.3333333333333333, inplace=False)
    elif activation_name == 'selu':
        return nn.SELU(inplace=False)
    elif activation_name == 'sigmoid':
        return nn.Sigmoid()
    elif activation_name == 'softplus':
        return nn.Softplus(beta=1, threshold=20)
    elif activation_name == 'softshrink':
        return nn.Softshrink(lambd=0.5)
    elif activation_name == 'softsign':
        return nn.Softsign()
    elif activation_name == 'tanh':
        return nn.Tanh()
    elif activation_name == 'tanhshrink':
        return nn.Tanhshrink()
    elif activation_name == 'swish':

        def swish(x):
            return x * F.sigmoid(x)

        return swish
    else:
        print('Activation function name is not recognized.')
        sys.exit()
Example 17
def get_activation_func(activation_func_name: str = "relu"):
    if activation_func_name == "none":
        return None
    elif activation_func_name == "relu":
        return nn.ReLU()
    elif activation_func_name == "relu6":
        return nn.ReLU6()
    elif activation_func_name == "prelu":
        return nn.PReLU()
    elif activation_func_name == "elu":
        return nn.ELU()
    elif activation_func_name == "gelu":
        return nn.GELU()
    elif activation_func_name == "selu":
        return nn.SELU()
    elif activation_func_name == "leakyrelu":
        return nn.LeakyReLU()
    elif activation_func_name == "sigmoid":
        return nn.Sigmoid()
    elif activation_func_name == "tanh":
        return nn.Tanh()
    elif activation_func_name == "hardtanh":
        return nn.Hardtanh()
    elif activation_func_name == "tanhshrink":
        return nn.Tanhshrink()
    elif activation_func_name == "hardshrink":
        return nn.Hardshrink()
    elif activation_func_name == "softshrink":
        return nn.Softshrink()
    elif activation_func_name == "softsign":
        return nn.Softsign()
    elif activation_func_name == "softplus":
        return nn.Softplus()
    elif activation_func_name == "mish":
        return Mish()
    elif activation_func_name == "ftswishplus":
        return FTSwishPlus()
    elif activation_func_name == "lightrelu":
        return LightRelu()
    elif activation_func_name == "trelu":
        return TRelu()
    else:
        raise ValueError("[!] Invalid activation function.")
Example 18
def get_activation_(act):
    if act is None or act == 'relu':
        act_fn = nn.ReLU(inplace=True)  # relu as default
    elif act == 'mish':
        act_fn = Mish()
    elif act == 'selu':
        act_fn = Selu()
    elif act == 'elu':
        act_fn = nn.ELU()
    elif act == 'hardshrink':
        act_fn = nn.Hardshrink()
    elif act == 'hardtanh':
        act_fn = nn.Hardtanh()
    elif act == 'leakyrelu':
        act_fn = nn.LeakyReLU()
    elif act == 'logsigmoid':
        act_fn = nn.LogSigmoid()
    elif act == 'prelu':
        act_fn = nn.PReLU()
    elif act == 'relu6':
        act_fn = nn.ReLU6()
    elif act == 'rrelu':
        act_fn = nn.RReLU()
    elif act == 'celu':
        act_fn = nn.CELU()
    elif act == 'sigmoid':
        act_fn = nn.Sigmoid()
    elif act == 'softplus':
        act_fn = nn.Softplus()
    elif act == 'softshrink':
        act_fn = nn.Softshrink()
    elif act == 'softsign':
        act_fn = nn.Softsign()
    elif act == 'tanhshrink':
        act_fn = nn.Tanhshrink()
    else:
        raise ValueError('Act is not properly defined: check activations list')

    return act_fn
Example 19
 def __init__(self, P):
     super(AAF, self).__init__()
     self.P = P
     self.n = P.size()[0]
     self.F = [
         nn.ELU(),
         nn.Hardshrink(),
         nn.Hardtanh(),
         nn.LeakyReLU(),
         nn.LogSigmoid(),
         nn.ReLU(),
         nn.ReLU6(),
         nn.RReLU(),
         nn.SELU(),
         nn.CELU(),
         nn.Sigmoid(),
         nn.Softplus(),
         nn.Softshrink(),
         nn.Softsign(),
         nn.Tanh(),
         nn.Tanhshrink()
     ]
Example 20
    def AdaptiveActivationFunction16(self, x):
        sz = x.size()
        res = torch.zeros(sz).cuda()
        af = [0] * 16
        af[0] = nn.ELU()(x)
        af[1] = nn.Hardshrink()(x)
        af[2] = nn.Hardtanh()(x)
        af[3] = nn.LeakyReLU()(x)
        af[4] = nn.LogSigmoid()(x)
        af[5] = nn.ReLU()(x)
        af[6] = nn.ReLU6()(x)
        af[7] = nn.RReLU()(x)
        af[8] = nn.SELU()(x)
        af[9] = nn.CELU()(x)
        af[10] = nn.Sigmoid()(x)
        af[11] = nn.Softplus()(x)
        af[12] = nn.Softshrink()(x)
        af[13] = nn.Softsign()(x)
        af[14] = nn.Tanh()(x)
        af[15] = nn.Tanhshrink()(x)
        # weighted sum of all 16 activation outputs
        for i in range(16):
            res += self.adaparam[i] * af[i]

        return res
Example 21
    def __init__(self,
                 activation_function=0,
                 image_size=(140, 140),
                 kernel_size=3,
                 pool_var=False):
        super().__init__()

        self.pool_var = pool_var

        #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        #Define the activation function
        self.act = []
        self.act.append(nn.Sigmoid())  #0
        self.act.append(nn.ReLU())  #1
        self.act.append(nn.LeakyReLU())  #2
        self.act.append(nn.Tanh())  #3
        self.act.append(nn.SELU())  #4
        self.act.append(nn.Hardshrink())  #5
        self.act.append(nn.Hardtanh())  #6

        self.activation = self.act[activation_function]

        #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        self.conv1 = nn.Conv2d(3, 6, kernel_size)
        self.conv2 = nn.Conv2d(6, 16, kernel_size)
        self.conv3 = nn.Conv2d(16, 32, 5)
        self.pool = nn.MaxPool2d(2, 2)

        self.n_neurons_fc = self.evaluateNeuronFCLayer(image_size)
        # self.n_neurons_fc = 25088

        self.fc1 = nn.Linear(self.n_neurons_fc, 2048)
        self.fc2 = nn.Linear(2048, 256)
        # self.fc3 = nn.Linear(1024, 256)
        self.fc4 = nn.Linear(256, 1)
        self.softmax = nn.Softmax(dim=1)
Example 22
 def __init__(self):
     self.best_step = 1000
     self.activations = {
         'sigmoid': nn.Sigmoid(),
         'custom': self.custom,
         'relu': nn.ReLU(),
         'relu6': nn.ReLU6(),
         'rrelu0103': nn.RReLU(0.1, 0.3),
         'rrelu0205': nn.RReLU(0.2, 0.5),
         'htang1': nn.Hardtanh(-1, 1),
         'htang2': nn.Hardtanh(-2, 2),
         'htang3': nn.Hardtanh(-3, 3),
         'tanh': nn.Tanh(),
         'elu': nn.ELU(),
         'selu': nn.SELU(),
         'hardshrink': nn.Hardshrink(),
         'leakyrelu01': nn.LeakyReLU(0.1),
         'leakyrelu001': nn.LeakyReLU(0.01),
         'logsigmoid': nn.LogSigmoid(),
         'prelu': nn.PReLU(),
     }
     self.learning_rate = 1.0
     self.hidden_size = 11
     self.activation_hidden = 'relu'
     self.activation_output = 'sigmoid'
     self.sols = {}
     self.solsSum = {}
     self.random = 0
     self.random_grid = [_ for _ in range(10)]
     self.hidden_size_grid = [3, 5, 7, 11]
     self.learning_rate_grid = [0.001, 0.01, 0.1, 1.0, 10.0, 100.0]
     #self.learning_rate_grid = [1.0 + i/100.0 for i in range(10)]
     self.activation_hidden_grid = self.activations.keys()
     #self.activation_output_grid = self.activations.keys()
     self.grid_search = GridSearch(self)
     self.grid_search.set_enabled(True)
Example 23
def get_layers_for_network_module(nnpt_params, task_type, first_layer_units):
    layers = []
    nnpt_params["hidden_layer_info"] = {
        int(key): val
        for key, val in nnpt_params['hidden_layer_info'].items()
    }

    layers_list = sorted(tuple(nnpt_params["hidden_layer_info"]))

    print("=" * 45)
    print(nnpt_params)
    print("n_layers - ", len(layers_list))
    print("task_type - ", task_type)
    print("layers_tuple - ", layers_list)

    if task_type == "CLASSIFICATION":
        for val in layers_list:
            print(val)
            layer_dict = nnpt_params["hidden_layer_info"][val]
            layer_name = layer_dict["layer"]
            if val == 1:
                layer_units_ip = first_layer_units
            else:
                layer_units_ip = layer_dict["units_ip"]

            kernel_weight_init = layer_dict["weight_init"]
            kernel_bias_init = layer_dict["bias_init"]
            kernel_weight_constraint = layer_dict["weight_constraint"]
            layer_units_op = layer_dict["units_op"]
            #layer_bias = layer_dict["bias"]
            layer_activation = layer_dict["activation"]
            layer_batchnormalization = layer_dict["batchnormalization"]
            layer_dropout = layer_dict["dropout"]

            print("~" * 50)
            print("Layer ID - ", val)
            print("Layer Name - ", layer_name)
            print("~" * 50)

            if layer_name == "Linear":
                main_layer = nn.Linear(in_features=layer_units_ip,
                                       out_features=layer_units_op)
                get_kernel_weights(kernel_weight_init, main_layer,
                                   layer_units_ip, layer_units_op)
                get_kernel_bias(kernel_bias_init, main_layer, layer_units_ip,
                                layer_units_op)
                get_kernel_weight_constraint(kernel_weight_constraint,
                                             main_layer)
                layers.append(main_layer)
                if layer_activation != None:
                    if layer_activation["name"] == "ELU":
                        activation = nn.ELU(alpha=layer_activation["alpha"],
                                            inplace=False)
                        layers.append(activation)
                    if layer_activation["name"] == "Hardshrink":
                        activation = nn.Hardshrink(
                            lambd=layer_activation["lambd"])
                        layers.append(activation)
                    if layer_activation["name"] == "Hardtanh":
                        activation = nn.Hardtanh(
                            min_val=layer_activation["min_val"],
                            max_val=layer_activation["max_val"],
                            inplace=False)
                        layers.append(activation)
                    if layer_activation["name"] == "LeakyReLU":
                        activation = nn.LeakyReLU(
                            negative_slope=layer_activation["negative_slope"],
                            inplace=False)
                        layers.append(activation)
                    if layer_activation["name"] == "LogSigmoid":
                        activation = nn.LogSigmoid()
                        layers.append(activation)
                    if layer_activation["name"] == "MultiheadAttention":
                        activation = nn.MultiheadAttention(
                            embed_dim=layer_activation["embed_dim"],
                            num_heads=layer_activation["num_heads"],
                            dropout=layer_activation["dropout"],
                            bias=layer_activation["bias"],
                            add_bias_kv=layer_activation["add_bias_kv"],
                            add_zero_attn=layer_activation["add_zero_attn"],
                            kdim=layer_activation["kdim"],
                            vdim=layer_activation["vdim"])
                        layers.append(activation)
                    if layer_activation["name"] == "PreLU":
                        activation = nn.PreLU(
                            num_parameters=layer_activation["num_parameters"],
                            init=layer_activation["init"])
                        layers.append(activation)
                    if layer_activation["name"] == "ReLU":
                        activation = nn.ReLU()
                        layers.append(activation)
                    if layer_activation["name"] == "ReLU6":
                        activation = nn.ReLU6()
                        layers.append(activation)
                    if layer_activation["name"] == "RreLU":
                        activation = nn.RreLU(lower=layer_activation["lower"],
                                              upper=layer_activation["upper"],
                                              inplace=False)
                        layers.append(activation)
                    if layer_activation["name"] == "SELU":
                        activation = nn.SELU()
                        layers.append(activation)
                    if layer_activation["name"] == "CELU":
                        activation = nn.CELU(alpha=layer_activation["alpha"],
                                             inplace=False)
                        layers.append(activation)
                    if layer_activation["name"] == "GELU":
                        activation = nn.GELU()
                        layers.append(activation)
                    if layer_activation["name"] == "Sigmoid":
                        activation = nn.Sigmoid()
                        layers.append(activation)
                    if layer_activation["name"] == "Softplus":
                        activation = nn.Softplus(
                            beta=layer_activation["beta"],
                            threshold=layer_activation["threshold"])
                        layers.append(activation)
                    if layer_activation["name"] == "Softshrink":
                        activation = nn.Softshrink(
                            lambd=layer_activation["lambd"])
                        layers.append(activation)
                    if layer_activation["name"] == "Softsign":
                        activation = nn.Softsign()
                        layers.append(activation)
                    if layer_activation["name"] == "Tanh":
                        activation = nn.Tanh()
                        layers.append(activation)
                    if layer_activation["name"] == "Tanhshrink":
                        activation = nn.Tanhshrink()
                        layers.append(activation)
                    if layer_activation["name"] == "Threshold":
                        activation = nn.Threshold(
                            threshold=layer_activation["threshold"],
                            value=layer_activation["value"])
                        layers.append(activation)
                    if layer_activation["name"] == "Softmin":
                        activation = nn.Softmin(dim=layer_activation["dim"])
                        layers.append(activation)
                    if layer_activation["name"] == "Softmax":
                        activation = nn.Softmax(dim=layer_activation["dim"])
                        layers.append(activation)
                    if layer_activation["name"] == "Softmax2d":
                        activation = nn.Softmax2d()
                        layers.append(activation)
                    if layer_activation["name"] == "LogSoftmax":
                        activation = nn.LogSoftmax(dim=layer_activation["dim"])
                        layers.append(activation)
                    if layer_activation[
                            "name"] == "AdaptiveLogSoftmaxWithLoss":
                        activation = nn.AdaptiveLogSoftmaxWithLoss(
                            n_classes=layer_activation["n_classes"],
                            cutoffs=layer_activation["cutoffs"],
                            div_value=layer_activation["div_value"],
                            head_bias=layer_activation["head_bias"])
                        layers.append(activation)
                else:
                    pass

                if layer_batchnormalization != None:
                    if layer_batchnormalization["name"] == "BatchNorm1d":
                        batch_normalization = nn.BatchNorm1d(
                            num_features=layer_batchnormalization[
                                "num_features"],
                            eps=layer_batchnormalization["eps"],
                            momentum=layer_batchnormalization["momentum"],
                            affine=layer_batchnormalization["affine"],
                            track_running_stats=layer_batchnormalization[
                                "track_running_stats"])
                        layers.append(batch_normalization)
                    if layer_batchnormalization["name"] == "BatchNorm2d":
                        batch_normalization = nn.BatchNorm2d(
                            num_features=layer_batchnormalization[
                                "num_features"],
                            eps=layer_batchnormalization["eps"],
                            momentum=layer_batchnormalization["momentum"],
                            affine=layer_batchnormalization["affine"],
                            track_running_stats=layer_batchnormalization[
                                "track_running_stats"])
                        layers.append(batch_normalization)
                    if layer_batchnormalization["name"] == "BatchNorm3d":
                        batch_normalization = nn.BatchNorm3d(
                            num_features=layer_batchnormalization[
                                "num_features"],
                            eps=layer_batchnormalization["eps"],
                            momentum=layer_batchnormalization["momentum"],
                            affine=layer_batchnormalization["affine"],
                            track_running_stats=layer_batchnormalization[
                                "track_running_stats"])
                        layers.append(batch_normalization)
                    if layer_batchnormalization["name"] == "SyncBatchNorm":
                        batch_normalization = nn.SyncBatchNorm(
                            num_features=layer_batchnormalization[
                                "num_features"],
                            eps=layer_batchnormalization["eps"],
                            momentum=layer_batchnormalization["momentum"],
                            affine=layer_batchnormalization["affine"],
                            track_running_stats=layer_batchnormalization[
                                "track_running_stats"],
                            process_group=layer_batchnormalization[
                                "process_group"])
                        layers.append(batch_normalization)
                    if layer_batchnormalization["name"] == "InstanceNorm1d":
                        batch_normalization = nn.InstanceNorm1d(
                            num_features=layer_batchnormalization[
                                "num_features"],
                            eps=layer_batchnormalization["eps"],
                            momentum=layer_batchnormalization["momentum"],
                            affine=layer_batchnormalization["affine"],
                            track_running_stats=layer_batchnormalization[
                                "track_running_stats"])
                        layers.append(batch_normalization)
                    if layer_batchnormalization["name"] == "InstanceNorm2d":
                        batch_normalization = nn.InstanceNorm2d(
                            num_features=layer_batchnormalization[
                                "num_features"],
                            eps=layer_batchnormalization["eps"],
                            momentum=layer_batchnormalization["momentum"],
                            affine=layer_batchnormalization["affine"],
                            track_running_stats=layer_batchnormalization[
                                "track_running_stats"])
                        layers.append(batch_normalization)
                    if layer_batchnormalization["name"] == "InstanceNorm3d":
                        batch_normalization = nn.InstanceNorm3d(
                            num_features=layer_batchnormalization[
                                "num_features"],
                            eps=layer_batchnormalization["eps"],
                            momentum=layer_batchnormalization["momentum"],
                            affine=layer_batchnormalization["affine"],
                            track_running_stats=layer_batchnormalization[
                                "track_running_stats"])
                        layers.append(batch_normalization)
                    if layer_batchnormalization["name"] == "GroupNorm":
                        batch_normalization = nn.GroupNorm(
                            num_groups=layer_batchnormalization["num_groups"],
                            num_channels=layer_batchnormalization[
                                "num_channels"],
                            eps=layer_batchnormalization["eps"],
                            affine=layer_batchnormalization["affine"])
                        layers.append(batch_normalization)
                    if layer_batchnormalization["name"] == "LayerNorm":
                        batch_normalization = nn.LayerNorm(
                            normalized_shape=layer_batchnormalization[
                                "normalized_shape"],
                            eps=layer_batchnormalization["eps"],
                            elementwise_affine=layer_batchnormalization[
                                "elementwise_affine"])
                        layers.append(batch_normalization)
                    if layer_batchnormalization["name"] == "LocalResponseNorm":
                        batch_normalization = nn.LocalResponseNorm(
                            size=layer_batchnormalization["size"],
                            alpha=layer_batchnormalization["alpha"],
                            beta=layer_batchnormalization["beta"],
                            k=layer_batchnormalization["k"])
                        layers.append(batch_normalization)
                else:
                    pass

                if layer_dropout != None:
                    if layer_dropout["name"] == "Dropout":
                        dropout = nn.Dropout(p=layer_dropout["p"],
                                             inplace=False)
                        layers.append(dropout)
                    if layer_dropout["name"] == "Dropout2d":
                        dropout = nn.Dropout2d(p=layer_dropout["p"],
                                               inplace=False)
                        layers.append(dropout)
                    if layer_dropout["name"] == "Dropout3d":
                        dropout = nn.Dropout3d(p=layer_dropout["p"],
                                               inplace=False)
                        layers.append(dropout)
                    if layer_dropout["name"] == "AlphaDropout":
                        dropout = nn.AlphaDropout(p=layer_dropout["p"],
                                                  inplace=False)
                        layers.append(dropout)
                else:
                    pass

        print("~" * 50)
        print("FINAL LAYERS FOR NETWORK - ", layers)
        print("~" * 50)

    if task_type == "REGRESSION":
        for val in layers_list:
            layer_dict = nnpt_params["hidden_layer_info"][val]
            layer_name = layer_dict["layer"]
            if val == 1:
                layer_units_ip = first_layer_units
            else:
                layer_units_ip = layer_dict["units_ip"]
            layer_units_op = layer_dict["units_op"]
            layer_bias = layer_dict["bias"]
            layer_activation = layer_dict["activation"]
            layer_batchnormalization = layer_dict["batchnormalization"]
            layer_dropout = layer_dict["dropout"]

            print("~" * 50)
            print("Layer ID - ", val)
            print("Layer Name - ", layer_name)
            print("~" * 50)

            if layer_name == "Linear":
                main_layer = nn.Linear(in_features=layer_units_ip,
                                       out_features=layer_units_op,
                                       bias=layer_bias)
                layers.append(main_layer)
                if layer_activation != None:
                    if layer_activation["name"] == "ELU":
                        activation = nn.ELU(alpha=layer_activation["alpha"],
                                            inplace=False)
                    if layer_activation["name"] == "Hardshrink":
                        activation = nn.Hardshrink(
                            lambd=layer_activation["lambd"])
                    if layer_activation["name"] == "Hardtanh":
                        activation = nn.Hardtanh(
                            min_val=layer_activation["min_val"],
                            max_val=layer_activation["max_val"],
                            inplace=False)
                    if layer_activation["name"] == "LeakyReLU":
                        activation = nn.LeakyReLU(
                            negative_slope=layer_activation["negative_slope"],
                            inplace=False)
                    if layer_activation["name"] == "LogSigmoid":
                        activation = nn.LogSigmoid()
                    if layer_activation["name"] == "MultiheadAttention":
                        activation = nn.MultiheadAttention(
                            embed_dim=layer_activation["embed_dim"],
                            num_heads=layer_activation["num_heads"],
                            dropout=layer_activation["dropout"],
                            bias=layer_activation["bias"],
                            add_bias_kv=layer_activation["add_bias_kv"],
                            add_zero_attn=layer_activation["add_zero_attn"],
                            kdim=layer_activation["kdim"],
                            vdim=layer_activation["vdim"])
                    if layer_activation["name"] == "PreLU":
                        activation = nn.PreLU(
                            num_parameters=layer_activation["num_parameters"],
                            init=layer_activation["init"])
                    if layer_activation["name"] == "ReLU":
                        activation = nn.ReLU()
                    if layer_activation["name"] == "ReLU6":
                        activation = nn.ReLU6()
                    if layer_activation["name"] == "RreLU":
                        activation = nn.RreLU(lower=layer_activation["lower"],
                                              upper=layer_activation["upper"],
                                              inplace=False)
                    if layer_activation["name"] == "SELU":
                        activation = nn.SELU()
                    if layer_activation["name"] == "CELU":
                        activation = nn.CELU(alpha=layer_activation["alpha"],
                                             inplace=False)
                    if layer_activation["name"] == "GELU":
                        activation = nn.GELU()
                    if layer_activation["name"] == "Sigmoid":
                        activation = nn.Sigmoid()
                    if layer_activation["name"] == "Softplus":
                        activation = nn.Softplus(
                            beta=layer_activation["beta"],
                            threshold=layer_activation["threshold"])
                    if layer_activation["name"] == "Softshrink":
                        activation = nn.Softshrink(
                            lambd=layer_activation["lambd"])
                    if layer_activation["name"] == "Softsign":
                        activation = nn.Softsign()
                    if layer_activation["name"] == "Tanh":
                        activation = nn.Tanh()
                    if layer_activation["name"] == "Tanhshrink":
                        activation = nn.Tanhshrink()
                    if layer_activation["name"] == "Threshold":
                        activation = nn.Threshold(
                            threshold=layer_activation["threshold"],
                            value=layer_activation["value"])
                    if layer_activation["name"] == "Softmin":
                        activation = nn.Softmin(dim=layer_activation["dim"])
                    if layer_activation["name"] == "Softmax":
                        activation = nn.Softmax(dim=layer_activation["dim"])
                    if layer_activation["name"] == "Softmax2d":
                        activation = nn.Softmax2d()
                    if layer_activation["name"] == "LogSoftmax":
                        activation = nn.LogSoftmax(dim=layer_activation["dim"])
                    if layer_activation[
                            "name"] == "AdaptiveLogSoftmaxWithLoss":
                        activation = nn.AdaptiveLogSoftmaxWithLoss(
                            n_classes=layer_activation["n_classes"],
                            cutoffs=layer_activation["cutoffs"],
                            div_value=layer_activation["div_value"],
                            head_bias=layer_activation["head_bias"])

                    layers.append(activation)
                else:
                    pass

                if layer_batchnormalization != None:
                    if layer_batchnormalization["name"] == "BatchNorm1d":
                        batch_normalization = nn.BatchNorm1d(
                            num_features=layer_batchnormalization[
                                "num_features"],
                            eps=layer_batchnormalization["eps"],
                            momentum=layer_batchnormalization["momentum"],
                            affine=layer_batchnormalization["affine"],
                            track_running_stats=layer_batchnormalization[
                                "track_running_stats"])
                    if layer_batchnormalization["name"] == "BatchNorm2d":
                        batch_normalization = nn.BatchNorm2d(
                            num_features=layer_batchnormalization[
                                "num_features"],
                            eps=layer_batchnormalization["eps"],
                            momentum=layer_batchnormalization["momentum"],
                            affine=layer_batchnormalization["affine"],
                            track_running_stats=layer_batchnormalization[
                                "track_running_stats"])
                    if layer_batchnormalization["name"] == "BatchNorm3d":
                        batch_normalization = nn.BatchNorm3d(
                            num_features=layer_batchnormalization[
                                "num_features"],
                            eps=layer_batchnormalization["eps"],
                            momentum=layer_batchnormalization["momentum"],
                            affine=layer_batchnormalization["affine"],
                            track_running_stats=layer_batchnormalization[
                                "track_running_stats"])
                    if layer_batchnormalization["name"] == "SyncBatchNorm":
                        batch_normalization = nn.SyncBatchNorm(
                            num_features=layer_batchnormalization[
                                "num_features"],
                            eps=layer_batchnormalization["eps"],
                            momentum=layer_batchnormalization["momentum"],
                            affine=layer_batchnormalization["affine"],
                            track_running_stats=layer_batchnormalization[
                                "track_running_stats"],
                            process_group=layer_batchnormalization[
                                "process_group"])
                    if layer_batchnormalization["name"] == "InstanceNorm1d":
                        batch_normalization = nn.InstanceNorm1d(
                            num_features=layer_batchnormalization[
                                "num_features"],
                            eps=layer_batchnormalization["eps"],
                            momentum=layer_batchnormalization["momentum"],
                            affine=layer_batchnormalization["affine"],
                            track_running_stats=layer_batchnormalization[
                                "track_running_stats"])
                    if layer_batchnormalization["name"] == "InstanceNorm2d":
                        batch_normalization = nn.InstanceNorm2d(
                            num_features=layer_batchnormalization[
                                "num_features"],
                            eps=layer_batchnormalization["eps"],
                            momentum=layer_batchnormalization["momentum"],
                            affine=layer_batchnormalization["affine"],
                            track_running_stats=layer_batchnormalization[
                                "track_running_stats"])
                    if layer_batchnormalization["name"] == "InstanceNorm3d":
                        batch_normalization = nn.InstanceNorm3d(
                            num_features=layer_batchnormalization[
                                "num_features"],
                            eps=layer_batchnormalization["eps"],
                            momentum=layer_batchnormalization["momentum"],
                            affine=layer_batchnormalization["affine"],
                            track_running_stats=layer_batchnormalization[
                                "track_running_stats"])
                    if layer_batchnormalization["name"] == "GroupNorm":
                        batch_normalization = nn.GroupNorm(
                            num_groups=layer_batchnormalization["num_groups"],
                            num_channels=layer_batchnormalization[
                                "num_channels"],
                            eps=layer_batchnormalization["eps"],
                            affine=layer_batchnormalization["affine"])
                    if layer_batchnormalization["name"] == "LayerNorm":
                        batch_normalization = nn.LayerNorm(
                            normalized_shape=layer_batchnormalization[
                                "normalized_shape"],
                            eps=layer_batchnormalization["eps"],
                            elementwise_affine=layer_batchnormalization[
                                "elementwise_affine"])
                    if layer_batchnormalization["name"] == "LocalResponseNorm":
                        batch_normalization = nn.LocalResponseNorm(
                            size=layer_batchnormalization["size"],
                            alpha=layer_batchnormalization["alpha"],
                            beta=layer_batchnormalization["beta"],
                            k=layer_batchnormalization["k"])

                    layers.append(batch_normalization)
                else:
                    pass

                if layer_dropout != None:
                    if layer_dropout["name"] == "Dropout":
                        dropout = nn.Dropout(p=layer_dropout["p"],
                                             inplace=False)
                    if layer_dropout["name"] == "Dropout2d":
                        dropout = nn.Dropout2d(p=layer_dropout["p"],
                                               inplace=False)
                    if layer_dropout["name"] == "Dropout3d":
                        dropout = nn.Dropout3d(p=layer_dropout["p"],
                                               inplace=False)
                    if layer_dropout["name"] == "AlphaDropout":
                        dropout = nn.AlphaDropout(p=layer_dropout["p"],
                                                  inplace=False)

                    layers.append(dropout)
                else:
                    pass

    return layers
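
A hypothetical minimal call (the dictionary keys mirror the ones the function reads above; all values are illustrative only) that assembles the returned layer list into a model:

nnpt_params = {
    "hidden_layer_info": {
        "1": {
            "layer": "Linear",
            "units_ip": 8,       # ignored for layer 1; first_layer_units is used instead
            "units_op": 1,
            "bias": True,
            "activation": {"name": "Hardshrink", "lambd": 0.5},
            "batchnormalization": None,
            "dropout": None,
        }
    }
}
layers = get_layers_for_network_module(nnpt_params, "REGRESSION", first_layer_units=8)
model = nn.Sequential(*layers)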
Example 24
 def __append_layer(self, net_style, args_dict):
     args_values_list = list(args_dict.values())
     if net_style == "Conv2d":
         self.layers.append(
             nn.Conv2d(args_values_list[0], args_values_list[1],
                       args_values_list[2], args_values_list[3],
                       args_values_list[4], args_values_list[5],
                       args_values_list[6], args_values_list[7]))
     elif net_style == "MaxPool2d":
         self.layers.append(
             nn.MaxPool2d(args_values_list[0], args_values_list[1],
                          args_values_list[2], args_values_list[3],
                          args_values_list[4], args_values_list[5]))
     elif net_style == "Linear":
         self.layers.append(
             nn.Linear(args_values_list[0], args_values_list[1],
                       args_values_list[2]))
     elif net_style == "reshape":
          # special case: for "reshape", pass the target tensor size through directly
          # print(type(args_values_list[0]))
         self.layers.append(args_values_list[0])
     elif net_style == "Conv1d":
         self.layers.append(
             nn.Conv1d(args_values_list[0], args_values_list[1],
                       args_values_list[2], args_values_list[3],
                       args_values_list[4], args_values_list[5],
                       args_values_list[6], args_values_list[7]))
     elif net_style == "Conv3d":
         self.layers.append(
             nn.Conv3d(args_values_list[0], args_values_list[1],
                       args_values_list[2], args_values_list[3],
                       args_values_list[4], args_values_list[5],
                       args_values_list[6], args_values_list[7]))
     elif net_style == "ConvTranspose1d":
         self.layers.append(
             nn.ConvTranspose1d(args_values_list[0], args_values_list[1],
                                args_values_list[2], args_values_list[3],
                                args_values_list[4], args_values_list[5],
                                args_values_list[6], args_values_list[7],
                                args_values_list[8]))
     elif net_style == "ConvTranspose2d":
         self.layers.append(
             nn.ConvTranspose2d(args_values_list[0], args_values_list[1],
                                args_values_list[2], args_values_list[3],
                                args_values_list[4], args_values_list[5],
                                args_values_list[6], args_values_list[7],
                                args_values_list[8]))
     elif net_style == "ConvTranspose3d":
         self.layers.append(
             nn.ConvTranspose3d(args_values_list[0], args_values_list[1],
                                args_values_list[2], args_values_list[3],
                                args_values_list[4], args_values_list[5],
                                args_values_list[6], args_values_list[7],
                                args_values_list[8]))
     elif net_style == "Unfold":
         self.layers.append(
             nn.Unfold(args_values_list[0], args_values_list[1],
                       args_values_list[2], args_values_list[3]))
     elif net_style == "Fold":
         self.layers.append(
             nn.Unfold(args_values_list[0], args_values_list[1],
                       args_values_list[2], args_values_list[3],
                       args_values_list[4]))
     elif net_style == "MaxPool1d":
         self.layers.append(
             nn.MaxPool1d(args_values_list[0], args_values_list[1],
                          args_values_list[2], args_values_list[3],
                          args_values_list[4], args_values_list[5]))
     elif net_style == "MaxPool3d":
         self.layers.append(
             nn.MaxPool3d(args_values_list[0], args_values_list[1],
                          args_values_list[2], args_values_list[3],
                          args_values_list[4], args_values_list[5]))
     elif net_style == "MaxUnpool1d":
         self.layers.append(
             nn.MaxUnpool1d(args_values_list[0], args_values_list[1],
                            args_values_list[2]))
     elif net_style == "MaxUnpool2d":
         self.layers.append(
             nn.MaxUnpool2d(args_values_list[0], args_values_list[1],
                            args_values_list[2]))
     elif net_style == "MaxUnpool3d":
         self.layers.append(
             nn.MaxUnpool3d(args_values_list[0], args_values_list[1],
                            args_values_list[2]))
     elif net_style == "AvgPool1d":
         self.layers.append(
             nn.AvgPool1d(args_values_list[0], args_values_list[1],
                          args_values_list[2], args_values_list[3],
                          args_values_list[4]))
     elif net_style == "AvgPool2d":
         self.layers.append(
             nn.AvgPool2d(args_values_list[0], args_values_list[1],
                          args_values_list[2], args_values_list[3],
                          args_values_list[4]))
     elif net_style == "AvgPool3d":
         self.layers.append(
             nn.AvgPool3d(args_values_list[0], args_values_list[1],
                          args_values_list[2], args_values_list[3],
                          args_values_list[4]))
     elif net_style == "FractionalMaxPool2d":
         self.layers.append(
             nn.FractionalMaxPool2d(args_values_list[0],
                                    args_values_list[1],
                                    args_values_list[2],
                                    args_values_list[3],
                                    args_values_list[4]))
     elif net_style == "LPPool1d":
         self.layers.append(
             nn.LPPool1d(args_values_list[0], args_values_list[1],
                         args_values_list[2], args_values_list[3]))
     elif net_style == "LPPool2d":
         self.layers.append(
             nn.LPPool2d(args_values_list[0], args_values_list[1],
                         args_values_list[2], args_values_list[3]))
     elif net_style == "AdaptiveMaxPool1d":
         self.layers.append(
             nn.AdaptiveMaxPool1d(args_values_list[0], args_values_list[1]))
     elif net_style == "AdaptiveMaxPool2d":
         self.layers.append(
             nn.AdaptiveMaxPool2d(args_values_list[0], args_values_list[1]))
     elif net_style == "AdaptiveMaxPool3d":
         self.layers.append(
             nn.AdaptiveMaxPool3d(args_values_list[0], args_values_list[1]))
     elif net_style == "AdaptiveAvgPool1d":
         self.layers.append(nn.AdaptiveAvgPool1d(args_values_list[0]))
     elif net_style == "AdaptiveAvgPool2d":
         self.layers.append(nn.AdaptiveAvgPool2d(args_values_list[0]))
     elif net_style == "AdaptiveAvgPool3d":
         self.layers.append(nn.AdaptiveAvgPool3d(args_values_list[0]))
     elif net_style == "ReflectionPad1d":
         self.layers.append(nn.ReflectionPad1d(args_values_list[0]))
     elif net_style == "ReflectionPad2d":
         self.layers.append(nn.ReflectionPad2d(args_values_list[0]))
     elif net_style == "ReplicationPad1d":
         self.layers.append(nn.ReplicationPad1d(args_values_list[0]))
     elif net_style == "ReplicationPad2d":
         self.layers.append(nn.ReplicationPad2d(args_values_list[0]))
     elif net_style == "ReplicationPad3d":
         self.layers.append(nn.ReplicationPad3d(args_values_list[0]))
     elif net_style == "ZeroPad2d":
         self.layers.append(nn.ZeroPad2d(args_values_list[0]))
     elif net_style == "ConstantPad1d":
         self.layers.append(
             nn.ConstantPad1d(args_values_list[0], args_values_list[1]))
     elif net_style == "ConstantPad2d":
         self.layers.append(
             nn.ConstantPad2d(args_values_list[0], args_values_list[1]))
     elif net_style == "ConstantPad3d":
         self.layers.append(
             nn.ConstantPad3d(args_values_list[0], args_values_list[1]))
     elif net_style == "ELU":
         self.layers.append(nn.ELU(args_values_list[0],
                                   args_values_list[1]))
     elif net_style == "Hardshrink":
         self.layers.append(nn.Hardshrink(args_values_list[0]))
     elif net_style == "Hardtanh":
         self.layers.append(
             nn.Hardtanh(args_values_list[0], args_values_list[1],
                         args_values_list[2], args_values_list[3],
                         args_values_list[4]))
     elif net_style == "LeakyReLU":
         self.layers.append(
             nn.LeakyReLU(args_values_list[0], args_values_list[1]))
     elif net_style == "LogSigmoid":
         self.layers.append(nn.LogSigmoid())
     elif net_style == "PReLU":
         self.layers.append(
             nn.PReLU(args_values_list[0], args_values_list[1]))
     elif net_style == "ReLU":
         self.layers.append(nn.ReLU(args_values_list[0]))
     elif net_style == "ReLU6":
         self.layers.append(nn.ReLU6(args_values_list[0]))
     elif net_style == "RReLU":
         self.layers.append(
             nn.RReLU(args_values_list[0], args_values_list[1],
                      args_values_list[2]))
     elif net_style == "SELU":
         self.layers.append(nn.SELU(args_values_list[0]))
     elif net_style == "CELU":
         self.layers.append(
             nn.CELU(args_values_list[0], args_values_list[1]))
     elif net_style == "Sigmoid":
         self.layers.append(nn.Sigmoid())
     elif net_style == "Softplus":
         self.layers.append(
             nn.Softplus(args_values_list[0], args_values_list[1]))
     elif net_style == "Softshrink":
         self.layers.append(nn.Softshrink(args_values_list[0]))
     elif net_style == "Softsign":
         self.layers.append(nn.Softsign())
     elif net_style == "Tanh":
         self.layers.append(nn.Tanh())
     elif net_style == "Tanhshrink":
         self.layers.append(nn.Tanhshrink())
     elif net_style == "Threshold":
         self.layers.append(
             nn.Threshold(args_values_list[0], args_values_list[1],
                          args_values_list[2]))
     elif net_style == "Softmin":
         self.layers.append(nn.Softmin(args_values_list[0]))
     elif net_style == "Softmax":
         self.layers.append(nn.Softmax(args_values_list[0]))
     elif net_style == "Softmax2d":
         self.layers.append(nn.Softmax2d())
     elif net_style == "LogSoftmax":
         self.layers.append(nn.LogSoftmax(args_values_list[0]))
     elif net_style == "AdaptiveLogSoftmaxWithLoss":
         self.layers.append(
             nn.AdaptiveLogSoftmaxWithLoss(args_values_list[0],
                                           args_values_list[1],
                                           args_values_list[2],
                                           args_values_list[3],
                                           args_values_list[4]))
     elif net_style == "BatchNorm1d":
         self.layers.append(
             nn.BatchNorm1d(args_values_list[0], args_values_list[1],
                            args_values_list[2], args_values_list[3],
                            args_values_list[4]))
     elif net_style == "BatchNorm2d":
         self.layers.append(
             nn.BatchNorm2d(args_values_list[0], args_values_list[1],
                            args_values_list[2], args_values_list[3],
                            args_values_list[4]))
     elif net_style == "BatchNorm3d":
         self.layers.append(
             nn.BatchNorm3d(args_values_list[0], args_values_list[1],
                            args_values_list[2], args_values_list[3],
                            args_values_list[4]))
     elif net_style == "GroupNorm":
         self.layers.append(
             nn.GroupNorm(args_values_list[0], args_values_list[1],
                          args_values_list[2], args_values_list[3]))
     elif net_style == "InstanceNorm1d":
         self.layers.append(
             nn.InstanceNorm1d(args_values_list[0], args_values_list[1],
                               args_values_list[2], args_values_list[3],
                               args_values_list[4]))
     elif net_style == "InstanceNorm2d":
         self.layers.append(
             nn.InstanceNorm2d(args_values_list[0], args_values_list[1],
                               args_values_list[2], args_values_list[3],
                               args_values_list[4]))
     elif net_style == "InstanceNorm3d":
         self.layers.append(
             nn.InstanceNorm3d(args_values_list[0], args_values_list[1],
                               args_values_list[2], args_values_list[3],
                               args_values_list[4]))
     elif net_style == "LayerNorm":
         self.layers.append(
             nn.LayerNorm(args_values_list[0], args_values_list[1],
                          args_values_list[2]))
     elif net_style == "LocalResponseNorm":
         self.layers.append(
             nn.LocalResponseNorm(args_values_list[0], args_values_list[1],
                                  args_values_list[2], args_values_list[3]))
     elif net_style == "Linear":
         self.layers.append(
             nn.Linear(args_values_list[0], args_values_list[1],
                       args_values_list[2]))
     elif net_style == "Dropout":
         self.layers.append(
             nn.Dropout(args_values_list[0], args_values_list[1]))
     elif net_style == "Dropout2d":
         self.layers.append(
             nn.Dropout2d(args_values_list[0], args_values_list[1]))
     elif net_style == "Dropout3d":
         self.layers.append(
             nn.Dropout3d(args_values_list[0], args_values_list[1]))
     elif net_style == "AlphaDropout":
         self.layers.append(
             nn.AlphaDropout(args_values_list[0], args_values_list[1]))
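
A minimal usage sketch of the dispatch above, assuming it lives in a method such as add_layer(self, net_style, args_values_list) on a builder class; both the method name and the LayerBuilder class are assumptions, since the excerpt only shows the elif body.

import torch.nn as nn

builder = LayerBuilder()                       # hypothetical enclosing class
builder.add_layer("Linear", [128, 64, True])   # in_features, out_features, bias
builder.add_layer("ReLU", [False])             # inplace=False
builder.add_layer("Dropout", [0.5, False])     # p, inplace
model = nn.Sequential(*builder.layers)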
Example No. 25
import torch.nn as nn

from src.optimizer import RAdam, AdamW, PlainRAdam, \
    AdaBound, AdaFactor, Lamb, Lookahead, Nadam, NovoGrad, Ralamb, RaLars, SGDW

activation = nn.ModuleDict([
    ['relu', nn.ReLU()],
    ['hardtanh', nn.Hardtanh()],
    ['relu6', nn.ReLU6()],
    ['sigmoid', nn.Sigmoid()],
    ['tanh', nn.Tanh()],
    ['softmax', nn.Softmax()],
    ['softmax2d', nn.Softmax2d()],
    ['logsoftmax', nn.LogSoftmax()],
    ['elu', nn.ELU()],
    ['selu', nn.SELU()],
    ['celu', nn.CELU()],
    ['hardshrink', nn.Hardshrink()],
    ['leakyrelu', nn.LeakyReLU()],
    ['logsigmoid', nn.LogSigmoid()],
    ['softplus', nn.Softplus()],
    ['softshrink', nn.Softshrink()],
    ['prelu', nn.PReLU()],
    ['softsign', nn.Softsign()],
    ['softmin', nn.Softmin()],
    ['tanhshrink', nn.Tanhshrink()],
    ['rrelu', nn.RReLU()],
    ['glu', nn.GLU()],
])
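
A short usage sketch (the input tensor is illustrative): nn.ModuleDict supports string-key lookup, so an activation can be selected by name at runtime.

import torch

act = activation['leakyrelu']
y = act(torch.randn(4, 8))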

loss = nn.ModuleDict([
    ['l1', nn.L1Loss()],
    ['nll', nn.NLLLoss()],
    ['kldiv', nn.KLDivLoss()],
Example No. 26
    def __init__(self):
        super(Model, self).__init__()

        self.act_0 = nn.Hardshrink()
        self.act_1 = nn.Hardshrink(lambd=0.3)
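
A quick check of what the two thresholds above do (the input values are illustrative): Hardshrink zeroes every element with |x| <= lambd and passes the rest through unchanged.

import torch
import torch.nn as nn

x = torch.tensor([-0.6, -0.4, 0.25, 0.8])
print(nn.Hardshrink()(x))           # lambd=0.5 -> tensor([-0.6000, 0.0000, 0.0000, 0.8000])
print(nn.Hardshrink(lambd=0.3)(x))  # lambd=0.3 -> tensor([-0.6000, -0.4000, 0.0000, 0.8000])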
Example No. 27
wt_efficiency_loader = DataLoader(wt_efficiency_set,
                                  batch_size=batch_size,
                                  shuffle=False)
eSpCas_loader = DataLoader(eSpCas_set, batch_size=batch_size, shuffle=False)
SpCas9_HF1_loader = DataLoader(SpCas9_HF1_set,
                               batch_size=batch_size,
                               shuffle=False)
# test_loader = DataLoader(test_set, batch_size=len(test_set), shuffle=False)

activation_functions = {
    'Sigmoid': nn.Sigmoid(),
    'Tanh': nn.Tanh(),
    'ReLU': nn.ReLU(),
    'LeakyReLU': nn.LeakyReLU(),
    'ELU': nn.ELU(),
    'Hardshrink': nn.Hardshrink(),
    'Hardswish': nn.Hardswish(),
    'ReLU6': nn.ReLU6(),
    'PReLU': nn.PReLU(),
    'None': nn.Identity()
}

pooling_function = {
    'avg': nn.AvgPool1d,
    'max': nn.MaxPool1d,
    'none': nn.Identity
}
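
A short usage sketch (layer sizes are illustrative): the activation entries are already-instantiated modules, while the pooling entries are classes that still need their constructor arguments.

import torch.nn as nn

act = activation_functions['LeakyReLU']
pool = pooling_function['max'](kernel_size=2)   # builds nn.MaxPool1d(kernel_size=2)
block = nn.Sequential(nn.Conv1d(4, 8, kernel_size=3), act, pool)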


def hook_layer(name):
    def hook(model, input, output):
Example No. 28
 def __init__(self):
     # Control speed of learning
     self.learning_rate = 0.05
     # Control number of hidden neurons
     self.loss_function_key = 'BCELoss'
     self.hidden_size1 = 128
     self.hidden_size2 = 128
     self.hidden_size3 = 128
     self.hidden_size4 = 128
     self.hidden_size5 = 8
     
     self.multiplier = 1.25
     self.momentum = 0.9
     self.lr_limit = 5
     self.best_correct = 0
     self.best_step = 200
     self.activation_hidden_key_1 = "hardshrink"
     self.activation_hidden_key_2 = "prelu"
     self.activation_hidden_key_3 = "hardshrink"
     self.activation_hidden_key_4 = "prelu"#"prelu"
     self.batch_norm=True
     
     
     self.activations = {
             'sigmoid': nn.Sigmoid(),
             'relu': nn.ReLU(),
             'relu6': nn.ReLU6(),
             'htang1': nn.Hardtanh(-1, 1),
             'tanh': nn.Tanh(),
             'selu': nn.SELU(),
             'hardshrink': nn.Hardshrink(),
             'prelu': nn.PReLU(),
         }
     self.loss_functions = {
             'BCELoss': nn.BCELoss(),
             'MSELoss': nn.MSELoss(),
              }
     
     
     
     #self.batch_norm_grid=[True,False]
     #self.activation_hidden_key_1_grid = list(self.activations.keys())
     #self.activation_hidden_key_2_grid = list(self.activations.keys())
     #self.loss_function_key_grid = list(self.loss_functions.keys())
     # Grid search settings, see grid_search_tutorial
     #self.learning_rate_grid =[0.015]
     #self.activation_function=["relu_","selu_","sigmoid"]
     #self.loss_function_grid=["BCELoss","MSE","L1Loss","CrossEntropyLoss","CTCLoss","BCELoss","BCEWithLogitsLoss",]
     #self.loss_function_grid=["MSE","BCELoss"]
     #self.learning_rate_grid =[0.009,0.011]
     #self.activation_function1_grid=["relu_","selu_","sigmoid","tanh"]
     #self.activation_function2_grid=["relu_","selu_","sigmoid","tanh"]
     #self.activation_function1_grid=["relu_"]
     #self.activation_function2_grid=["sigmoid"]
     self.learning_rate_grid = [0.01, 0.015, 0.05]
     #self.momentum_grid=np.linspace(0.88,0.98,5)
     #self.hidden_size1_grid = [32,64,88]
     #self.hidden_size3_grid = [256,512]
     #self.hidden_size2_grid = [256,512]
     # grid search will initialize this field
     self.grid_search = None
     # grid search will initialize this field
     self.iter = 0
     # This field indicates how many times to run with the same arguments
     self.iter_number = 5
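
A minimal sketch of how this keyed configuration could be consumed. The class name Config, the feature sizes, and the Sequential layout are assumptions for illustration; the excerpt only defines the configuration object itself.

import torch.nn as nn

in_features, out_features = 8, 1    # illustrative sizes
cfg = Config()                      # hypothetical name for the class defined above
model = nn.Sequential(
    nn.Linear(in_features, cfg.hidden_size1),
    cfg.activations[cfg.activation_hidden_key_1],
    nn.Linear(cfg.hidden_size1, cfg.hidden_size2),
    cfg.activations[cfg.activation_hidden_key_2],
    nn.Linear(cfg.hidden_size2, out_features),
    nn.Sigmoid(),                   # BCELoss above expects probabilities in [0, 1]
)
loss_fn = cfg.loss_functions[cfg.loss_function_key]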
Example No. 29
    def __init__(self,
                 ksize=7,
                 sigma=30,
                 initializeDCT=True,
                 shrinkage='hard'):
        super(__class__, self).__init__()
        """
        Args:
            - ksize: patch size for the DCT
            - sigma: noise level (multiplies the threshold)
            - initializeDCT: if True, initializes the convolutional
                layers as the DCT and iDCT transforms; if false it
                uses a random initialization.
            - shrinkage: type of shrinkage used (hard thresholding, 
                soft shrinkage or tanh shrinkage)
        Returns:
            - model: initialized model
        """
        from scipy.fftpack import dct, idct
        import numpy as np

        dtype = torch.FloatTensor
        if torch.cuda.is_available(): dtype = torch.cuda.FloatTensor

        self.sigma = sigma
        self.dct = initializeDCT

        ch = ksize**2

        # pad by reflection: to have the output with the same size
        # as the input we pad the image boundaries. Usually, zero
        # padding is used for CNNs. However, since we want to
        # reproduce the DCT denoising, we use reflection padding.
        # Reflection padding is a differentiable layer.
        self.padding = nn.ReflectionPad2d(2 * ksize // 2 - 1)

        # first convolutional layer (e.g. DCT transform)
        self.conv_in = nn.Conv2d(in_channels=1,
                                 out_channels=ch,
                                 kernel_size=ksize,
                                 stride=1,
                                 padding=0,
                                 bias=not initializeDCT)

        # threshold parameter (one variable per frequency)
        self.thr = nn.Parameter(dtype(np.ones((1, ch, 1, 1))),
                                requires_grad=True)

        # shrinkage function
        if shrinkage == 'hard': self.shrinkage = nn.Hardshrink(1.)
        elif shrinkage == 'soft': self.shrinkage = nn.Softshrink(1.)
        elif shrinkage == 'tanh': self.shrinkage = nn.Tanhshrink()
        else: raise ValueError('DCTlike: unknown shrinkage option %s' % shrinkage)

        # output conv layer (e.g. inverse DCT transform)
        self.conv_out = nn.Conv2d(in_channels=ch,
                                  out_channels=1,
                                  kernel_size=ksize,
                                  stride=1,
                                  padding=0,
                                  bias=not initializeDCT)

        # initialize the isometric DCT transforms
        if initializeDCT:

            # thresholding parameters (one per feature)
            factor = 3.0 if shrinkage == 'hard' else 1.5
            thr = np.ones((1, ch, 1, 1)) * sigma / 255. * factor
            thr[0, 0] = 1e-3  # don't threshold DC component
            self.thr.data = nn.Parameter(dtype(thr), requires_grad=True)

            for i in range(ch):
                # compute dct coefficients using scipy.fftpack
                a = np.zeros((ksize, ksize))
                a.flat[i] = 1

                # first layer with direct dct transform
                a1 = dct(dct(a.T, norm='ortho', type=3).T,
                         norm='ortho',
                         type=3)

                self.conv_in.weight.data[i, 0, :, :] = nn.Parameter(dtype(a1))

                # second layer, inverse transform rotated pi degrees
                a2 = idct(idct(a.T, norm='ortho', type=2).T,
                          norm='ortho',
                          type=2)
                a2 = np.flip(np.flip(a2, axis=0), axis=1)  # pi-rotation

                self.conv_out.weight.data[0,
                                          i, :, :] = 1 / (ch) * nn.Parameter(
                                              dtype(a2.copy()))

        # random initialization
        else:
            # this comes from:
            # 1) that the image data follows N(1/2, 1/4) (so 0.5 +- 2*sigma = [0,1])
            # 2) imposing the output variance to be 0.5 (the default threshold for
            #    hardshrink)
            std = 2. / np.sqrt(5.) / ksize
            for i in range(ch):
                self.conv_in.weight.data[i, 0, :, :] = dtype(
                    std * np.random.randn(ksize, ksize))
                self.conv_out.weight.data[0, i, :, :] = dtype(
                    std * np.random.randn(ksize, ksize))
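
A construction sketch; the class name DCTlike is inferred from the error message in the shrinkage selection and may differ in the original source. The forward pass is not part of the excerpt, so only instantiation is shown.

model_hard = DCTlike(ksize=7, sigma=30, initializeDCT=True, shrinkage='hard')
model_soft = DCTlike(ksize=7, sigma=30, initializeDCT=True, shrinkage='soft')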
Example No. 30
import numpy as np
import torch
import torch.nn as nn
from ..utils import solutionmanager as sm
from ..utils.gridsearch import GridSearch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

activations = [
    #  nn.Sigmoid(),
    #  nn.LogSigmoid(),
    nn.ReLU6(),
    nn.LeakyReLU(negative_slope=0.01),
    # nn.ELU(),
    # nn.SELU(),
    # nn.Hardtanh(),
    #   nn.Hardshrink(),
    nn.Hardshrink(1),
]


class SolutionModel(nn.Module):
    def __init__(self, input_size, output_size, params):
        super(SolutionModel, self).__init__()
        self.input_size = input_size
        self.params = params

        sizes = [input_size] + params.hidden_sizes + [output_size]

        self.layers = nn.ModuleList(
            nn.Linear(sizes[idx], sizes[idx+1]) for idx in range(len(sizes)-1)
        ).to(device)