Example #1
    def __init__(self):
        super(Model, self).__init__()

        self.act_0 = nn.Softmin(dim=1)
        self.act_1 = nn.Softmin(dim=1)
        self.act_2 = nn.Softmin(dim=0)
        self.act_3 = nn.Softmin(dim=2)
Example #2
    def __init__(self, P_portion, alpha, beta, d_xyt, device):
        super(offset_loss, self).__init__()

        self.alpha = alpha
        self.device = device
        self.P_portion = P_portion
        self.beta = beta

        self.x_kedu = d_xyt[0, 1] - d_xyt[0, 0]
        self.y_kedu = d_xyt[1, 1] - d_xyt[1, 0]
        self.t_kedu = d_xyt[2, 1] - d_xyt[2, 0]

        self.x_cube = torch.from_numpy(d_xyt[0, :]).reshape(1, P_portion).to(
            device, dtype=torch.float)
        self.y_cube = torch.from_numpy(d_xyt[1, :]).reshape(1, P_portion).to(
            device, dtype=torch.float)
        self.t_cube = torch.from_numpy(d_xyt[2, :]).reshape(1, P_portion).to(
            device, dtype=torch.float)

        # cross entropy
        self.CELoss = nn.CrossEntropyLoss(reduction='sum').to(device)

        # NLL Loss
        self.NLLLoss = nn.NLLLoss(reduction='sum').to(device)

        # softmin
        self.softmin = nn.Softmin()
Example #3
    def __init__(self):
        super(MyLeNet, self).__init__()
        self.choice_conv = nn.ModuleDict({
            'conv1': nn.Conv2d(1, 10, 3, 1, 2),
            'conv2': nn.Conv2d(10, 10, 3, 1),
            'conv3': nn.Conv2d(1, 10, 3, 1),
            'conv4': nn.Conv2d(1, 10, 5, 1),
            'conv5': nn.Conv2d(1, 6, 5, 1),  ##c1
            'conv6': nn.Conv2d(6, 16, 5, 1),  ##c2
            'conv7': nn.Conv2d(16, 120, 5, 1)  ##c3
        })
        self.choice_pooling = nn.ModuleDict({
            'maxpooling1': nn.MaxPool2d(2, 2),
            #'maxpooling2':nn.MaxPool2d(1,1),
            'avgpooling1': nn.AvgPool2d(2, 2),
        })
        self.choice_activations = nn.ModuleDict({
            'rule': nn.ReLU(),
            'leakyrule': nn.LeakyReLU(),
            'logsigmoid': nn.LogSigmoid(),
            'prelu': nn.PReLU(),
            'sigmoid': nn.Sigmoid(),
            'tanh': nn.Tanh(),
            'softmin': nn.Softmin(),
            'softmax': nn.Softmax(),
            'softmax2': nn.Softmax2d()
        })

        self.choice_fc = nn.ModuleDict({
            'f1': nn.Linear(120, 84),
            'f2': nn.Linear(84, 10)
        })
Example #4
def parse_activation(act):
    if act is None:
        return lambda x: x

    act, kwargs = parse_str(act)

    if act == 'sigmoid': return nn.Sigmoid(**kwargs)
    if act == 'tanh': return nn.Tanh(**kwargs)
    if act == 'relu': return nn.ReLU(**kwargs, inplace=True)
    if act == 'relu6': return nn.ReLU6(**kwargs, inplace=True)
    if act == 'elu': return nn.ELU(**kwargs, inplace=True)
    if act == 'selu': return nn.SELU(**kwargs, inplace=True)
    if act == 'prelu': return nn.PReLU(**kwargs)
    if act == 'leaky_relu': return nn.LeakyReLU(**kwargs, inplace=True)
    if act == 'threshold': return nn.Threshold(**kwargs, inplace=True)
    if act == 'hardtanh': return nn.Hardtanh(**kwargs, inplace=True)
    if act == 'log_sigmoid': return nn.LogSigmoid(**kwargs)
    if act == 'softplus': return nn.Softplus(**kwargs)
    if act == 'softshrink': return nn.Softshrink(**kwargs)
    if act == 'tanhshrink': return nn.Tanhshrink(**kwargs)
    if act == 'softmin': return nn.Softmin(**kwargs)
    if act == 'softmax': return nn.Softmax(**kwargs)
    if act == 'softmax2d': return nn.Softmax2d(**kwargs)
    if act == 'log_softmax': return nn.LogSoftmax(**kwargs)

    raise ValueError(f'unknown activation: {repr(act)}')
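The parse_str helper used above is not part of this snippet. A minimal sketch of what it might look like, assuming activation strings follow a name(key=value, ...) convention (this helper is an assumption, not the original implementation):

import ast

def parse_str(act):
    # Hypothetical helper (assumption): split "name(k=v, ...)" into ("name", {"k": v, ...}).
    if '(' not in act:
        return act, {}
    name, _, rest = act.partition('(')
    kwargs = {}
    for part in rest.rstrip(')').split(','):
        if part.strip():
            key, _, value = part.partition('=')
            kwargs[key.strip()] = ast.literal_eval(value.strip())
    return name.strip(), kwargs

# With such a helper, parse_activation('softmin(dim=1)') would return nn.Softmin(dim=1),
# and parse_activation('leaky_relu(negative_slope=0.2)') would return nn.LeakyReLU(0.2, inplace=True).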
Example #5
 def create_str_to_activations_converter(self):
     """Creates a dictionary which converts strings to activations"""
     str_to_activations_converter = {
         "elu": nn.ELU(),
         "hardshrink": nn.Hardshrink(),
         "hardtanh": nn.Hardtanh(),
         "leakyrelu": nn.LeakyReLU(),
         "logsigmoid": nn.LogSigmoid(),
         "prelu": nn.PReLU(),
         "relu": nn.ReLU(),
         "relu6": nn.ReLU6(),
         "rrelu": nn.RReLU(),
         "selu": nn.SELU(),
         "sigmoid": nn.Sigmoid(),
         "softplus": nn.Softplus(),
         "logsoftmax": nn.LogSoftmax(),
         "softshrink": nn.Softshrink(),
         "softsign": nn.Softsign(),
         "tanh": nn.Tanh(),
         "tanhshrink": nn.Tanhshrink(),
         "softmin": nn.Softmin(),
         "softmax": nn.Softmax(dim=1),
         "none": None
     }
     return str_to_activations_converter
Example #6
 def __init__(self, maxdisp=192):
     super(Disp, self).__init__()
     self.maxdisp = maxdisp
     self.softmax = nn.Softmin(dim=1)
     self.disparity = DisparityRegression(maxdisp=int(self.maxdisp))
     #        self.conv32x1 = BasicConv(32, 1, kernel_size=3)
     self.conv32x1 = nn.Conv3d(32, 1, (3, 3, 3), (1, 1, 1), (1, 1, 1), bias=False)
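Here nn.Softmin(dim=1) converts a matching-cost volume into per-pixel probabilities over disparity candidates (lower cost gives higher weight), and DisparityRegression turns those probabilities into an expected disparity. The DisparityRegression class itself is not shown in these snippets; the module below is only a minimal sketch of the usual soft-argmin regression, written under that assumption.

import torch
import torch.nn as nn

class DisparityRegressionSketch(nn.Module):
    # Sketch only (assumption): expected disparity from a probability volume.
    def __init__(self, maxdisp):
        super().__init__()
        self.maxdisp = maxdisp

    def forward(self, prob):
        # prob: (N, maxdisp, H, W), e.g. the output of nn.Softmin(dim=1)
        # applied to a matching-cost volume.
        disp = torch.arange(self.maxdisp, dtype=prob.dtype, device=prob.device)
        disp = disp.view(1, self.maxdisp, 1, 1)
        # Expected disparity: probability-weighted sum over all candidates.
        return torch.sum(prob * disp, dim=1)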
Example #7
File: nn_ops.py  Project: yuguo68/pytorch
 def __init__(self):
     super(NNActivationModule, self).__init__()
     self.activations = nn.ModuleList([
         nn.ELU(),
         nn.Hardshrink(),
         nn.Hardsigmoid(),
         nn.Hardtanh(),
         nn.Hardswish(),
         nn.LeakyReLU(),
         nn.LogSigmoid(),
         # nn.MultiheadAttention(),
         nn.PReLU(),
         nn.ReLU(),
         nn.ReLU6(),
         nn.RReLU(),
         nn.SELU(),
         nn.CELU(),
         nn.GELU(),
         nn.Sigmoid(),
         nn.SiLU(),
         nn.Mish(),
         nn.Softplus(),
         nn.Softshrink(),
         nn.Softsign(),
         nn.Tanh(),
         nn.Tanhshrink(),
         # nn.Threshold(0.1, 20),
         nn.GLU(),
         nn.Softmin(),
         nn.Softmax(),
         nn.Softmax2d(),
         nn.LogSoftmax(),
         # nn.AdaptiveLogSoftmaxWithLoss(),
     ])
Example #8
 def __init__(self, margin=0, use_weight=True):
     super(TripletLoss, self).__init__()
     self.margin = margin
     self.use_weight = use_weight
     self.ranking_loss = nn.MarginRankingLoss(margin=margin, reduce=False) \
         if margin != "soft_margin" else nn.SoftMarginLoss(reduce=False)
     self.softmax = nn.Softmax(dim=1)
     self.softmin = nn.Softmin(dim=1)
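The forward pass of this TripletLoss is not shown here. When use_weight is enabled, a softmax/softmin pair like this is commonly used for soft example weighting: harder positives (larger distance) and harder negatives (smaller distance) receive larger weights before averaging. The function below is only a sketch of that idea, under the assumption of hypothetical distance matrices dist_ap and dist_an of shape (N, K):

import torch
import torch.nn as nn

def soft_weighted_distances(dist_ap, dist_an):
    # Sketch only (assumption about how the softmax/softmin pair is used).
    w_ap = nn.Softmax(dim=1)(dist_ap)   # farther positives -> larger weights
    w_an = nn.Softmin(dim=1)(dist_an)   # closer negatives -> larger weights
    return (w_ap * dist_ap).sum(dim=1), (w_an * dist_an).sum(dim=1)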
Example #9
 def __init__(self, input_size, output_size):  # , num_classes):
     super(SingleLayerNN, self).__init__()
     self.fc1 = nn.Linear(input_size, output_size, bias=False)
     # self.softmin = nn.Softmax() #ReLU()
     # self.softmin = nn.ReLU() #ReLU()
     # self.softmin = nn.Softmax(dim=0) #ReLU(
     self.relu = nn.ReLU()
     self.softmin = nn.Softmin(dim=-1)  # ReLU(
Example #10
    def __init__(self, maxdisp=192):
        super(DispAgg, self).__init__()
        self.maxdisp = maxdisp
        self.LGA3 = LGA3(radius=2) # radius = 2, means kernel window size = 2*radius + 1 = 5;
        self.LGA2 = LGA2(radius=2)
        self.LGA = LGA(radius=2)
        self.softmax = nn.Softmin(dim=1)
        self.disparity = DisparityRegression(maxdisp=self.maxdisp)
#        self.conv32x1 = BasicConv(32, 1, kernel_size=3)
        self.conv32x1=nn.Conv3d(32, 1, (3, 3, 3), (1, 1, 1), (1, 1, 1), bias=False)
Example #11
 def __init__(self, env_params):
     super(critic, self).__init__()
     self.max_action = env_params['action_max']
     self.num_reward = env_params['num_reward']
     self.fc1 = nn.Linear(
         env_params['obs'] + env_params['goal'] + env_params['action'], 256)
     self.fc2 = nn.Linear(256, 256)
     self.fc3 = nn.Linear(256, 256)
     self.intermediate_q_out = nn.Linear(256, self.num_reward)
     self.softmin = nn.Softmin(dim=1)
     self.temp = env_params['temp']
Example #12
 def __init__(self, maxdisp=192):
     super(DispAgg, self).__init__()
     self.maxdisp = maxdisp
     self.LGA3 = LGA3(radius=2)
     self.LGA2 = LGA2(radius=2)
     self.LGA = LGA(radius=2)
     self.softmax = nn.Softmin(dim=1)
     self.disparity = DisparityRegression(maxdisp=int(self.maxdisp))
     self.conv64x1 = nn.Conv3d(64,
                               1, (3, 3, 3), (1, 1, 1), (1, 1, 1),
                               bias=False)
Example #13
 def __init__(self, maxdisp, channel):
     super(ResidualPredition, self).__init__()
     self.maxdisp = maxdisp + 1
     self.conv3ds = nn.Sequential(BasicConv(channel, channel, kernel_size=3, padding=1, is_3d=True),
                                  BasicConv(channel, channel, kernel_size=3, padding=1, is_3d=True),
                                  BasicConv(channel, channel, kernel_size=3, padding=1, is_3d=True),
                                  BasicConv(channel, channel, kernel_size=3, padding=1, is_3d=True),
                                  BasicConv(channel, channel, kernel_size=3, padding=1, is_3d=True),
                                  BasicConv(channel, channel, kernel_size=3, padding=1, is_3d=True)
                                  )
     self.convNx1 = nn.Conv3d(channel, 1, (3, 3, 3), (1, 1, 1), (1, 1, 1), bias=False)
     self.softmax = nn.Softmin(dim=1)
Example #14
 def __init__(self, maxdisp=192):
     super(DispAgg, self).__init__()
     self.maxdisp = maxdisp
     #        self.LGA3 = LGA3(radius=2)
     #        self.LGA2 = LGA2(radius=2)
     #        self.LGA = LGA(radius=2)
     self.softmax = nn.Softmin(dim=1)
     self.disparity = DisparityRegression(maxdisp=self.maxdisp)
     #        self.conv32x1 = BasicConv(32, 1, kernel_size=3)
     self.conv32x1 = nn.Conv3d(32,
                               1, (3, 3, 3), (1, 1, 1), (1, 1, 1),
                               bias=False)
     self.lgf = AttentionLgf(in_channel=32)
Example #15
 def __init__(self, maxdisp=192, InChannel=32):
     super(Disp, self).__init__()
     self.maxdisp = maxdisp
     self.softmax = nn.Softmin(dim=1)
     self.disparity = DisparityRegression(maxdisp=self.maxdisp)
     #        self.conv32x1 = BasicConv(32, 1, kernel_size=3)
     if InChannel == 64:
         self.conv3d_2d = nn.Conv3d(InChannel,
                                    1, (1, 1, 1), (1, 1, 1), (1, 1, 1),
                                    bias=False)
     else:
         self.conv3d_2d = nn.Conv3d(InChannel,
                                    1, (3, 3, 3), (1, 1, 1), (1, 1, 1),
                                    bias=False)
Example #16
 def __init__(self, maxdisp, channel):
     super(ResidualPredition2, self).__init__()
     self.maxdisp = maxdisp + 1
     self.conv1 = nn.Sequential(
         BasicConv(1, channel, kernel_size=3, padding=1, is_3d=True),
         BasicConv(channel, channel, kernel_size=3, padding=1, is_3d=True))
     self.conv2 = BasicConv(channel,
                            channel,
                            kernel_size=3,
                            padding=1,
                            is_3d=True)
     self.conv3 = nn.Sequential(
         BasicConv(channel, channel, kernel_size=3, padding=1, is_3d=True),
         BasicConv(channel, 1, kernel_size=3, padding=1, is_3d=True))
     self.softmax = nn.Softmin(dim=1)
     self.sga11 = SGABlock(channels=channel, refine=True)
     self.sga12 = SGABlock(channels=channel, refine=True)
     self.LGA2 = LGA2(radius=2)
Example #17
m = nn.Tanhshrink()
y = m(x)
plot_activationFunc(x, y, 'Tanhshrink')

#%% [markdown]
# $\mathrm{Tanhshrink}(x) = x - \tanh(x)$

#%%
##>17. threshold
m = nn.Threshold(0, -1)
y = m(x)
plot_activationFunc(x, y, 'Threshold')

#%%
##>18. softmin
m = nn.Softmin(dim=0)
x18 = torch.arange(6, dtype=torch.float).reshape(2, 3)
x18

#%%
m(x18)

#%%
m(x18)[:, 0].sum()
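
#%% [markdown]
# A quick sanity check (reusing m and x18 from the cells above; added here as a note):
# Softmin is the Softmax of the negated input, so with dim=0 each column of m(x18) sums to 1.

#%%
torch.allclose(m(x18), nn.Softmax(dim=0)(-x18))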

#%%
##>19. softmax
m = nn.Softmax(dim=1)
x19 = x18
m(x19)
Example #18
    ['sigmoid', nn.Sigmoid()],
    ['tanh', nn.Tanh()],
    ['softmax', nn.Softmax()],
    ['softmax2d', nn.Softmax2d()],
    ['logsoftmax', nn.LogSoftmax()],
    ['elu', nn.ELU()],
    ['selu', nn.SELU()],
    ['celu', nn.CELU()],
    ['hardshrink', nn.Hardshrink()],
    ['leakyrelu', nn.LeakyReLU()],
    ['logsigmoid', nn.LogSigmoid()],
    ['softplus', nn.Softplus()],
    ['softshrink', nn.Softshrink()],
    ['prelu', nn.PReLU()],
    ['softsign', nn.Softsign()],
    ['softmin', nn.Softmin()],
    ['tanhshrink', nn.Tanhshrink()],
    ['rrelu', nn.RReLU()],
    ['glu', nn.GLU()],
])

loss = nn.ModuleDict(
    [['l1', nn.L1Loss()], ['nll', nn.NLLLoss()], ['kldiv',
                                                  nn.KLDivLoss()],
     ['mse', nn.MSELoss()], ['bce', nn.BCELoss()],
     ['bce_with_logits', nn.BCEWithLogitsLoss()],
     ['cosine_embedding', nn.CosineEmbeddingLoss()], ['ctc',
                                                      nn.CTCLoss()],
     ['hinge_embedding', nn.HingeEmbeddingLoss()],
     ['margin_ranking', nn.MarginRankingLoss()],
     ['multi_label_margin', nn.MultiLabelMarginLoss()],
Example #19
File: deep.py  Project: mendo88/AI
    def __init__(self):
        super(LanguageDNN, self).__init__()

        self.conv_linear_match = 512
        # add padding on both sides
        self.conv_layers = nn.Sequential(

            nn.BatchNorm1d(1),
            nn.Conv1d(1,64, 3, 1,1),
            nn.ReLU(),

            nn.BatchNorm1d(64),
            nn.Conv1d(64, 64,3,1,1),
            nn.ReLU(),

            nn.BatchNorm1d(64),
            nn.Conv1d(64, 64,3,1,1),
            nn.ReLU(),

            nn.BatchNorm1d(64),
            nn.Conv1d(64, 64,3,1,1),
            nn.ReLU(),

            nn.MaxPool1d(8),

            nn.BatchNorm1d(64),
            nn.Conv1d(64, 128,3,1,1),
            nn.ReLU(),
            
            nn.BatchNorm1d(128),
            nn.Conv1d(128, 128,3,1,1),
            nn.ReLU(),

            nn.BatchNorm1d(128),
            nn.Conv1d(128, 128,3,1,1),
            nn.ReLU(),

            nn.BatchNorm1d(128),
            nn.Conv1d(128, 128,3,1,1),
            nn.ReLU(),

            nn.BatchNorm1d(128),
            nn.Conv1d(128, 128,3,1,1),
            nn.ReLU(),

            nn.MaxPool1d(8),
            
            nn.BatchNorm1d(128),
            nn.Conv1d(128, 256,3,1,1),
            nn.ReLU(),
            
            nn.BatchNorm1d(256),
            nn.Conv1d(256, 256,3,1,1),
            nn.ReLU(),

            nn.BatchNorm1d(256),
            nn.Conv1d(256, 256,3,1,1),
            nn.ReLU(),

            nn.BatchNorm1d(256),
            nn.Conv1d(256, self.conv_linear_match,3,1,1),
            nn.LogSigmoid(),
            nn.AdaptiveMaxPool1d(1))
        
        self.linear_layers = nn.Sequential(
            # nn.BatchNorm1d(self.conv_linear_match),
            nn.Dropout(0.2),
            nn.Linear(self.conv_linear_match, 512),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(512, 7),
            nn.Softmin(1)
            )
Example #20
 def __append_layer(self, net_style, args_dict):
     args_values_list = list(args_dict.values())
     if net_style == "Conv2d":
         self.layers.append(
             nn.Conv2d(args_values_list[0], args_values_list[1],
                       args_values_list[2], args_values_list[3],
                       args_values_list[4], args_values_list[5],
                       args_values_list[6], args_values_list[7]))
     elif net_style == "MaxPool2d":
         self.layers.append(
             nn.MaxPool2d(args_values_list[0], args_values_list[1],
                          args_values_list[2], args_values_list[3],
                          args_values_list[4], args_values_list[5]))
     elif net_style == "Linear":
         self.layers.append(
             nn.Linear(args_values_list[0], args_values_list[1],
                       args_values_list[2]))
     elif net_style == "reshape":
          # Special "reshape" case: pass the target tensor size through directly
         # print(type(args_values_list[0]))
         self.layers.append(args_values_list[0])
     elif net_style == "Conv1d":
         self.layers.append(
             nn.Conv1d(args_values_list[0], args_values_list[1],
                       args_values_list[2], args_values_list[3],
                       args_values_list[4], args_values_list[5],
                       args_values_list[6], args_values_list[7]))
     elif net_style == "Conv3d":
         self.layers.append(
             nn.Conv3d(args_values_list[0], args_values_list[1],
                       args_values_list[2], args_values_list[3],
                       args_values_list[4], args_values_list[5],
                       args_values_list[6], args_values_list[7]))
     elif net_style == "ConvTranspose1d":
         self.layers.append(
             nn.ConvTranspose1d(args_values_list[0], args_values_list[1],
                                args_values_list[2], args_values_list[3],
                                args_values_list[4], args_values_list[5],
                                args_values_list[6], args_values_list[7],
                                args_values_list[8]))
     elif net_style == "ConvTranspose2d":
         self.layers.append(
             nn.ConvTranspose2d(args_values_list[0], args_values_list[1],
                                args_values_list[2], args_values_list[3],
                                args_values_list[4], args_values_list[5],
                                args_values_list[6], args_values_list[7],
                                args_values_list[8]))
     elif net_style == "ConvTranspose3d":
         self.layers.append(
             nn.ConvTranspose3d(args_values_list[0], args_values_list[1],
                                args_values_list[2], args_values_list[3],
                                args_values_list[4], args_values_list[5],
                                args_values_list[6], args_values_list[7],
                                args_values_list[8]))
     elif net_style == "Unfold":
         self.layers.append(
             nn.Unfold(args_values_list[0], args_values_list[1],
                       args_values_list[2], args_values_list[3]))
     elif net_style == "Fold":
         self.layers.append(
             nn.Unfold(args_values_list[0], args_values_list[1],
                       args_values_list[2], args_values_list[3],
                       args_values_list[4]))
     elif net_style == "MaxPool1d":
         self.layers.append(
             nn.MaxPool1d(args_values_list[0], args_values_list[1],
                          args_values_list[2], args_values_list[3],
                          args_values_list[4], args_values_list[5]))
     elif net_style == "MaxPool3d":
         self.layers.append(
             nn.MaxPool3d(args_values_list[0], args_values_list[1],
                          args_values_list[2], args_values_list[3],
                          args_values_list[4], args_values_list[5]))
     elif net_style == "MaxUnpool1d":
         self.layers.append(
             nn.MaxUnpool1d(args_values_list[0], args_values_list[1],
                            args_values_list[2]))
     elif net_style == "MaxUnpool2d":
         self.layers.append(
             nn.MaxUnpool2d(args_values_list[0], args_values_list[1],
                            args_values_list[2]))
     elif net_style == "MaxUnpool3d":
         self.layers.append(
             nn.MaxUnpool3d(args_values_list[0], args_values_list[1],
                            args_values_list[2]))
     elif net_style == "AvgPool1d":
         self.layers.append(
             nn.AvgPool1d(args_values_list[0], args_values_list[1],
                          args_values_list[2], args_values_list[3],
                          args_values_list[4]))
     elif net_style == "AvgPool2d":
         self.layers.append(
             nn.AvgPool2d(args_values_list[0], args_values_list[1],
                          args_values_list[2], args_values_list[3],
                          args_values_list[4]))
     elif net_style == "AvgPool3d":
         self.layers.append(
             nn.AvgPool3d(args_values_list[0], args_values_list[1],
                          args_values_list[2], args_values_list[3],
                          args_values_list[4]))
     elif net_style == "FractionalMaxPool2d":
         self.layers.append(
             nn.FractionalMaxPool2d(args_values_list[0],
                                    args_values_list[1],
                                    args_values_list[2],
                                    args_values_list[3],
                                    args_values_list[4]))
     elif net_style == "LPPool1d":
         self.layers.append(
             nn.LPPool1d(args_values_list[0], args_values_list[1],
                         args_values_list[2], args_values_list[3]))
     elif net_style == "LPPool2d":
         self.layers.append(
             nn.LPPool2d(args_values_list[0], args_values_list[1],
                         args_values_list[2], args_values_list[3]))
     elif net_style == "AdaptiveMaxPool1d":
         self.layers.append(
             nn.AdaptiveMaxPool1d(args_values_list[0], args_values_list[1]))
     elif net_style == "AdaptiveMaxPool2d":
         self.layers.append(
             nn.AdaptiveMaxPool2d(args_values_list[0], args_values_list[1]))
     elif net_style == "AdaptiveMaxPool3d":
         self.layers.append(
             nn.AdaptiveMaxPool3d(args_values_list[0], args_values_list[1]))
     elif net_style == "AdaptiveAvgPool1d":
         self.layers.append(nn.AdaptiveAvgPool1d(args_values_list[0]))
     elif net_style == "AdaptiveAvgPool2d":
         self.layers.append(nn.AdaptiveAvgPool2d(args_values_list[0]))
     elif net_style == "AdaptiveAvgPool3d":
         self.layers.append(nn.AdaptiveAvgPool3d(args_values_list[0]))
     elif net_style == "ReflectionPad1d":
         self.layers.append(nn.ReflectionPad1d(args_values_list[0]))
     elif net_style == "ReflectionPad2d":
         self.layers.append(nn.ReflectionPad2d(args_values_list[0]))
     elif net_style == "ReplicationPad1d":
         self.layers.append(nn.ReplicationPad1d(args_values_list[0]))
     elif net_style == "ReplicationPad2d":
         self.layers.append(nn.ReplicationPad2d(args_values_list[0]))
     elif net_style == "ReplicationPad3d":
         self.layers.append(nn.ReplicationPad3d(args_values_list[0]))
     elif net_style == "ZeroPad2d":
         self.layers.append(nn.ZeroPad2d(args_values_list[0]))
     elif net_style == "ConstantPad1d":
         self.layers.append(
             nn.ConstantPad1d(args_values_list[0], args_values_list[1]))
     elif net_style == "ConstantPad2d":
         self.layers.append(
             nn.ConstantPad2d(args_values_list[0], args_values_list[1]))
     elif net_style == "ConstantPad3d":
         self.layers.append(
             nn.ConstantPad3d(args_values_list[0], args_values_list[1]))
     elif net_style == "ELU":
         self.layers.append(nn.ELU(args_values_list[0],
                                   args_values_list[1]))
     elif net_style == "Hardshrink":
         self.layers.append(nn.Hardshrink(args_values_list[0]))
     elif net_style == "Hardtanh":
         self.layers.append(
             nn.Hardtanh(args_values_list[0], args_values_list[1],
                         args_values_list[2], args_values_list[3],
                         args_values_list[4]))
     elif net_style == "LeakyReLU":
         self.layers.append(
             nn.LeakyReLU(args_values_list[0], args_values_list[1]))
     elif net_style == "LogSigmoid":
         self.layers.append(nn.LogSigmoid())
     elif net_style == "PReLU":
         self.layers.append(
             nn.PReLU(args_values_list[0], args_values_list[1]))
     elif net_style == "ReLU":
         self.layers.append(nn.ReLU(args_values_list[0]))
     elif net_style == "ReLU6":
         self.layers.append(nn.ReLU6(args_values_list[0]))
     elif net_style == "RReLU":
         self.layers.append(
             nn.RReLU(args_values_list[0], args_values_list[1],
                      args_values_list[2]))
     elif net_style == "SELU":
         self.layers.append(nn.SELU(args_values_list[0]))
     elif net_style == "CELU":
         self.layers.append(
             nn.CELU(args_values_list[0], args_values_list[1]))
     elif net_style == "Sigmoid":
         self.layers.append(nn.Sigmoid())
     elif net_style == "Softplus":
         self.layers.append(
             nn.Softplus(args_values_list[0], args_values_list[1]))
     elif net_style == "Softshrink":
         self.layers.append(nn.Softshrink(args_values_list[0]))
     elif net_style == "Softsign":
         self.layers.append(nn.Softsign())
     elif net_style == "Tanh":
         self.layers.append(nn.Tanh())
     elif net_style == "Tanhshrink":
         self.layers.append(nn.Tanhshrink())
     elif net_style == "Threshold":
         self.layers.append(
             nn.Threshold(args_values_list[0], args_values_list[1],
                          args_values_list[2]))
     elif net_style == "Softmin":
         self.layers.append(nn.Softmin(args_values_list[0]))
     elif net_style == "Softmax":
         self.layers.append(nn.Softmax(args_values_list[0]))
     elif net_style == "Softmax2d":
         self.layers.append(nn.Softmax2d())
     elif net_style == "LogSoftmax":
         self.layers.append(nn.LogSoftmax(args_values_list[0]))
     elif net_style == "AdaptiveLogSoftmaxWithLoss":
         self.layers.append(
             nn.AdaptiveLogSoftmaxWithLoss(args_values_list[0],
                                           args_values_list[1],
                                           args_values_list[2],
                                           args_values_list[3],
                                           args_values_list[4]))
     elif net_style == "BatchNorm1d":
         self.layers.append(
             nn.BatchNorm1d(args_values_list[0], args_values_list[1],
                            args_values_list[2], args_values_list[3],
                            args_values_list[4]))
     elif net_style == "BatchNorm2d":
         self.layers.append(
             nn.BatchNorm2d(args_values_list[0], args_values_list[1],
                            args_values_list[2], args_values_list[3],
                            args_values_list[4]))
     elif net_style == "BatchNorm3d":
         self.layers.append(
             nn.BatchNorm3d(args_values_list[0], args_values_list[1],
                            args_values_list[2], args_values_list[3],
                            args_values_list[4]))
     elif net_style == "GroupNorm":
         self.layers.append(
             nn.GroupNorm(args_values_list[0], args_values_list[1],
                          args_values_list[2], args_values_list[3]))
     elif net_style == "InstanceNorm1d":
         self.layers.append(
             nn.InstanceNorm1d(args_values_list[0], args_values_list[1],
                               args_values_list[2], args_values_list[3],
                               args_values_list[4]))
     elif net_style == "InstanceNorm2d":
         self.layers.append(
             nn.InstanceNorm2d(args_values_list[0], args_values_list[1],
                               args_values_list[2], args_values_list[3],
                               args_values_list[4]))
     elif net_style == "InstanceNorm3d":
         self.layers.append(
             nn.InstanceNorm3d(args_values_list[0], args_values_list[1],
                               args_values_list[2], args_values_list[3],
                               args_values_list[4]))
     elif net_style == "LayerNorm":
         self.layers.append(
             nn.LayerNorm(args_values_list[0], args_values_list[1],
                          args_values_list[2]))
     elif net_style == "LocalResponseNorm":
         self.layers.append(
             nn.LocalResponseNorm(args_values_list[0], args_values_list[1],
                                  args_values_list[2], args_values_list[3]))
     elif net_style == "Linear":
         self.layers.append(
             nn.Linear(args_values_list[0], args_values_list[1],
                       args_values_list[2]))
     elif net_style == "Dropout":
         self.layers.append(
             nn.Dropout(args_values_list[0], args_values_list[1]))
     elif net_style == "Dropout2d":
         self.layers.append(
             nn.Dropout2d(args_values_list[0], args_values_list[1]))
     elif net_style == "Dropout3d":
         self.layers.append(
             nn.Dropout3d(args_values_list[0], args_values_list[1]))
     elif net_style == "AlphaDropout":
         self.layers.append(
             nn.AlphaDropout(args_values_list[0], args_values_list[1]))
Example #21
    def selector(self, x):
        ''' Selector of a SARDU-Net

            xout = mynet.selector(xin)

            * mynet: initialised SARDU-Net
            * xin:   PyTorch Tensor storing MRI signals from one or from multiple voxels (mini-batch;
                     for a mini-batch, xin has size voxels x measurements)
            * xout:  PyTorch Tensor, same size as xin, storing MRI signals where non-selected
                     measurements are zeroed while selected measurements are scaled by a
                     measurement-dependent weight (if mynet.selector_update is True such weights are
                     calculated, while if mynet.selector_update is False then the weights stored in
                     mynet.selector_weights are used)

        '''
        ## Calculate new selection indices if selector_update is True
        if self.selector_update == True:

            xin = torch.clone(x)

            # Pass net through fully connected layers of the selector
            for mylayer in self.selector_layers:
                x = mylayer(x)

            # Obtain indices of most informative measurements and zero the others: single input case
            if x.dim() == 1:

                # Softmin normalisation and thresholding
                layer_softmin = nn.Softmin(dim=0)
                x = layer_softmin(x)
                w_sort, w_idx = torch.sort(x, descending=True)
                x[Tensor.numpy(w_idx[(self.selector_downsampsize
                                      ):self.selector_nneurons[-1]])] = 0.0

                # Account for multi-echo data to be analysed as one block if required
                if (np.isnan(self.selector_mechoFirstTe) == False):
                    nonzero_tes = int(
                        np.sum(
                            Tensor.numpy(x[self.selector_mechoFirstTe:self.
                                           selector_mechoFirstTe +
                                           self.selector_mechoNTe] != 0))
                    )  # Number of non-zero TEs
                    # Keep only consecutive TEs up to the current number of non-zero TEs and use the same weight for each of them
                    if (nonzero_tes != 0):
                        x[self.
                          selector_mechoFirstTe:self.selector_mechoFirstTe +
                          nonzero_tes] = torch.mean(
                              x[self.selector_mechoFirstTe:self.
                                selector_mechoFirstTe +
                                self.selector_mechoNTe])
                        x[self.selector_mechoFirstTe +
                          nonzero_tes:self.selector_mechoFirstTe +
                          self.selector_mechoNTe] = 0.0

                # Get final set of selector weights
                x = x / torch.sum(x)

            # Obtain indices of most informative measurements: mini-batch case
            elif x.dim() == 2:

                # Softmin normalisation and thresholding
                layer_softmin = nn.Softmin(dim=1)
                x = layer_softmin(x)
                x = torch.mean(x, dim=0)
                w_sort, w_idx = torch.sort(x, descending=True)
                x[Tensor.numpy(w_idx[(self.selector_downsampsize):(
                    self.selector_nneurons[-1])])] = 0.0

                # Account for multi-echo data to be analysed as one block if required
                if (np.isnan(self.selector_mechoFirstTe) == False):
                    nonzero_tes = int(
                        np.sum(
                            Tensor.numpy(x[self.selector_mechoFirstTe:self.
                                           selector_mechoFirstTe +
                                           self.selector_mechoNTe] != 0))
                    )  # Number of non-zero TEs
                    # Keep only consecutive TEs up to the current number of non-zero TEs and use the same weight for each of them
                    if (nonzero_tes != 0):
                        x[self.
                          selector_mechoFirstTe:self.selector_mechoFirstTe +
                          nonzero_tes] = torch.mean(
                              x[self.selector_mechoFirstTe:self.
                                selector_mechoFirstTe +
                                self.selector_mechoNTe])
                        x[self.selector_mechoFirstTe +
                          nonzero_tes:self.selector_mechoFirstTe +
                          self.selector_mechoNTe] = 0.0

                # Get final set of selector weights
                x = x / torch.sum(x)

            else:
                raise RuntimeError(
                    'input tensors need to be 1D or 2D; your data is {}D instead'
                    .format(x.dim()))

            # Extract measurements with the newly calculated selector indices
            if xin.dim() == 1:
                xout = xin * x
            elif xin.dim() == 2:
                xout = xin * (x.repeat(xin.shape[0], 1))
            else:
                raise RuntimeError(
                    'input tensors need to be 1D or 2D; your data is {}D instead'
                    .format(xin.dim()))

            # Store updated selector indices and weights, accounting for multi-echo blocks if required
            if (np.isnan(self.selector_mechoFirstTe) == False):
                w_idx_me = Tensor(
                    np.arange(self.selector_nneurons[0], dtype=int))
                w_idx_me = w_idx_me.type(torch.LongTensor)
                self.selector_indices = w_idx_me[x != 0.0]
            else:
                w_sort_new, w_idx_new = torch.sort(
                    w_idx[0:self.selector_downsampsize])
                self.selector_indices = w_sort_new
            self.selector_weights = x

        ## Use old selection indices if selector_update is False
        elif self.selector_update == False:

            # Extract measurements
            wx = self.selector_weights
            if x.dim() == 1:
                xout = x * wx
            elif x.dim() == 2:
                xout = x * (wx.repeat(x.shape[0], 1))
            else:
                raise RuntimeError(
                    'input tensors need to be 1D or 2D; your data is {}D instead'
                    .format(x.dim()))

        ## Error if selector_update is set to bad values
        else:
            raise RuntimeError(
                'field selector_update is {} but must be either True or False'.
                format(self.selector_update))

        # Return output made of the selection of only some input measurements, weighted by some weights
        return xout
Example #22
activation_dict = {'relu': nn.ReLU(),
                   'relu6': nn.ReLU6(),
                   'prelu': nn.PReLU(),
                   'hardtanh': nn.Hardtanh(),
                   'tanh': nn.Tanh(),
                   'elu': nn.ELU(),
                   'selu': nn.SELU(),
                   'gelu': nn.GELU(),
                   'glu': nn.GLU(),
                   'swish': Swish(),
                   'sigmoid': nn.Sigmoid(),
                   'leakyrelu': nn.LeakyReLU(),
                   # 'hardsigmoid': nn.Hardsigmoid(),
                   'softsign': nn.Softsign(),
                   'softplus': nn.Softplus(),
                   'softmin': nn.Softmin(),
                   'softmax': nn.Softmax()}
optimizer_dict = {'adadelta': optim.Adadelta,
                  'adagrad': optim.Adagrad,
                  'adam': optim.Adam,
                  'adamw': optim.AdamW,
                  'sparse_adam': optim.SparseAdam,
                  'adamax': optim.Adamax,
                  'asgd': optim.ASGD,
                  'sgd': optim.SGD,
                  'rprop': optim.Rprop,
                  'rmsprop': optim.RMSprop,
                  'lbfgs': optim.LBFGS}
criterion_dict = {'mae': nn.L1Loss(),
                  'mse': nn.MSELoss(),
                  'bce': nn.BCELoss(),
Example #23
torch.nn.functional.logsigmoid(input)


class torch.nn.Softplus(beta=1, threshold=20)


torch.nn.functional.softplus(input, beta=1, threshold=20)


class torch.nn.Softshrink(lambd=0.5)


torch.nn.functional.softshrink(input, lambd=0.5)


class torch.nn.Softmin(dim=None)


torch.nn.functional.softmin(input, dim=None)


class torch.nn.Softmax(dim=None)


torch.nn.functional.softmax(input, dim=None)


class torch.nn.LogSoftmax(dim=None)


torch.nn.functional.log_softmax(input, dim=None)
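
For reference, the same operations through the functional API with an explicit dim argument (a minimal sketch; the tensor below is illustrative only):

import torch
import torch.nn.functional as F

x = torch.tensor([[1.0, 2.0, 3.0]])
print(F.softmin(x, dim=1))      # equivalent to F.softmax(-x, dim=1)
print(F.softmax(x, dim=1))      # each row sums to 1
print(F.log_softmax(x, dim=1))  # log of the softmax probabilities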
Example #24
 def __init__(self, maxdisp=192):
     super(Disp, self).__init__()
     self.maxdisp = maxdisp
     self.softmax = nn.Softmin(dim=1)
     self.disparity = DisparityRegression(maxdisp=self.maxdisp)
Example #25
import torch
from torch import nn
m = nn.Softmin()
x = torch.randn(2, 3)
y = m(x)
print(x)
print(y)
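
Calling nn.Softmin() without a dim argument works but triggers an implicit-dimension deprecation warning in recent PyTorch versions. A small variant of the same example with an explicit dim (a sketch, not part of the original snippet):

import torch
from torch import nn

m = nn.Softmin(dim=1)   # explicit dim avoids the implicit-dimension warning
x = torch.randn(2, 3)
y = m(x)
print(y)
print(y.sum(dim=1))     # each row sums to 1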
Example #26
def get_layers_for_network_module(nnpt_params, task_type, first_layer_units):
    layers = []
    nnpt_params["hidden_layer_info"] = {
        int(key): val
        for key, val in nnpt_params['hidden_layer_info'].items()
    }

    layers_list = sorted(tuple(nnpt_params["hidden_layer_info"]))

    print("=" * 45)
    print(nnpt_params)
    print("n_layers - ", len(layers_list))
    print("task_type - ", task_type)
    print("layers_tuple - ", layers_list)

    if task_type == "CLASSIFICATION":
        for val in layers_list:
            print(val)
            layer_dict = nnpt_params["hidden_layer_info"][val]
            layer_name = layer_dict["layer"]
            if val == 1:
                layer_units_ip = first_layer_units
            else:
                layer_units_ip = layer_dict["units_ip"]

            kernel_weight_init = layer_dict["weight_init"]
            kernel_bias_init = layer_dict["bias_init"]
            kernel_weight_constraint = layer_dict["weight_constraint"]
            layer_units_op = layer_dict["units_op"]
            #layer_bias = layer_dict["bias"]
            layer_activation = layer_dict["activation"]
            layer_batchnormalization = layer_dict["batchnormalization"]
            layer_dropout = layer_dict["dropout"]

            print("~" * 50)
            print("Layer ID - ", val)
            print("Layer Name - ", layer_name)
            print("~" * 50)

            if layer_name == "Linear":
                main_layer = nn.Linear(in_features=layer_units_ip,
                                       out_features=layer_units_op)
                get_kernel_weights(kernel_weight_init, main_layer,
                                   layer_units_ip, layer_units_op)
                get_kernel_bias(kernel_bias_init, main_layer, layer_units_ip,
                                layer_units_op)
                get_kernel_weight_constraint(kernel_weight_constraint,
                                             main_layer)
                layers.append(main_layer)
                if layer_activation != None:
                    if layer_activation["name"] == "ELU":
                        activation = nn.ELU(alpha=layer_activation["alpha"],
                                            inplace=False)
                        layers.append(activation)
                    if layer_activation["name"] == "Hardshrink":
                        activation = nn.Hardshrink(
                            lambd=layer_activation["lambd"])
                        layers.append(activation)
                    if layer_activation["name"] == "Hardtanh":
                        activation = nn.Hardtanh(
                            min_val=layer_activation["min_val"],
                            max_val=layer_activation["max_val"],
                            inplace=False)
                        layers.append(activation)
                    if layer_activation["name"] == "LeakyReLU":
                        activation = nn.LeakyReLU(
                            negative_slope=layer_activation["negative_slope"],
                            inplace=False)
                        layers.append(activation)
                    if layer_activation["name"] == "LogSigmoid":
                        activation = nn.LogSigmoid()
                        layers.append(activation)
                    if layer_activation["name"] == "MultiheadAttention":
                        activation = nn.MultiheadAttention(
                            embed_dim=layer_activation["embed_dim"],
                            num_heads=layer_activation["num_heads"],
                            dropout=layer_activation["dropout"],
                            bias=layer_activation["bias"],
                            add_bias_kv=layer_activation["add_bias_kv"],
                            add_zero_attn=layer_activation["add_zero_attn"],
                            kdim=layer_activation["kdim"],
                            vdim=layer_activation["vdim"])
                        layers.append(activation)
                    if layer_activation["name"] == "PreLU":
                        activation = nn.PReLU(
                            num_parameters=layer_activation["num_parameters"],
                            init=layer_activation["init"])
                        layers.append(activation)
                    if layer_activation["name"] == "ReLU":
                        activation = nn.ReLU()
                        layers.append(activation)
                    if layer_activation["name"] == "ReLU6":
                        activation = nn.ReLU6()
                        layers.append(activation)
                    if layer_activation["name"] == "RreLU":
                        activation = nn.RReLU(lower=layer_activation["lower"],
                                              upper=layer_activation["upper"],
                                              inplace=False)
                        layers.append(activation)
                    if layer_activation["name"] == "SELU":
                        activation = nn.SELU()
                        layers.append(activation)
                    if layer_activation["name"] == "CELU":
                        activation = nn.CELU(alpha=layer_activation["alpha"],
                                             inplace=False)
                        layers.append(activation)
                    if layer_activation["name"] == "GELU":
                        activation = nn.GELU()
                        layers.append(activation)
                    if layer_activation["name"] == "Sigmoid":
                        activation = nn.Sigmoid()
                        layers.append(activation)
                    if layer_activation["name"] == "Softplus":
                        activation = nn.Softplus(
                            beta=layer_activation["beta"],
                            threshold=layer_activation["threshold"])
                        layers.append(activation)
                    if layer_activation["name"] == "Softshrink":
                        activation = nn.Softshrink(
                            lambd=layer_activation["lambd"])
                        layers.append(activation)
                    if layer_activation["name"] == "Softsign":
                        activation = nn.Softsign()
                        layers.append(activation)
                    if layer_activation["name"] == "Tanh":
                        activation = nn.Tanh()
                        layers.append(activation)
                    if layer_activation["name"] == "Tanhshrink":
                        activation = nn.Tanhshrink()
                        layers.append(activation)
                    if layer_activation["name"] == "Threshold":
                        activation = nn.Threshold(
                            threshold=layer_activation["threshold"],
                            value=layer_activation["value"])
                        layers.append(activation)
                    if layer_activation["name"] == "Softmin":
                        activation = nn.Softmin(dim=layer_activation["dim"])
                        layers.append(activation)
                    if layer_activation["name"] == "Softmax":
                        activation = nn.Softmax(dim=layer_activation["dim"])
                        layers.append(activation)
                    if layer_activation["name"] == "Softmax2d":
                        activation = nn.Softmax2d()
                        layers.append(activation)
                    if layer_activation["name"] == "LogSoftmax":
                        activation = nn.LogSoftmax(dim=layer_activation["dim"])
                        layers.append(activation)
                    if layer_activation[
                            "name"] == "AdaptiveLogSoftmaxWithLoss":
                        activation = nn.AdaptiveLogSoftmaxWithLoss(
                            n_classes=layer_activation["n_classes"],
                            cutoffs=layer_activation["cutoffs"],
                            div_value=layer_activation["div_value"],
                            head_bias=layer_activation["head_bias"])
                        layers.append(activation)
                else:
                    pass

                if layer_batchnormalization != None:
                    if layer_batchnormalization["name"] == "BatchNorm1d":
                        batch_normalization = nn.BatchNorm1d(
                            num_features=layer_batchnormalization[
                                "num_features"],
                            eps=layer_batchnormalization["eps"],
                            momentum=layer_batchnormalization["momentum"],
                            affine=layer_batchnormalization["affine"],
                            track_running_stats=layer_batchnormalization[
                                "track_running_stats"])
                        layers.append(batch_normalization)
                    if layer_batchnormalization["name"] == "BatchNorm2d":
                        batch_normalization = nn.BatchNorm2d(
                            num_features=layer_batchnormalization[
                                "num_features"],
                            eps=layer_batchnormalization["eps"],
                            momentum=layer_batchnormalization["momentum"],
                            affine=layer_batchnormalization["affine"],
                            track_running_stats=layer_batchnormalization[
                                "track_running_stats"])
                        layers.append(batch_normalization)
                    if layer_batchnormalization["name"] == "BatchNorm3d":
                        batch_normalization = nn.BatchNorm3d(
                            num_features=layer_batchnormalization[
                                "num_features"],
                            eps=layer_batchnormalization["eps"],
                            momentum=layer_batchnormalization["momentum"],
                            affine=layer_batchnormalization["affine"],
                            track_running_stats=layer_batchnormalization[
                                "track_running_stats"])
                        layers.append(batch_normalization)
                    if layer_batchnormalization["name"] == "SyncBatchNorm":
                        batch_normalization = nn.SyncBatchNorm(
                            num_features=layer_batchnormalization[
                                "num_features"],
                            eps=layer_batchnormalization["eps"],
                            momentum=layer_batchnormalization["momentum"],
                            affine=layer_batchnormalization["affine"],
                            track_running_stats=layer_batchnormalization[
                                "track_running_stats"],
                            process_group=layer_batchnormalization[
                                "process_group"])
                        layers.append(batch_normalization)
                    if layer_batchnormalization["name"] == "InstanceNorm1d":
                        batch_normalization = nn.InstanceNorm1d(
                            num_features=layer_batchnormalization[
                                "num_features"],
                            eps=layer_batchnormalization["eps"],
                            momentum=layer_batchnormalization["momentum"],
                            affine=layer_batchnormalization["affine"],
                            track_running_stats=layer_batchnormalization[
                                "track_running_stats"])
                        layers.append(batch_normalization)
                    if layer_batchnormalization["name"] == "InstanceNorm2d":
                        batch_normalization = nn.InstanceNorm2d(
                            num_features=layer_batchnormalization[
                                "num_features"],
                            eps=layer_batchnormalization["eps"],
                            momentum=layer_batchnormalization["momentum"],
                            affine=layer_batchnormalization["affine"],
                            track_running_stats=layer_batchnormalization[
                                "track_running_stats"])
                        layers.append(batch_normalization)
                    if layer_batchnormalization["name"] == "InstanceNorm3d":
                        batch_normalization = nn.InstanceNorm3d(
                            num_features=layer_batchnormalization[
                                "num_features"],
                            eps=layer_batchnormalization["eps"],
                            momentum=layer_batchnormalization["momentum"],
                            affine=layer_batchnormalization["affine"],
                            track_running_stats=layer_batchnormalization[
                                "track_running_stats"])
                        layers.append(batch_normalization)
                    if layer_batchnormalization["name"] == "GroupNorm":
                        batch_normalization = nn.GroupNorm(
                            num_groups=layer_batchnormalization["num_groups"],
                            num_channels=layer_batchnormalization[
                                "num_channels"],
                            eps=layer_batchnormalization["eps"],
                            affine=layer_batchnormalization["affine"])
                        layers.append(batch_normalization)
                    if layer_batchnormalization["name"] == "LayerNorm":
                        batch_normalization = nn.LayerNorm(
                            normalized_shape=layer_batchnormalization[
                                "normalized_shape"],
                            eps=layer_batchnormalization["eps"],
                            elementwise_affine=layer_batchnormalization[
                                "elementwise_affine"])
                        layers.append(batch_normalization)
                    if layer_batchnormalization["name"] == "LocalResponseNorm":
                        batch_normalization = nn.LocalResponseNorm(
                            size=layer_batchnormalization["size"],
                            alpha=layer_batchnormalization["alpha"],
                            beta=layer_batchnormalization["beta"],
                            k=layer_batchnormalization["k"])
                        layers.append(batch_normalization)
                else:
                    pass

                if layer_dropout != None:
                    if layer_dropout["name"] == "Dropout":
                        dropout = nn.Dropout(p=layer_dropout["p"],
                                             inplace=False)
                        layers.append(dropout)
                    if layer_dropout["name"] == "Dropout2d":
                        dropout = nn.Dropout2d(p=layer_dropout["p"],
                                               inplace=False)
                        layers.append(dropout)
                    if layer_dropout["name"] == "Dropout3d":
                        dropout = nn.Dropout3d(p=layer_dropout["p"],
                                               inplace=False)
                        layers.append(dropout)
                    if layer_dropout["name"] == "AlphaDropout":
                        dropout = nn.AlphaDropout(p=layer_dropout["p"],
                                                  inplace=False)
                        layers.append(dropout)
                else:
                    pass

        print("~" * 50)
        print("FINAL LAYERS FOR NETWORK - ", layers)
        print("~" * 50)

    if task_type == "REGRESSION":
        for val in layers_list:
            layer_dict = nnpt_params["hidden_layer_info"][val]
            layer_name = layer_dict["layer"]
            if val == 1:
                layer_units_ip = first_layer_units
            else:
                layer_units_ip = layer_dict["units_ip"]
            layer_units_op = layer_dict["units_op"]
            layer_bias = layer_dict["bias"]
            layer_activation = layer_dict["activation"]
            layer_batchnormalization = layer_dict["batchnormalization"]
            layer_dropout = layer_dict["dropout"]

            print("~" * 50)
            print("Layer ID - ", val)
            print("Layer Name - ", layer_name)
            print("~" * 50)

            if layer_name == "Linear":
                main_layer = nn.Linear(in_features=layer_units_ip,
                                       out_features=layer_units_op,
                                       bias=layer_bias)
                layers.append(main_layer)
                if layer_activation != None:
                    if layer_activation["name"] == "ELU":
                        activation = nn.ELU(alpha=layer_activation["alpha"],
                                            inplace=False)
                    if layer_activation["name"] == "Hardshrink":
                        activation = nn.Hardshrink(
                            lambd=layer_activation["lambd"])
                    if layer_activation["name"] == "Hardtanh":
                        activation = nn.Hardtanh(
                            min_val=layer_activation["min_val"],
                            max_val=layer_activation["max_val"],
                            inplace=False)
                    if layer_activation["name"] == "LeakyReLU":
                        activation = nn.LeakyReLU(
                            negative_slope=layer_activation["negative_slope"],
                            inplace=False)
                    if layer_activation["name"] == "LogSigmoid":
                        activation = nn.LogSigmoid()
                    if layer_activation["name"] == "MultiheadAttention":
                        activation = nn.MultiheadAttention(
                            embed_dim=layer_activation["embed_dim"],
                            num_heads=layer_activation["num_heads"],
                            dropout=layer_activation["dropout"],
                            bias=layer_activation["bias"],
                            add_bias_kv=layer_activation["add_bias_kv"],
                            add_zero_attn=layer_activation["add_zero_attn"],
                            kdim=layer_activation["kdim"],
                            vdim=layer_activation["vdim"])
                    if layer_activation["name"] == "PreLU":
                        activation = nn.PReLU(
                            num_parameters=layer_activation["num_parameters"],
                            init=layer_activation["init"])
                    if layer_activation["name"] == "ReLU":
                        activation = nn.ReLU()
                    if layer_activation["name"] == "ReLU6":
                        activation = nn.ReLU6()
                    if layer_activation["name"] == "RreLU":
                        activation = nn.RReLU(lower=layer_activation["lower"],
                                              upper=layer_activation["upper"],
                                              inplace=False)
                    if layer_activation["name"] == "SELU":
                        activation = nn.SELU()
                    if layer_activation["name"] == "CELU":
                        activation = nn.CELU(alpha=layer_activation["alpha"],
                                             inplace=False)
                    if layer_activation["name"] == "GELU":
                        activation = nn.GELU()
                    if layer_activation["name"] == "Sigmoid":
                        activation = nn.Sigmoid()
                    if layer_activation["name"] == "Softplus":
                        activation = nn.Softplus(
                            beta=layer_activation["beta"],
                            threshold=layer_activation["threshold"])
                    if layer_activation["name"] == "Softshrink":
                        activation = nn.Softshrink(
                            lambd=layer_activation["lambd"])
                    if layer_activation["name"] == "Softsign":
                        activation = nn.Softsign()
                    if layer_activation["name"] == "Tanh":
                        activation = nn.Tanh()
                    if layer_activation["name"] == "Tanhshrink":
                        activation = nn.Tanhshrink()
                    if layer_activation["name"] == "Threshold":
                        activation = nn.Threshold(
                            threshold=layer_activation["threshold"],
                            value=layer_activation["value"])
                    if layer_activation["name"] == "Softmin":
                        activation = nn.Softmin(dim=layer_activation["dim"])
                    if layer_activation["name"] == "Softmax":
                        activation = nn.Softmax(dim=layer_activation["dim"])
                    if layer_activation["name"] == "Softmax2d":
                        activation = nn.Softmax2d()
                    if layer_activation["name"] == "LogSoftmax":
                        activation = nn.LogSoftmax(dim=layer_activation["dim"])
                    if layer_activation[
                            "name"] == "AdaptiveLogSoftmaxWithLoss":
                        # nn.AdaptiveLogSoftmaxWithLoss also requires in_features;
                        # the config key used for it here is an assumption.
                        activation = nn.AdaptiveLogSoftmaxWithLoss(
                            in_features=layer_activation["in_features"],
                            n_classes=layer_activation["n_classes"],
                            cutoffs=layer_activation["cutoffs"],
                            div_value=layer_activation["div_value"],
                            head_bias=layer_activation["head_bias"])

                    layers.append(activation)

                if layer_batchnormalization is not None:
                    if layer_batchnormalization["name"] == "BatchNorm1d":
                        batch_normalization = nn.BatchNorm1d(
                            num_features=layer_batchnormalization[
                                "num_features"],
                            eps=layer_batchnormalization["eps"],
                            momentum=layer_batchnormalization["momentum"],
                            affine=layer_batchnormalization["affine"],
                            track_running_stats=layer_batchnormalization[
                                "track_running_stats"])
                    if layer_batchnormalization["name"] == "BatchNorm2d":
                        batch_normalization = nn.BatchNorm2d(
                            num_features=layer_batchnormalization[
                                "num_features"],
                            eps=layer_batchnormalization["eps"],
                            momentum=layer_batchnormalization["momentum"],
                            affine=layer_batchnormalization["affine"],
                            track_running_stats=layer_batchnormalization[
                                "track_running_stats"])
                    if layer_batchnormalization["name"] == "BatchNorm3d":
                        batch_normalization = nn.BatchNorm3d(
                            num_features=layer_batchnormalization[
                                "num_features"],
                            eps=layer_batchnormalization["eps"],
                            momentum=layer_batchnormalization["momentum"],
                            affine=layer_batchnormalization["affine"],
                            track_running_stats=layer_batchnormalization[
                                "track_running_stats"])
                    if layer_batchnormalization["name"] == "SyncBatchNorm":
                        batch_normalization = nn.SyncBatchNorm(
                            num_features=layer_batchnormalization[
                                "num_features"],
                            eps=layer_batchnormalization["eps"],
                            momentum=layer_batchnormalization["momentum"],
                            affine=layer_batchnormalization["affine"],
                            track_running_stats=layer_batchnormalization[
                                "track_running_stats"],
                            process_group=layer_batchnormalization[
                                "process_group"])
                    if layer_batchnormalization["name"] == "InstanceNorm1d":
                        batch_normalization = nn.InstanceNorm1d(
                            num_features=layer_batchnormalization[
                                "num_features"],
                            eps=layer_batchnormalization["eps"],
                            momentum=layer_batchnormalization["momentum"],
                            affine=layer_batchnormalization["affine"],
                            track_running_stats=layer_batchnormalization[
                                "track_running_stats"])
                    if layer_batchnormalization["name"] == "InstanceNorm2d":
                        batch_normalization = nn.InstanceNorm2d(
                            num_features=layer_batchnormalization[
                                "num_features"],
                            eps=layer_batchnormalization["eps"],
                            momentum=layer_batchnormalization["momentum"],
                            affine=layer_batchnormalization["affine"],
                            track_running_stats=layer_batchnormalization[
                                "track_running_stats"])
                    if layer_batchnormalization["name"] == "InstanceNorm3d":
                        batch_normalization = nn.InstanceNorm3d(
                            num_features=layer_batchnormalization[
                                "num_features"],
                            eps=layer_batchnormalization["eps"],
                            momentum=layer_batchnormalization["momentum"],
                            affine=layer_batchnormalization["affine"],
                            track_running_stats=layer_batchnormalization[
                                "track_running_stats"])
                    if layer_batchnormalization["name"] == "GroupNorm":
                        batch_normalization = nn.GroupNorm(
                            num_groups=layer_batchnormalization["num_groups"],
                            num_channels=layer_batchnormalization[
                                "num_channels"],
                            eps=layer_batchnormalization["eps"],
                            affine=layer_batchnormalization["affine"])
                    if layer_batchnormalization["name"] == "LayerNorm":
                        batch_normalization = nn.LayerNorm(
                            normalized_shape=layer_batchnormalization[
                                "normalized_shape"],
                            eps=layer_batchnormalization["eps"],
                            elementwise_affine=layer_batchnormalization[
                                "elementwise_affine"])
                    if layer_batchnormalization["name"] == "LocalResponseNorm":
                        batch_normalization = nn.LocalResponseNorm(
                            size=layer_batchnormalization["size"],
                            alpha=layer_batchnormalization["alpha"],
                            beta=layer_batchnormalization["beta"],
                            k=layer_batchnormalization["k"])

                    layers.append(batch_normalization)

                if layer_dropout is not None:
                    if layer_dropout["name"] == "Dropout":
                        dropout = nn.Dropout(p=layer_dropout["p"],
                                             inplace=False)
                    if layer_dropout["name"] == "Dropout2d":
                        dropout = nn.Dropout2d(p=layer_dropout["p"],
                                               inplace=False)
                    if layer_dropout["name"] == "Dropout3d":
                        dropout = nn.Dropout3d(p=layer_dropout["p"],
                                               inplace=False)
                    if layer_dropout["name"] == "AlphaDropout":
                        dropout = nn.AlphaDropout(p=layer_dropout["p"],
                                                  inplace=False)

                    layers.append(dropout)

    return layers
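The builder above returns a flat list of modules, one group per configured layer. A minimal usage sketch follows; the concrete sizes and hyperparameters are assumptions for illustration, not part of the original source. It shows the kind of modules the "LeakyReLU", "BatchNorm1d", and "Dropout" branches construct and how the resulting list is typically wrapped.

import torch
import torch.nn as nn

# What one configured layer expands to: main layer, activation, normalization, dropout.
layers = [
    nn.Linear(128, 64, bias=True),                            # hypothetical main layer
    nn.LeakyReLU(negative_slope=0.01, inplace=False),         # "LeakyReLU" branch
    nn.BatchNorm1d(num_features=64, eps=1e-5, momentum=0.1,
                   affine=True, track_running_stats=True),    # "BatchNorm1d" branch
    nn.Dropout(p=0.5, inplace=False),                          # "Dropout" branch
]

model = nn.Sequential(*layers)
out = model(torch.randn(8, 128))    # -> torch.Size([8, 64])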
Example #27
    def __init__(self):
        super(Style_Contrastive, self).__init__()
        self.MSELoss = nn.MSELoss()
        self.softmin = nn.Softmin(dim=-1)
        self.LongTensor = (torch.cuda.LongTensor
                           if torch.cuda.is_available() else torch.LongTensor)
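The forward pass of Style_Contrastive is not shown above; the short check below is illustrative only and just confirms the property this Softmin relies on, namely that Softmin is a softmax over negated scores, so lower scores receive higher weight.

import torch
import torch.nn as nn

scores = torch.randn(4, 10)
weights = nn.Softmin(dim=-1)(scores)                    # each row sums to 1
assert torch.allclose(weights, torch.softmax(-scores, dim=-1))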
Example #28
    def __init__(self):
        super(Disp, self).__init__()
        # Named softmax, but nn.Softmin is used: equivalent to softmax over negated inputs
        self.softmax = nn.Softmin(dim=1)
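Disp's forward is omitted here. A common use of this setup in stereo matching, assumed here rather than taken from the original implementation, is soft-argmin disparity regression: apply Softmin over the disparity dimension of a cost volume and take the expected disparity.

import torch
import torch.nn as nn

cost = torch.randn(2, 192, 64, 128)                      # (batch, disparity, H, W) cost volume
prob = nn.Softmin(dim=1)(cost)                           # lower cost -> higher probability
disp_values = torch.arange(192, dtype=prob.dtype).view(1, 192, 1, 1)
disparity = torch.sum(prob * disp_values, dim=1)         # expected disparity, shape (2, 64, 128)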
Example #29
    def __init__(self, maxdisp):
        super(DispEntropy, self).__init__()
        # nn.Softmin turns matching costs into probabilities (lower cost -> higher weight)
        self.softmax = nn.Softmin(dim=1)
        self.maxdisp = maxdisp
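DispEntropy's forward is likewise not shown; assuming it scores matching confidence, a per-pixel entropy over the Softmin probabilities could be sketched as follows. This is a hedged sketch, not the original code.

import torch
import torch.nn as nn

maxdisp = 192
cost = torch.randn(2, maxdisp, 64, 128)
prob = nn.Softmin(dim=1)(cost)                                 # per-pixel distribution over disparities
entropy = -torch.sum(prob * torch.log(prob + 1e-12), dim=1)    # (2, 64, 128); low entropy = confident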