Example #1
    def __init__(self,
                 dim2=500,
                 dim3=64,
                 dim4=None,
                 act_func="ReLU",
                 act_func_out=None,
                 d_rate=0.5,
                 mom=0.1):
        super(MLPRank, self).__init__()
        #dim1 = 1500
        dim1 = 900
        self.layers = torch.nn.Sequential()
        self.layers.add_module("fc1", torch.nn.Linear(dim1, dim2, bias=False))
        self.layers.add_module("bn", torch.nn.BatchNorm1d(dim2))
        self.layers.add_module(act_func + "1", nnActi.get_acti(act_func))

        self.layers.add_module("fc2", torch.nn.Linear(dim2, dim3, bias=False))
        self.layers.add_module("bn2", torch.nn.BatchNorm1d(dim3, momentum=mom))
        self.layers.add_module(act_func + "2", nnActi.get_acti(act_func))

        if dim4:
            self.layers.add_module("fc3",
                                   torch.nn.Linear(dim3, dim4, bias=False))
            self.layers.add_module("bn3", torch.nn.BatchNorm1d(dim4))
            self.layers.add_module(act_func + "3", nnActi.get_acti(act_func))
            self.layers.add_module("fc4", torch.nn.Linear(dim4, 1))
        else:
            self.layers.add_module("fc3", torch.nn.Linear(dim3, 1))

        if act_func_out:
            self.layers.add_module("bn_out", torch.nn.BatchNorm1d(1))
            self.layers.add_module("act_func_out",
                                   nnActi.get_acti(act_func_out))
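
A minimal usage sketch for the snippet above (assumptions: the full MLPRank class applies self.layers in its forward, and nnActi.get_acti("ReLU") returns torch.nn.ReLU()):

import torch

# Hypothetical usage; MLPRank and nnActi come from the surrounding project.
model = MLPRank(dim2=500, dim3=64)   # dim1 is fixed to 900 inside __init__
model.eval()                         # use running stats in the BatchNorm layers
x = torch.randn(4, 900)              # (batch, dim1)
score = model.layers(x)              # (4, 1): one scalar ranking score per item
print(score.shape)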
Example #2
    def __init__(self,
                 dim2=500,
                 dim3=64,
                 dim4=None,
                 act_func="ReLU",
                 d_rate1=0.6,
                 d_rate2=0.4,
                 d_rate3=0.2,
                 mom=0.1):
        super(MLPSoftmaxDropoutRank, self).__init__()
        dim1 = 900
        self.layers = torch.nn.Sequential()
        self.layers.add_module("fc1", torch.nn.Linear(dim1, dim2, bias=False))
        #self.layers.add_module("dp1", torch.nn.Dropout(d_rate1))
        self.layers.add_module("bn1", torch.nn.BatchNorm1d(dim2, momentum=mom))
        self.layers.add_module(act_func + "1", nnActi.get_acti(act_func))

        self.layers.add_module("fc2", torch.nn.Linear(dim2, dim3, bias=False))
        #self.layers.add_module("dp2", torch.nn.Dropout(d_rate2))
        self.layers.add_module("bn2", torch.nn.BatchNorm1d(dim3, momentum=mom))
        self.layers.add_module(act_func + "2", nnActi.get_acti(act_func))

        if dim4:
            self.layers.add_module("fc3",
                                   torch.nn.Linear(dim3, dim4, bias=False))
            self.layers.add_module("dp3", torch.nn.Dropout(d_rate3))
            self.layers.add_module(act_func + "3", nnActi.get_acti(act_func))
            self.layers.add_module("fc4", torch.nn.Linear(dim4, 3))
        else:
            self.layers.add_module("fc3", torch.nn.Linear(dim3, 3))
Example #3
    def __init__(self, dim2 = 500, dim3 = None, act_func = "ReLU", act_func_out = None, mom = 0.1, num_dim = 500):
        super(BasicLinear, self).__init__()
        self.num_dim = num_dim
        dim1 = self.num_dim*2
        self.layers = torch.nn.Sequential()
        #self.layers.add_module("bn0", torch.nn.BatchNorm1d(dim1))
        #self.layers.add_module("fc1", torch.nn.Linear(dim1, dim2, bias = False))
        self.layers.add_module("fc1", torch.nn.Linear(dim1, dim2))
        self.layers.add_module("bn", torch.nn.BatchNorm1d(dim2))
        self.layers.add_module(act_func + "1", nnActi.get_acti(act_func))
 
        if dim3:
            #self.layers.add_module("fc2", torch.nn.Linear(dim2, dim3, bias = False))
            self.layers.add_module("fc2", torch.nn.Linear(dim2, dim3))
            self.layers.add_module("bn2", torch.nn.BatchNorm1d(dim3, momentum = mom))
            self.layers.add_module(act_func + "2", nnActi.get_acti(act_func))
            self.layers.add_module("drop_out", torch.nn.Dropout(0.5))
            self.layers.add_module("fc3", torch.nn.Linear(dim3, 1))
        else:
            self.layers.add_module("drop_out", torch.nn.Dropout(0.5))
            self.layers.add_module("fc2", torch.nn.Linear(dim2, 1))
        
        # since the score ranges from -1 to 1, it may help to add an activation function on the output
        if act_func_out:
            self.layers.add_module("bn_out", torch.nn.BatchNorm1d(1, momentum = mom))
            self.layers.add_module(act_func_out, nnActi.get_acti(act_func_out))
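
The comment above notes that the score lies in [-1, 1]; a Tanh output activation maps to exactly that range. A hedged sketch (assuming nnActi.get_acti("Tanh") returns torch.nn.Tanh() and that the forward applies self.layers):

import torch

# Hypothetical usage of BasicLinear with a bounded output head.
model = BasicLinear(dim2=500, dim3=64, act_func="ReLU", act_func_out="Tanh")
model.eval()
x = torch.randn(8, model.num_dim * 2)   # dim1 = num_dim * 2 = 1000
y = model.layers(x)                     # (8, 1), values in (-1, 1) thanks to Tanh
print(y.min().item(), y.max().item())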
Example #4
    def __init__(self,
                 dim2=500,
                 dim3=64,
                 dim4=None,
                 act_func="ReLU",
                 d_rate=0.5):
        super(MLPSoftmaxRank, self).__init__()
        dim1 = 1500
        self.layers = torch.nn.Sequential()
        self.layers.add_module("fc1", torch.nn.Linear(dim1, dim2, bias=False))
        self.layers.add_module("bn", torch.nn.BatchNorm1d(dim2))
        self.layers.add_module(act_func + "1", nnActi.get_acti(act_func))

        self.layers.add_module("fc2", torch.nn.Linear(dim2, dim3, bias=False))
        self.layers.add_module("bn2", torch.nn.BatchNorm1d(dim3))
        self.layers.add_module(act_func + "2", nnActi.get_acti(act_func))

        if dim4:
            self.layers.add_module("fc3",
                                   torch.nn.Linear(dim3, dim4, bias=False))
            self.layers.add_module("bn3", torch.nn.BatchNorm1d(dim4))
            self.layers.add_module(act_func + "3", nnActi.get_acti(act_func))
            self.layers.add_module("fc4", torch.nn.Linear(dim4, 3))
        else:
            self.layers.add_module("fc3", torch.nn.Linear(dim3, 3))

        self.layers.add_module("softmax", torch.nn.Softmax(dim=1))
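
A hedged usage note: since the model ends in an explicit Softmax over 3 classes, torch.nn.CrossEntropyLoss (which expects raw logits) would apply softmax a second time; NLLLoss on log-probabilities is the usual companion for a head like this:

import torch
import torch.nn.functional as F

probs = torch.tensor([[0.2, 0.5, 0.3]])       # example output of the softmax head (batch=1, 3 classes)
target = torch.tensor([1])                    # gold rank class
loss = F.nll_loss(torch.log(probs), target)   # == -log(0.5)
print(loss.item())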
Example #5
    def __init__(self,
                 dim2=64,
                 dim3=None,
                 act_func='Tanh',
                 d_rate=0.5,
                 act_func_out=None):
        super(TriLinearRank, self).__init__()
        dim1 = 500
        self.li_s1 = torch.nn.Linear(dim1, dim1, bias=False)
        self.li_s2 = torch.nn.Linear(dim1, dim1, bias=False)
        self.li_ref = torch.nn.Linear(dim1, dim1, bias=False)
        self.act_func = nnActi.get_acti(act_func)

        self.layers = torch.nn.Sequential()
        self.layers.add_module("fc1", torch.nn.Linear(dim1, dim2))
        self.layers.add_module("act_func1", nnActi.get_acti(act_func))
        self.layers.add_module("dp1", torch.nn.Dropout(d_rate))
        if dim3:
            self.layers.add_module("fc2", torch.nn.Linear(dim2, dim3))
            self.layers.add_module("act_func2", nnActi.get_acti(act_func))
            self.layers.add_module("dp2", torch.nn.Dropout(d_rate))
            self.layers.add_module("fc_out", torch.nn.Linear(dim3, 1))
        else:
            self.layers.add_module("fc_out", torch.nn.Linear(dim2, 1))

        if act_func_out:
            self.layers.add_module("act_out", nnActi.get_acti(act_func_out))
Example #6
 def __init__(self,
              num_head=8,
              num_dim_k=64,
              num_dim_v=64,
              d_rate_attn=0.1,
              act_func1="LeakyReLU",
              dim2=100,
              act_func2="LeakyReLU"):
     """
     num_head: for Attn, the number of heads in MultiHeadAttention
     num_dim_k: for Attn, the dimension that query and key are mapped to
     num_dim_v: for Attn, the dimension that value is mapped to
     d_rate_attn: dropout rate for MultiHeadAttention
     """
     super(MultiHeadAttnMlpModel, self).__init__()
     num_dim = 500
     num_seq = 100
     self.attn = MultiHeadAttention(num_head, num_dim, num_dim_k, num_dim_v,
                                    d_rate_attn)
     self.bn = nn.BatchNorm1d(num_dim)
     self.mlp = nn.Sequential()
     self.mlp.add_module('fc1', nn.Linear(num_seq * num_dim, num_dim))
     self.mlp.add_module('bn1', nn.BatchNorm1d(num_dim))
     self.mlp.add_module('act_fun1', nnActi.get_acti(act_func1))
     self.mlp.add_module('fc2', nn.Linear(num_dim, dim2))
     self.mlp.add_module('bn2', nn.BatchNorm1d(dim2))
     self.mlp.add_module('act_fun2', nnActi.get_acti(act_func2))
     self.mlp.add_module('fc3', nn.Linear(dim2, 1))
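
fc1 is sized as num_seq * num_dim = 100 * 500 = 50,000 inputs, so the attention output presumably gets flattened before the MLP. A shape-arithmetic sketch (the forward is not shown; the (batch, num_seq, num_dim) layout is an assumption):

import torch

batch, num_seq, num_dim = 4, 100, 500
attn_out = torch.randn(batch, num_seq, num_dim)   # assumed output shape of self.attn
flat = attn_out.reshape(batch, -1)                # (4, 50000)
assert flat.shape[1] == num_seq * num_dim         # matches nn.Linear(num_seq * num_dim, num_dim)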
Example #7
 def __init__(self, dim2=10, dim3=None, act_func='Tanh', d_rate=0.5):
     super(MaskedModelRank1, self).__init__()
     self.li_mask = torch.nn.Linear(1500, 500)
     self.sf = torch.nn.Softmax()
     self.layers = torch.nn.Sequential()
     self.layers.add_module('fc1', torch.nn.Linear(1000, dim2))
     self.layers.add_module('act_fun1', nnActi.get_acti(act_func))
     self.layers.add_module('dp1', torch.nn.Dropout(d_rate))
     if dim3:
         self.layers.add_module('fc2', torch.nn.Linear(dim2, dim3))
         self.layers.add_module('act_func2', nnActi.get_acti(act_func))
         self.layers.add_module('dp2', torch.nn.Dropout(d_rate))
         self.layers.add_module('fc3', torch.nn.Linear(dim3, 1))
     else:
         self.layers.add_module('fc2', torch.nn.Linear(dim2, 1))
Example #8
 def __init__(self, dim2=256, dim3=64, act_func='Tanh'):
     """
     Should the last layer have an activation function, and how many layers are actually needed?
     """
     super(Autoencoder, self).__init__()
     dim = 500
     self.encoder = nn.Sequential(
         torch.nn.Linear(dim, dim2),
         nnActi.get_acti(act_func),
         torch.nn.Linear(dim2, dim3),
     )
     self.decoder = nn.Sequential(
         torch.nn.Linear(dim3, dim2),
         nnActi.get_acti(act_func),
         torch.nn.Linear(dim2, dim),
     )
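
A hedged training sketch for the autoencoder above. Only __init__ is shown, so routing the input through decoder(encoder(x)), the MSE reconstruction loss, and nnActi.get_acti("Tanh") returning torch.nn.Tanh() are assumptions:

import torch
import torch.nn.functional as F

model = Autoencoder(dim2=256, dim3=64, act_func="Tanh")
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
x = torch.randn(32, 500)                 # dim = 500 as in __init__
recon = model.decoder(model.encoder(x))  # assumed reconstruction path
loss = F.mse_loss(recon, x)              # reconstruction error
opt.zero_grad()
loss.backward()
opt.step()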
Example #9
 def __init__(self, act_func = "ReLU", mom = 0.1):
     super(TwoLayerLinear, self).__init__()
     dim1 = 1000
     self.layers = torch.nn.Sequential()
     self.layers.add_module('fc1', torch.nn.Linear(dim1, 1)) # the whole network is a single neuron: 1000 inputs, 1 output
     self.layers.add_module('bn', torch.nn.BatchNorm1d(1))
     self.layers.add_module('act', nnActi.get_acti(act_func))
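
A quick check of the "single neuron" remark: fc1 alone holds 1000 weights plus 1 bias, and BatchNorm1d(1) adds two more learnable parameters:

import torch

fc1 = torch.nn.Linear(1000, 1)
bn = torch.nn.BatchNorm1d(1)
print(sum(p.numel() for p in fc1.parameters()))  # 1001
print(sum(p.numel() for p in bn.parameters()))   # 2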
Example #10
 def __init__(self, dim2 = 64, act_func = "ReLU", act_func_out = None, d_rate = 0.5, mom = 0.1):
     super(TwoLayerRank2, self).__init__()
     #dim1 = 1500
     dim1 = 192 
     self.layers = torch.nn.Sequential()
     self.layers.add_module("fc1", torch.nn.Linear(dim1, dim2))
     self.layers.add_module("bn", torch.nn.BatchNorm1d(dim2, momentum = mom))
     self.layers.add_module("act_func2", nnActi.get_acti(act_func))
     self.layers.add_module("fc2", torch.nn.Linear(dim2, 1))
Example #11
 def __init__(self):
     super(Simple1, self).__init__()
     dim2 = 500
     act_func = 'Tanh'
     self.mlp = torch.nn.Sequential()
     self.mlp.add_module('fc1', torch.nn.Linear(1000, dim2))
     self.mlp.add_module('act_func1', nnActi.get_acti(act_func))
     self.mlp.add_module('dp1', torch.nn.Dropout(0.5))
     self.mlp.add_module('fc2', torch.nn.Linear(dim2, 1))
Example #12
    def __init__(self):
        super(Simple3, self).__init__()
        dim2 = 64
        act_func = 'ReLU'
        self.mlp = torch.nn.Sequential()
        self.mlp.add_module('fc1', torch.nn.Linear(500, dim2))
        self.mlp.add_module('act_func1', nnActi.get_acti(act_func))
#        self.mlp.add_module('bn1', torch.nn.BatchNorm1d(dim2))
        self.mlp.add_module('dp1', torch.nn.Dropout(0.5))
        self.mlp.add_module('fc2', torch.nn.Linear(dim2, 1))
Example #13
    def __init__(self, dim2 = 500, dim3 = None, act_func = "ReLU", act_func_out = None, d_rate1 = 0.5, d_rate2 = 0.5):
        super(BasicLinear_dropout, self).__init__()
        dim1 = 1000
        self.layers = torch.nn.Sequential()
        self.layers.add_module("fc1", torch.nn.Linear(dim1, dim2))
        self.layers.add_module(act_func + "1", nnActi.get_acti(act_func))
 
        if dim3:
            self.layers.add_module("fc2", torch.nn.Linear(dim2, dim3))
            self.layers.add_module(act_func + "2", nnActi.get_acti(act_func))
            self.layers.add_module("drop_out", torch.nn.Dropout(d_rate1))
            self.layers.add_module("fc3", torch.nn.Linear(dim3, 1))
        else:
            self.layers.add_module("drop_out", torch.nn.Dropout(d_rate2))
            self.layers.add_module("fc2", torch.nn.Linear(dim2, 1))
        
        # since the score ranges from -1 to 1, it may help to add an activation function on the output
        if act_func_out:
            self.layers.add_module(act_func_out, nnActi.get_acti(act_func_out))
Example #14
 def __init__(self, dim2=10, act_func='Tanh'):
     super(MaskedModel1, self).__init__()
     self.li_mask = torch.nn.Linear(500, 500)
     self.sf = torch.nn.Softmax()
     self.li_1 = None
     if dim2:
         self.li_1 = torch.nn.Linear(500, dim2)
         self.act_func = nnActi.get_acti(act_func)
         self.li_out = torch.nn.Linear(dim2, 1)
     else:
         self.li_out = torch.nn.Linear(500, 1)
Example #15
 def __init__(self, dim2=15, act_func="LeakyReLU", softmax=True):
     super(Conv3MlpModel_rank, self).__init__()
     dim_w = 300
     dim_h = 30
     self.conv_layer = nn.Sequential()
     self.conv_layer.add_module('conv1', nn.Conv3d(1, 128, (1, 2)))
     self.conv_layer.add_module('dp1', nn.Dropout3d(0.1))
     self.conv_layer.add_module('af1', nnActi.get_acti('ReLU'))
     self.conv_layer.add_module('mp1', nn.MaxPool3d((1, 1, 2)))
     self.conv_layer.add_module('conv2', nn.Conv3d(128, 16, (2, 2)))
     self.conv_layer.add_module('dp2', nn.Dropout3d(0.1))
     self.conv_layer.add_module('af2', nnActi.get_acti('ReLU'))
     self.conv_layer.add_module('mp2', nn.MaxPool3d((1, 2, 2)))
     self.conv_layer.add_module('conv3', nn.Conv3d(16, 1, (5, 5)))
     self.conv_layer.add_module('dp3', nn.Dropout3d(0.1))
     self.conv_layer.add_module('af3', nnActi.get_acti('ReLU'))
     self.conv_layer.add_module('mp3', nn.MaxPool3d((1, 5, 5)))
     if softmax:
         self.mlp = nn.Linear(28 * 3, 3)
     else:
         self.mlp = nn.Linear(28 * 3, 1)
Example #16
 def __init__(self,
              num_head=8,
              num_dim_k=64,
              num_dim_v=64,
              d_rate_attn=0.1,
              dim1=20,
              act_func1="LeakyReLU",
              kernel_size1=3,
              stride1=2,
              act_func2="LeakyReLU",
              kernel_size2=3,
              stride2=2):
     """
     problematic
     same problem as described above
     """
     num_dim = 500
     seq_len = 100
     super(MultiHeadAttnConvModel2, self).__init__()
     self.attn = MultiHeadAttention(num_head, num_dim, num_dim_k, num_dim_v,
                                    d_rate_attn)
     self.dim_conv_out1 = get_dim_out(seq_len, kernel_size1, stride1)
     self.dim_conv_out2 = get_dim_out(self.dim_conv_out1, kernel_size2,
                                      stride2)
     self.layers = nn.Sequential()
     self.layers.add_module("conv1",
                            nn.Conv1d(num_dim, dim1, kernel_size1, stride1))
     self.layers.add_module("bn1", nn.BatchNorm1d(dim1))
     self.layers.add_module("act_func1", nnActi.get_acti(act_func1))
     if self.dim_conv_out2 < 1:
         self.layers.add_module("conv2", nn.Conv1d(dim1, 1, 2, 1))
         self.dim_conv_out = get_dim_out(self.dim_conv_out1, 2, 1)
     else:
         self.layers.add_module("conv2",
                                nn.Conv1d(dim1, 1, kernel_size2, stride2))
         self.dim_conv_out = self.dim_conv_out2
     self.layers.add_module('bn2', nn.BatchNorm1d(1))
     self.layers.add_module('act_func2', nnActi.get_acti(act_func2))
     #self.layers.add_module("maxpool", nn.MaxPool1d(124))
     self.li = nn.Linear(self.dim_conv_out, 1, bias=True)
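
get_dim_out is not shown; for a Conv1d with padding 0 and dilation 1 (the settings used above) the output length is floor((L_in - kernel_size) / stride) + 1. A hedged sketch with a hypothetical helper name:

def get_dim_out_sketch(length_in, kernel_size, stride):
    # Conv1d output length for padding=0, dilation=1
    return (length_in - kernel_size) // stride + 1

# With seq_len = 100 and the defaults kernel_size1=3, stride1=2:
# conv1: (100 - 3) // 2 + 1 = 49, conv2: (49 - 3) // 2 + 1 = 24,
# so self.li would be nn.Linear(24, 1) under these assumptions.
print(get_dim_out_sketch(100, 3, 2), get_dim_out_sketch(49, 3, 2))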
Example #17
 def __init__(self, dim2=64, act_func="LeakyReLU", softmax=True):
     super(Conv2dMlpModel_rank, self).__init__()
     dim_w = 300
     dim_h = 50
     # conv_layer1 for one word feature
     self.conv_layer = nn.Sequential()
     self.conv_layer.add_module('conv1',
                                nn.Conv2d(6, 16, (1, 500), stride=(1, 1)))
     self.conv_layer.add_module('dp1', nn.Dropout2d(0.1))
     self.conv_layer.add_module('af1', nnActi.get_acti('ReLU'))
     #self.conv_layer.add_module('mp1', nn.MaxPool2d((1,1)))
     # conv_layer2 for two words feature
     self.conv_layer2 = nn.Sequential()
     self.conv_layer2.add_module(
         'conv2', nn.Conv2d(6,
                            16, (2, 500),
                            stride=(1, 500),
                            padding=(1, 0)))
     self.conv_layer2.add_module('dp2', nn.Dropout2d(0.1))
     self.conv_layer2.add_module('af2', nnActi.get_acti('ReLU'))
     #self.conv_layer.add_module('mp2', nn.MaxPool2d((2,1)))
     # conv_layer3 for three words feature
     self.conv_layer3 = nn.Sequential()
     self.conv_layer3.add_module(
         'conv3', nn.Conv2d(6,
                            16, (4, 500),
                            stride=(1, 500),
                            padding=(1, 0)))
     self.conv_layer3.add_module('dp3', nn.Dropout2d(0.1))
     self.conv_layer3.add_module('af3', nnActi.get_acti('ReLU'))
     #self.conv_layer.add_module('mp3', nn.MaxPool2d((2,5)))
     self.mlp = torch.nn.Sequential(
         torch.nn.Linear(1616, dim2),
         torch.nn.Dropout(0.5),
         #                torch.nn.BatchNorm1d(dim2),
         #                torch.nn.Tanh(),
         torch.nn.ReLU(),
         torch.nn.Linear(dim2, 3),
          torch.nn.LogSoftmax(dim=1))
Example #18
 def __init__(self, dim2 = 64, act_func = "ReLU", act_func_out = None, d_rate = 0.5, mom = 0.1):
     super(TwoLayerRank, self).__init__()
     dim1 = 1000
     #dim1 = 900
     #self.layers = torch.nn.Sequential()
     #self.layers.add_module("fc1", torch.nn.Linear(dim1, dim2))
     #self.layers.add_module("bn", torch.nn.BatchNorm1d(dim2, momentum = mom))
     #self.layers.add_module("act_func1", nnActi.get_acti(act_func))
     #self.layers.add_module("fc2", torch.nn.Linear(dim2, 3))
     self.fc1 = torch.nn.Linear(dim1, dim2)
     self.bn = torch.nn.BatchNorm1d(dim2, momentum = mom)
     self.af = nnActi.get_acti(act_func)
     self.fc2 = torch.nn.Linear(dim2, 3)
Example #19
    def __init__(self, num_layers = 2):
        super(Simple5, self).__init__()
        dim2 = 500
        act_func = 'ReLU'
        self.num_layers = num_layers
        self.mlp = torch.nn.Sequential()
        self.mlp.add_module('fc1', torch.nn.Linear(500, dim2))
        self.mlp.add_module('act_func1', nnActi.get_acti(act_func))
#        self.mlp.add_module('bn1', torch.nn.BatchNorm1d(dim2))
        self.mlp.add_module('dp1', torch.nn.Dropout(0.5))
        self.mlp.add_module('fc2', torch.nn.Linear(dim2, 1))
        self.weight_layers = torch.nn.Parameter(torch.randn(self.num_layers), requires_grad = True)
        self.sf = torch.nn.Softmax()
Example #20
 def __init__(self, dim2=128, dim3=16, act_func="LeakyReLU", softmax=True):
     num_dim = 300
     super(LSTMMlpModel_rank, self).__init__()
     self.rnn_ref = nn.LSTM(input_size=num_dim,
                            hidden_size=num_dim,
                            num_layers=1)
     self.rnn_s1 = nn.LSTM(input_size=num_dim,
                           hidden_size=num_dim,
                           num_layers=1)
     self.rnn_s2 = nn.LSTM(input_size=num_dim,
                           hidden_size=num_dim,
                           num_layers=1)
     self.mlp = nn.Sequential()
     self.mlp.add_module('fc1', nn.Linear(num_dim * 3, dim2))
     self.mlp.add_module('bn1', nn.BatchNorm1d(dim2))
     self.mlp.add_module('act_fun', nnActi.get_acti(act_func))
     self.mlp.add_module('fc2', nn.Linear(dim2, dim3))
     self.mlp.add_module('bn2', nn.BatchNorm1d(dim3))
     self.mlp.add_module('act_fun2', nnActi.get_acti(act_func))
     if softmax:
         self.mlp.add_module('fc3', nn.Linear(dim3, 3))
     else:
         self.mlp.add_module('fc3', nn.Linear(dim3, 1))
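
fc1 expects num_dim * 3 = 900 features, which matches concatenating one num_dim-sized vector from each of the three LSTMs. A shape sketch (the forward is not shown; taking the final hidden state of each stream is an assumption):

import torch

num_dim, batch = 300, 4
h_ref, h_s1, h_s2 = (torch.randn(batch, num_dim) for _ in range(3))
feat = torch.cat([h_ref, h_s1, h_s2], dim=1)   # (4, 900)
assert feat.shape[1] == num_dim * 3            # matches nn.Linear(num_dim * 3, dim2)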
Example #21
 def __init__(self, dim2=16, act_func="LeakyReLU", softmax=True):
     super(Conv2dMlpModel_rank, self).__init__()
     dim_w = 300
     dim_h = 50
     # conv_layer1 for one word feature
     self.conv_layer = nn.Sequential()
     self.conv_layer.add_module('conv1',
                                nn.Conv2d(6, 32, (1, 500), stride=(1, 500)))
     self.conv_layer.add_module('dp1', nn.Dropout2d(0.1))
     self.conv_layer.add_module('af1', nnActi.get_acti('ReLU'))
     #self.conv_layer.add_module('mp1', nn.MaxPool2d((1,1)))
     # conv_layer2 for two words feature
     self.conv_layer2 = nn.Sequential()
     self.conv_layer2.add_module(
         'conv2', nn.Conv2d(6,
                            32, (2, 500),
                            stride=(1, 500),
                            padding=(1, 0)))
     self.conv_layer2.add_module('dp2', nn.Dropout2d(0.1))
     self.conv_layer2.add_module('af2', nnActi.get_acti('ReLU'))
     #self.conv_layer.add_module('mp2', nn.MaxPool2d((2,1)))
     # conv_layer3 for three words feature
     self.conv_layer3 = nn.Sequential()
     self.conv_layer3.add_module(
         'conv3', nn.Conv2d(6,
                            16, (4, 500),
                            stride=(1, 500),
                            padding=(1, 0)))
     self.conv_layer3.add_module('dp3', nn.Dropout2d(0.1))
     self.conv_layer3.add_module('af3', nnActi.get_acti('ReLU'))
     #self.conv_layer.add_module('mp3', nn.MaxPool2d((2,5)))
     self.mlp = nn.Sequential()
     self.mlp.add_module('li1', nn.Linear(1952, 16))
     if softmax:
         self.mlp.add_module('li2', nn.Linear(16, 3))
     else:
         self.mlp.add_module('li2', nn.Linear(16, 1))
Example #22
 def __init__(self, num_layers = 2):
     """
     When the linear output is only 1-dimensional the results are poor, which suggests that a single dimension cannot carry enough information.
     """
     super(Simple8, self).__init__()
     dim2 = 16
     act_func = 'ReLU'
     self.num_layers = num_layers
     self.num_dim = 500
     self.sf = torch.nn.Softmax()
     self.cap = CapsLayer(self.num_layers, 1, self.num_dim, 64)
     self.mlp = torch.nn.Sequential()
     self.mlp.add_module('fc1', torch.nn.Linear(128, dim2))
     self.mlp.add_module('act_func1', nnActi.get_acti(act_func))
     self.mlp.add_module('dp1', torch.nn.Dropout(0.5))
     self.mlp.add_module('fc2', torch.nn.Linear(dim2, 1))
Example #23
 def __init__(self,
              num_head=8,
              num_dim_k=64,
              num_dim_v=64,
              d_rate_attn=0.1,
              dim2=100,
              act_func2="LeakyReLU"):
     num_dim = 500
     super(MultiHeadAttnLSTMModel, self).__init__()
     self.attn = MultiHeadAttention(num_head, num_dim, num_dim_k, num_dim_v,
                                    d_rate_attn)
     self.rnn = nn.LSTM(input_size=500, hidden_size=500, num_layers=2)
     self.mlp = nn.Sequential()
     self.mlp.add_module('fc1', nn.Linear(500, dim2))
     self.mlp.add_module('bn2', nn.BatchNorm1d(dim2))
     self.mlp.add_module('act_fun2', nnActi.get_acti(act_func2))
     self.mlp.add_module('fc3', nn.Linear(dim2, 1))
Example #24
 def __init__(self):
     super(Simple4, self).__init__()
     dim2 = 64
     act_func = 'ReLU'
     self.conv_layers = torch.nn.Sequential()
     self.conv_layers.add_module('conv1', torch.nn.Conv1d(1, 64, 3, stride = 1))
     self.conv_layers.add_module('dp1', torch.nn.Dropout(0.1))
     self.conv_layers.add_module('af1', nnActi.get_acti('ReLU'))
     self.conv_layers.add_module('conv2', torch.nn.Conv1d(64, 8, 3, stride = 3))
     self.mlp = torch.nn.Sequential(
             torch.nn.Linear(1328, dim2),
             torch.nn.Dropout(0.1),
             torch.nn.ReLU(),
             torch.nn.Linear(dim2, 1)
             )
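
Where the 1328 in the Linear layer comes from (a sketch; the input length of 500 is an inference consistent with the 500-dimensional features used elsewhere in these examples):

import torch

# Conv1d(1, 64, 3, stride=1): 500 -> 500 - 3 + 1 = 498
# Conv1d(64, 8, 3, stride=3): 498 -> (498 - 3) // 3 + 1 = 166
# Flattened: 8 channels * 166 = 1328, matching torch.nn.Linear(1328, dim2).
convs = torch.nn.Sequential(
    torch.nn.Conv1d(1, 64, 3, stride=1),
    torch.nn.Conv1d(64, 8, 3, stride=3),
)
x = torch.randn(4, 1, 500)
print(convs(x).flatten(1).shape)  # torch.Size([4, 1328])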
Example #25
    def __init__(self, num_layers = 2):
        """
        When the linear output is only 1-dimensional the results are poor, which suggests that a single dimension cannot carry enough information.
        """
        super(Simple6, self).__init__()
        dim2 = 128
        act_func = 'ReLU'
        self.num_layers = num_layers
        self.num_dim = 500
        self.weight_layers = torch.nn.Parameter(torch.ones(self.num_layers), requires_grad = True)
        #self.weight_dimension = torch.nn.Parameter(torch.FloatTensor(self.num_dim), requires_grad = True)
        self.weight_dimension = torch.nn.Parameter(torch.randn(self.num_dim), requires_grad = True)
        self.sf = torch.nn.Softmax()
        self.mlp = torch.nn.Sequential()
        self.mlp.add_module('fc1', torch.nn.Linear(500, dim2))
        self.mlp.add_module('act_func1', nnActi.get_acti(act_func))
#        self.mlp.add_module('bn1', torch.nn.BatchNorm1d(dim2))
        self.mlp.add_module('dp1', torch.nn.Dropout(0.5))
        self.mlp.add_module('fc2', torch.nn.Linear(dim2, 64))
Example #26
 def __init__(self,
              d_rate_attn=0.1,
              dim2=256,
              dim3=64,
              act_func="LeakyReLU",
              softmax=True):
     num_dim = 500
     super(ScaledDotAttnMlpModel_rank, self).__init__()
     self.attn_s1 = ScaledDotProductAttention(num_dim, d_rate_attn)
     self.attn_s2 = ScaledDotProductAttention(num_dim, d_rate_attn)
     self.mlp = nn.Sequential()
     self.mlp.add_module('fc1', nn.Linear(num_dim * 2, dim2))
     self.mlp.add_module('bn1', nn.BatchNorm1d(dim2))
     self.mlp.add_module('act_fun', nnActi.get_acti(act_func))
     #        self.mlp.add_module('fc2', nn.Linear(dim2, dim3))
     #        self.mlp.add_module('bn2', nn.BatchNorm1d(dim3))
     #        self.mlp.add_module('act_fun2', nnActi.get_acti(act_func))
     if softmax:
         self.mlp.add_module('fc3', nn.Linear(dim2, 3))
     else:
         self.mlp.add_module('fc3', nn.Linear(dim2, 1))