Example #1
    def __init__(self, model_name, input_size, hidden_size, batch_size, kernel_size,
                 out_channels, num_layers=1, dropout=0, bidirectional=False, bn=False):
        super(LACModelUnit, self).__init__()

        # Get the number of available GPUs
        self.cuda_ids = np.arange(torch.cuda.device_count())

        self.model_name = model_name
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.batch_size = int(batch_size / len(self.cuda_ids))
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.num_layers = num_layers
        self.dropout = dropout
        self.bidirectional = bidirectional
        self.bn = bn

        # Create the LSTM
        self.rnn = modules.LSTM(self.input_size, self.hidden_size, self.num_layers,
                                batch_first=True, bidirectional=self.bidirectional)
        # Activation applied to the LSTM output
        self.rnn_act = modules.ReLU()
        # Create the 1D CNN
        self.cnn = modules.Conv1d(1, self.out_channels, self.kernel_size)
        # BatchNorm layer (note: this reassigns self.bn, shadowing the bn flag stored above)
        self.bn = modules.BatchNorm1d(self.out_channels)
        # Activation applied to the 1D-CNN output
        self.cnn_act = modules.Tanh()
        # Dropout layer
        self.drop = modules.Dropout(dropout)

        # Initialize the LSTM hidden and cell states
        self.lstm_hidden, self.lstm_cell = self.init_hidden_cell(self.batch_size)
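The constructor ends by calling self.init_hidden_cell, which is not included in the snippet. A minimal sketch of such a method, assuming zero-initialised hidden and cell states sized for the (possibly bidirectional) LSTM:

    # Not part of the original snippet: a hypothetical init_hidden_cell,
    # assuming zero-initialised states.
    def init_hidden_cell(self, batch_size):
        num_directions = 2 if self.bidirectional else 1
        shape = (self.num_layers * num_directions, batch_size, self.hidden_size)
        return torch.zeros(shape), torch.zeros(shape)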
Example #2
 def __init__(self, nc, DIM, cb_num=8, drop_rate=0.3):
     super().__init__()
     self.prob = drop_rate
     self.encoder = Encoder(cb_num)
     fea_dim = int(64 * DIM / (2**cb_num))  # 256
     h_dim = 128
     self.linear1 = nn.Linear(fea_dim, h_dim)
     self.bn1 = nn.BatchNorm1d(h_dim)
     self.linear2 = nn.Linear(h_dim, nc)
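The snippet stores drop_rate in self.prob but does not show the forward pass. One plausible forward, assuming Encoder(cb_num) emits a (batch, 64, DIM / 2**cb_num) feature map and that torch.nn.functional is imported as F:

 # Hypothetical forward pass (not in the original snippet).
 def forward(self, x):
     x = self.encoder(x)                                     # assumed: (batch, 64, DIM / 2**cb_num)
     x = x.flatten(1)                                        # -> (batch, fea_dim)
     x = F.relu(self.bn1(self.linear1(x)))                   # project to h_dim, normalise, activate
     x = F.dropout(x, p=self.prob, training=self.training)   # apply the stored dropout rate
     return self.linear2(x)                                  # (batch, nc) class logits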
Example #3
def conv_block(in_channels, out_channels):
    return nn.Sequential(
        nn.Conv1d(in_channels,
                  out_channels,
                  kernel_size=K_SIZE,
                  padding=PADDING),
        nn.BatchNorm1d(out_channels),
        nn.ReLU(),
        nn.MaxPool1d(kernel_size=2),
    )
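conv_block relies on the module-level constants K_SIZE and PADDING, which are not shown here. A usage sketch with assumed values (K_SIZE=3, PADDING=1) to illustrate the resulting shapes:

import torch
import torch.nn as nn

K_SIZE, PADDING = 3, 1                # assumed values, not from the original code
block = conv_block(in_channels=1, out_channels=16)
x = torch.randn(8, 1, 128)            # (batch, channels, length)
y = block(x)                          # conv keeps length 128, max pool halves it
print(y.shape)                        # torch.Size([8, 16, 64])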
Example #4
 def __init__(self, input_num, hidden_num, output_num):
     super(Net, self).__init__()
     self.seq = nm.Sequential(
         # BinarizeLinear(input_num, hidden_num),
         # nm.ReLU(),
         # BinarizeLinear(hidden_num, output_num),
         # #______________________________________
         # nn.BatchNorm1d(input_num),
         # nn.Linear(input_num, hidden_num),
         # nn.ReLU(),
         # nn.BatchNorm1d(hidden_num),
         # nn.Linear(hidden_num, output_num),
         # #______________________________________
         nm.BatchNorm1d(input_num),
         BinarizeLinear(input_num, hidden_num),
         nm.BatchNorm1d(hidden_num),
         BinarizeLinear(hidden_num, output_num),
         # BinarizeLinear(hidden_num, output_num),
     )
Example #5
    def __init__(self,
                 model_name,
                 input_size,
                 hidden_size,
                 batch_size,
                 kernel_size,
                 out_channels,
                 output_size,
                 num_layers=1,
                 dropout=0,
                 bidirectional=False,
                 bn=False):
        super(LACModel, self).__init__()

        self.model_name = model_name
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.batch_size = batch_size
        self.kernel_size = kernel_size
        self.out_channels = out_channels
        self.output_size = output_size
        self.num_layers = num_layers
        self.dropout = dropout
        self.bidirectional = bidirectional
        self.bn = bn

        self.pm_model = LACModelUnit('PM-{0}'.format(model_name), input_size,
                                     hidden_size, batch_size, kernel_size,
                                     out_channels, num_layers, dropout,
                                     bidirectional, bn)
        self.gm_model = LACModelUnit('GM-{0}'.format(model_name), input_size,
                                     hidden_size, batch_size, kernel_size,
                                     out_channels, num_layers, dropout,
                                     bidirectional, bn)
        self.sm_model = LACModelUnit('SM-{0}'.format(model_name), input_size,
                                     hidden_size, batch_size, kernel_size,
                                     out_channels, num_layers, dropout,
                                     bidirectional, bn)
        # Determine the number of LSTM directions (bidirectional or not)
        if bidirectional:
            num_directions = 2
        else:
            num_directions = 1

        # Create the fully connected layer
        self.fc = modules.Linear(
            (hidden_size * num_directions - kernel_size + 1) * out_channels *
            3, output_size)
        # BatchNorm layer
        self.bn = modules.BatchNorm1d(output_size)
        # Output activation (softmax over the output classes)
        self.activation = modules.Softmax(dim=1)
Example #6
 def __init__(self, DIM):
     super().__init__()
     NUM_BLOCK = 8
     FEATURE_CHN = 64
     x_dim = int(FEATURE_CHN *
                 (DIM // 2**NUM_BLOCK))  # 2048:8192; 1024:4096
     feature = 256  # 100(original), 64(CW2SQ), 32, 16. choose: 256, 16
     # print('The NUM of ConvBlocK: {}'.format(NUM_BLOCK))
     print('The FC features: {}\n'.format(feature))
     self.create_feat = nn.Linear(x_dim,
                                  feature)  # weight shape: (feature, x_dim)
     self.discriminator = nn.Sequential(nn.BatchNorm1d(feature), nn.ReLU(),
                                        nn.Linear(feature, 2))
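Here too only the constructor is shown. A hedged sketch of a forward pass, assuming the incoming features are flattened to x_dim before the projection:

 # Hypothetical forward pass (not in the original snippet).
 def forward(self, x):
     x = x.flatten(1)                   # assumed input: (batch, 64, DIM // 2**NUM_BLOCK) -> (batch, x_dim)
     feat = self.create_feat(x)         # (batch, feature)
     return self.discriminator(feat)    # (batch, 2) logits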
Example #7
    def __init__(self, in_channels, out_channels, stride=1, M=2, r=16, L=32):
        """
        :param in_channels:  number of input channels
        :param out_channels: number of output channels (the original paper keeps the
                             input and output channel dimensions the same)
        :param stride: stride, defaults to 1
        :param M: number of branches
        :param r: ratio used to compute the length d of feature vector Z (in the paper,
                  S -> Z is a dimensionality reduction, so a lower bound on the reduced
                  dimension must be specified)
        :param L: lower bound on the length of Z as given in the paper, defaults to 32
        """
        super(SK_Conv1d, self).__init__()
        d = max(in_channels // r, L)  # compute the length d of the vector Z
        self.M = M
        self.out_channels = out_channels
        self.conv = nn.ModuleList()  # one convolution branch per M, each with a different effective kernel
        for i in range(M):
            # For efficiency, the original paper replaces the 5x5 dilated convolution with a
            # 3x3 convolution with dilation=2, and recommends grouped convolution with G=32,
            # so each group only processes out_channels / groups = 2 channels.
            self.conv.append(
                nn.Sequential(
                    nn.Conv1d(in_channels,
                              out_channels,
                              3,
                              stride,
                              padding=1 + i,
                              dilation=1 + i,
                              groups=32,
                              bias=False), nn.BatchNorm1d(out_channels),
                    nn.ReLU(inplace=True)))
        self.global_pool = nn.AdaptiveAvgPool1d(1)  # adaptive pooling to length 1, i.e. global average pooling (GAP)

        self.fc1 = nn.Sequential(nn.Conv1d(out_channels, d, 1, bias=False),
                                 nn.BatchNorm1d(d),
                                 nn.ReLU(inplace=True))  # 降维
        self.fc2 = nn.Conv1d(d, out_channels * M, 1, 1, bias=False)  # expand back to out_channels * M
        # self.fcs = nn.ModuleList(self.fc1, self.fc2)
        self.softmax = nn.Softmax(dim=1)  # dim=1 so the softmax runs across the M branches at each position, making the weights sum to 1
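Only the constructor is shown here. A sketch of the usual selective-kernel fusion these modules implement (split into M branches, fuse, squeeze via GAP, excite back to M * out_channels, softmax across branches, weighted sum); this follows the standard SKNet recipe rather than the author's original forward method:

    # Hypothetical forward pass following the standard SK fusion.
    def forward(self, x):
        batch_size = x.size(0)
        feats = [branch(x) for branch in self.conv]    # M tensors of shape (B, C, L)
        U = sum(feats)                                 # element-wise fusion of the branches
        s = self.global_pool(U)                        # (B, C, 1): global average pooling
        z = self.fc1(s)                                # (B, d, 1): squeeze
        a = self.fc2(z).view(batch_size, self.M, self.out_channels, 1)
        a = self.softmax(a)                            # attention weights across the M branches
        return sum(a[:, i] * f for i, f in enumerate(feats))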
Example #8
 def __init__(self, input_num, hidden_num, output_num):
     super(Net, self).__init__()
     self.seq = nm.Sequential(nm.BatchNorm1d(input_num),
                              nm.Linear(input_num, hidden_num), nm.ReLU(),
                              nm.BatchNorm1d(hidden_num),
                              nm.Linear(hidden_num, output_num))
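A usage sketch, assuming nm is an alias for torch.nn (e.g. import torch.nn as nm) and purely illustrative layer sizes; since Net's forward method is not shown, self.seq is called directly:

import torch
model = Net(input_num=784, hidden_num=256, output_num=10)   # sizes are illustrative only
logits = model.seq(torch.randn(32, 784))                    # (32, 10); BatchNorm1d needs batch size > 1 in training mode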