Example 1
    def __init__(
        self,
        vocab_size,
        embedding_size,
        input_dropout,
        fc_hidden_size,
        num_classes,
        rnn_units,
        rnn_layers,
        rnn_dropout,
    ):
        super().__init__()

        self.embedding = nn.Embedding(
            vocab_size + 1,
            embedding_size,
            padding_idx=0,
        )

        self.dropout = nn.Dropout(input_dropout)

        self.rnn = nn.GRU(
            input_size=embedding_size,
            hidden_size=rnn_units,
            num_layers=rnn_layers,
            batch_first=True,
            dropout=rnn_dropout,
            bidirectional=True,
        )

        self.global_pooling_max = nn.AdaptiveMaxPool1d(1)
        self.global_pooling_avg = nn.AdaptiveAvgPool1d(1)

        self.fc = nn.Sequential(
            nn.Linear(2 * 2 * rnn_units, fc_hidden_size),
            nn.ReLU(),
            nn.Linear(fc_hidden_size, num_classes),
        )
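A minimal forward-pass sketch for the module above (my own illustration with assumed shapes, not part of the original source): the bidirectional GRU doubles rnn_units, and concatenating the max- and average-pooled summaries doubles it again, which is why the first linear layer takes 2 * 2 * rnn_units features.

    import torch

    # Hypothetical forward pass; `model` is an instance of the class above.
    def forward_sketch(model, tokens):                      # tokens: (batch, seq_len) token ids
        x = model.dropout(model.embedding(tokens))          # (batch, seq_len, embedding_size)
        rnn_out, _ = model.rnn(x)                           # (batch, seq_len, 2 * rnn_units)
        rnn_out = rnn_out.transpose(1, 2)                   # (batch, 2 * rnn_units, seq_len)
        pooled_max = model.global_pooling_max(rnn_out).squeeze(-1)  # (batch, 2 * rnn_units)
        pooled_avg = model.global_pooling_avg(rnn_out).squeeze(-1)  # (batch, 2 * rnn_units)
        features = torch.cat([pooled_max, pooled_avg], dim=1)       # (batch, 2 * 2 * rnn_units)
        return model.fc(features)                           # (batch, num_classes)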
Example 2
    def __init__(self, n_classes: int, embedding_size: int, kernel_sizes_cnn: List[int],
                 filters_cnn: Union[int, List[int]], dense_size: int, dropout_rate: float = 0.0,
                 embedded_tokens: bool = True, vocab_size: Optional[int] = None, **kwargs):
        super().__init__()
        self.embedded_tokens = embedded_tokens
        self.kernel_sizes_cnn = kernel_sizes_cnn

        if not embedded_tokens and vocab_size:
            self.embedding = nn.Embedding(vocab_size, embedding_size)
        if isinstance(filters_cnn, int):
            filters_cnn = len(kernel_sizes_cnn) * [filters_cnn]

        for i in range(len(kernel_sizes_cnn)):
            setattr(self, "conv_" + str(i), nn.Conv1d(embedding_size, filters_cnn[i], kernel_sizes_cnn[i],
                                                      padding=kernel_sizes_cnn[i]))
            setattr(self, "bn_" + str(i), nn.BatchNorm1d(filters_cnn[i]))
            setattr(self, "relu_" + str(i), nn.ReLU())
            setattr(self, "pool_" + str(i), nn.AdaptiveMaxPool1d(1))

        self.dropout = nn.Dropout(dropout_rate)
        self.dense = nn.Linear(sum(filters_cnn), dense_size)
        self.relu_dense = nn.ReLU()
        self.final_dense = nn.Linear(dense_size, n_classes)
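A hedged forward sketch for the module above (assumed shapes, not from the original project): each kernel-size branch is convolved, batch-normalized, activated, and pooled to length 1; concatenating the branches yields the sum(filters_cnn) features that self.dense expects.

    import torch

    # Hypothetical forward pass; `model` is an instance of the class above.
    def forward_sketch(model, x):             # x: (batch, seq_len, embedding_size), or (batch, seq_len) ids
        if not model.embedded_tokens:
            x = model.embedding(x)                          # (batch, seq_len, embedding_size)
        x = x.transpose(1, 2)                               # (batch, embedding_size, seq_len)
        pooled = []
        for i in range(len(model.kernel_sizes_cnn)):
            h = getattr(model, "conv_" + str(i))(x)
            h = getattr(model, "relu_" + str(i))(getattr(model, "bn_" + str(i))(h))
            pooled.append(getattr(model, "pool_" + str(i))(h).squeeze(-1))  # (batch, filters_cnn[i])
        h = model.dropout(torch.cat(pooled, dim=1))         # (batch, sum(filters_cnn))
        return model.final_dense(model.relu_dense(model.dense(h)))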
Example 3
    def __init__(self, input_channel, output_class):

        super(SignalRecognitionNet_v4, self).__init__()
        self.conv1 = nn.Conv1d(input_channel, 4, 3, 1, 1)
        self.bn1 = nn.BatchNorm1d(4)
        self.conv2 = nn.Conv1d(4, 4, 3, stride=1, padding=1)  # P0
        self.bn2 = nn.BatchNorm1d(4)
        self.conv3 = nn.Conv1d(4, 8, 3, 2, 1)  # P1 /2
        self.bn3 = nn.BatchNorm1d(8)
        self.conv4 = nn.Conv1d(8, 16, 3, 1, 1)  # P2 /4
        self.bn4 = nn.BatchNorm1d(16)
        self.conv5 = nn.Conv1d(16, 32, 3, 2, 1)  # P3 /8
        self.bn5 = nn.BatchNorm1d(32)
        self.conv6 = nn.Conv1d(32, 64, 3, 1, 1)  # P4 /16
        self.bn6 = nn.BatchNorm1d(64)
        self.conv7 = nn.Conv1d(64, 32, 3, 2, 1)  # P5 /32
        self.bn7 = nn.BatchNorm1d(32)

        self.roi_pooling = nn.AdaptiveMaxPool1d(32)
        self.liner1 = nn.Linear(32 * 32, 32 * 16)
        self.liner2 = nn.Linear(32 * 16, 16 * 16)
        self.liner3 = nn.Linear(16 * 16, 16 * 8)
        self.liner4 = nn.Linear(128, output_class)
Example 4
 def __init__(self,
              block=ResidualBlock,
              layers=[2, 2, 2, 2, 2, 2],
              num_classes=28):
     super(KKstreamModelRNNCNN3, self).__init__()
     self.in_channels = 16
     self.bi_gru = nn.GRU(23,
                          128,
                          num_layers=3,
                          dropout=0.375,
                          bidirectional=True)
     self.conv1 = conv1x7(256, 16, padding=3)
     self.bn1 = nn.BatchNorm1d(16)
     self.elu = nn.ELU(inplace=True)
     self.layer1 = self.make_layer(block, 16, layers[0])
     self.layer2 = self.make_layer(block, 32, layers[1], 2)
     self.layer3 = self.make_layer(block, 64, layers[2], 2)  #small
     self.layer4 = self.make_layer(block, 128, layers[3], 2)  #big
     self.layer5 = self.make_layer(block, 256, layers[4], 2)  #large
     self.layer6 = self.make_layer(block, 512, layers[5], 2)  #large
     self.gap = nn.AdaptiveMaxPool1d(1)
     self.fc = nn.Linear(512, num_classes)
     self.ativ = nn.Sigmoid()
Example 5
    def __init__(self,
                 input_channel_count,
                 output_channel_count,
                 kernel_size=5):
        """
        Creates a model for combining character embeddings through a 1D
        convolution.

        Args:
          - input_channel_count: The number of channels in the input
                                 (input depth).
          - output_channel_count (int): The number of filters used in the 1D
                                 convolution. This gives the number of output
                                 channels (output depth). In our case, the number
                                 of filters will be embed_word_size (e_word).
          - kernel_size (int): The size of the window used to compute features.
                               Default: 5
        """
        super(CNN, self).__init__()
        self.conv = nn.Conv1d(in_channels=input_channel_count,
                              out_channels=output_channel_count,
                              kernel_size=kernel_size)
        self.maxpool = nn.AdaptiveMaxPool1d(output_size=1)
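A short usage sketch for the CNN above (hypothetical sizes, not from the original repository): the convolution slides over the character positions of each word, and the adaptive max pool collapses whatever length remains to one feature per filter, giving a fixed-size word embedding.

    import torch

    batch_size, e_char, word_len, e_word = 32, 50, 21, 256   # assumed sizes
    cnn = CNN(input_channel_count=e_char, output_channel_count=e_word, kernel_size=5)

    x = torch.randn(batch_size, e_char, word_len)   # character embeddings of one word per row
    conv_out = cnn.conv(x)                          # (batch_size, e_word, word_len - kernel_size + 1)
    word_emb = cnn.maxpool(conv_out).squeeze(-1)    # (batch_size, e_word)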
Example 6
    def __init__(self, bert_vocab_num, emb_dim, hidden_dim, output_dim):
        super(Baseline, self).__init__()
        self.bert_vocab_num = bert_vocab_num
        self.emb_dim = emb_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim

        self.embedding_layer = nn.Embedding(
            num_embeddings=bert_vocab_num,
            embedding_dim=emb_dim,
            padding_idx=0,
        )
        self.gru_layer = PackingRNN(
            nn.GRU(input_size=emb_dim,
                   hidden_size=hidden_dim,
                   num_layers=1,
                   batch_first=True,
                   bidirectional=True))
        self.conv_layer = SamePaddingConv1d(emb_dim, hidden_dim, kernel_size=3)
        self.max_pooler = nn.AdaptiveMaxPool1d(1)
        self.fc = nn.Sequential(nn.Linear(hidden_dim * 3, hidden_dim),
                                nn.BatchNorm1d(hidden_dim), nn.ReLU(),
                                nn.Linear(hidden_dim, output_dim))
Example 7
    def __init__(self, ntokens: int, char_embedding_dim: int,
                 char_padding_idx: int, dropout: float, kernel_size: int,
                 out_channels: int, target_emb: int, use_highway: bool):
        super(WordCharCNNEmbedding, self).__init__()
        self._use_highway = use_highway
        self._char_padding_idx = char_padding_idx
        self.embedding_size = out_channels

        if self._use_highway and out_channels != target_emb:
            raise ValueError("out_channels and target_emb must be "
                             "equal in highway setting")

        print("asdfsadf", ntokens)
        self.char_embedding = nn.Embedding(ntokens, char_embedding_dim,
                                           char_padding_idx)
        self.conv_embedding = nn.Sequential(
            nn.Dropout(p=dropout),
            nn.Conv1d(in_channels=char_embedding_dim,
                      out_channels=out_channels,
                      kernel_size=kernel_size,
                      padding=kernel_size - 1), nn.AdaptiveMaxPool1d(1))
        self.proj_layer = TimeDistributed(nn.Linear(out_channels, target_emb))
        self.out_dropout = nn.Dropout(p=dropout)
Example 8
 def __init__(self, in_channel=1, out_channel=10):
     super(Mexhat_LeNet, self).__init__()
     self.conv1 = nn.Sequential(
         Mexh_fast(64, 16),
         nn.BatchNorm1d(64),
         nn.ReLU(),
         nn.MaxPool1d(kernel_size=2, stride=2),
     )
     self.conv2 = nn.Sequential(
         nn.Conv1d(64, 16, 5),
         nn.BatchNorm1d(16),
         nn.ReLU(),
         nn.AdaptiveMaxPool1d(25)  # adaptively resizes the output to (16, 25)
     )
     self.fc1 = nn.Sequential(
         nn.Linear(16 * 25, 120),
         nn.ReLU()
     )
     self.fc2 = nn.Sequential(
         nn.Linear(120, 84),
         nn.ReLU()
     )
     self.fc3 = nn.Linear(84, out_channel)
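As a sanity check on the corrected comment above, a small snippet (input lengths chosen arbitrarily) showing that nn.AdaptiveMaxPool1d(25) always produces 16 x 25 features regardless of the incoming length, which matches the nn.Linear(16 * 25, 120) that follows.

    import torch
    import torch.nn as nn

    pool = nn.AdaptiveMaxPool1d(25)
    for length in (60, 101, 500):                  # arbitrary incoming sequence lengths
        x = torch.randn(8, 16, length)             # (batch, channels, length)
        assert pool(x).shape == (8, 16, 25)        # flattens to 8 x (16 * 25) for fc1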
Example 9
 def __init__(self, encoder, w_init=xavier_init):
     super().__init__()
     self.encoder = encoder
     self.total_labels = 264
     self.layers = nn.ModuleList([
         nn.Conv1d(in_channels=64, out_channels=32,
             kernel_size=3, stride=1,  padding=1),
         nn.BatchNorm1d(32),
         nn.ReLU(inplace=True),
         nn.Conv1d(in_channels=32, out_channels=16,
             kernel_size=3, stride=1,  padding=1),
         nn.BatchNorm1d(16),
         nn.ReLU(inplace=True),
         nn.Conv1d(in_channels=16, out_channels=1,
             kernel_size=3, stride=1,  padding=1),
         nn.ReLU(inplace=True),
         nn.AdaptiveMaxPool1d(1000),
         nn.ReLU(inplace=True),
         nn.Linear(1000, 264),
         nn.Softmax(dim=0),
     ])
     for layer in self.layers:
         w_init(layer)
Example 10
    def __init__(self, num_classes: int, embedding_dim: int, k_max: int,
                 vocab: Vocab) -> None:
        """

        :param num_classes: the number of classes
        :param embedding_dim: Dimension of embedding vector for token
        :param k_max: k_max pooling
        :param vocab: gluonnlp
        """
        super(VDCNN, self).__init__()
        self._structure = nn.Sequential(
            nn.Embedding(16, embedding_dim, 0), Permute(),
            nn.Conv1d(embedding_dim, 64, kernel_size=3, stride=1, padding=1),
            ConvBlock(64, 64), ConvBlock(64, 64), nn.MaxPool1d(2, 2),
            ConvBlock(64, 128), ConvBlock(128, 128), nn.MaxPool1d(2, 2),
            ConvBlock(128, 256), ConvBlock(256, 256), nn.MaxPool1d(2, 2),
            ConvBlock(256, 512), ConvBlock(512, 512),
            nn.AdaptiveMaxPool1d(k_max), FullyConnected())

        self._classifier = nn.Sequential(nn.Linear(512 * k_max,
                                                   2048), nn.ReLU(),
                                         nn.Linear(2048, 2048), nn.ReLU(),
                                         nn.Linear(2048, num_classes))
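One caveat worth noting: nn.AdaptiveMaxPool1d(k_max) takes one maximum per adaptive window, which is not the same as true k-max pooling (the k largest activations per channel), although the output shapes agree. A hedged comparison with arbitrary sizes:

    import torch
    import torch.nn as nn

    x = torch.randn(4, 512, 37)                    # (batch, channels, length)
    adaptive = nn.AdaptiveMaxPool1d(8)(x)          # one max per adaptive window -> (4, 512, 8)
    true_kmax = x.topk(8, dim=-1).values           # 8 largest values per channel -> (4, 512, 8)
    # Shapes match, but values generally differ: adaptive pooling can miss some of
    # the globally largest activations when several fall inside the same window.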
Example 11
 def __init__(self):
     super(NetworkCnn, self).__init__()
     """
     TODO:
     Create and initialise weights and biases for the layers.
     """
     self.conv1 = tnn.Sequential(
         tnn.Conv1d(in_channels=50,
                    out_channels=50,
                    kernel_size=8,
                    padding=5), tnn.ReLU(), tnn.MaxPool1d(kernel_size=4))
     self.conv2 = tnn.Sequential(
         tnn.Conv1d(in_channels=50,
                    out_channels=50,
                    kernel_size=8,
                    padding=5), tnn.ReLU(), tnn.MaxPool1d(kernel_size=4))
     self.conv3 = tnn.Sequential(
         tnn.Conv1d(in_channels=50,
                    out_channels=50,
                    kernel_size=8,
                    padding=5), tnn.ReLU(),
         tnn.AdaptiveMaxPool1d(output_size=1))
     self.fc1 = tnn.Linear(in_features=50, out_features=1)
Example 12
    def __init__(self, chunk_size=65536, overlap=512, min_chunk_size=1024):
        """
        chunk_size: how many bytes to process at a time. Increasing it may improve compute efficiency but uses more memory. Total memory use will be a function of chunk_size, not of the length of the input sequence L.

        overlap: how many bytes of overlap to use between chunks

        """
        super(LowMemConvBase, self).__init__()
        self.chunk_size = chunk_size
        self.overlap = overlap
        self.min_chunk_size = min_chunk_size

        # Used for pooling over time in a memory-efficient way
        self.pooling = nn.AdaptiveMaxPool1d(1)
        #   self.pooling.register_backward_hook(drop_zeros_hook)
        self.cat = CatMod()
        self.cat.register_backward_hook(drop_zeros_hook)
        self.receptive_field = None

        # Used to force checkpoint code to behave correctly due to poor design https://discuss.pytorch.org/t/checkpoint-with-no-grad-requiring-inputs-problem/19117/11
        self.dummy_tensor = torch.ones(1,
                                       dtype=torch.float32,
                                       requires_grad=True)
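The memory trick described in the docstring relies on global max pooling being decomposable: the max over a long sequence equals the max over per-chunk maxima, so each chunk can be processed and released before the next. A hedged illustration of that property (chunking only; the real class also handles overlap for the convolutional receptive field):

    import torch
    import torch.nn as nn

    pool = nn.AdaptiveMaxPool1d(1)
    x = torch.randn(2, 64, 50_000)                           # (batch, channels, long length)

    full = pool(x)                                           # pooled over the whole sequence
    chunk_maxes = [pool(c) for c in x.split(16_384, dim=2)]  # pool each chunk separately
    chunked = pool(torch.cat(chunk_maxes, dim=2))            # max of the chunk maxima

    assert torch.equal(full, chunked)                        # identical result, bounded peak memory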
Example 13
    def __init__(self, one_action_actor_init):
        network_init = one_action_actor_init['network']
        super(NActionActor, self).__init__()
        torch.manual_seed(one_action_actor_init['seed'])
        self.action_dim = network_init["o_size"] // 3
        self.entropy_learning_rate = one_action_actor_init[
            'entropy_learning_rate']
        self.optimizer = None
        self.loss_history = list()
        self.state_representation = one_action_actor_init[
            "state_representation"]

        self.relu = nn.ReLU()

        self.conv1 = nn.Sequential(nn.ConstantPad1d(15 // 2, 0.25),
                                   nn.Conv1d(4, 400, 15), nn.LeakyReLU(0.1),
                                   nn.AdaptiveMaxPool1d(1))

        self.l1 = nn.Linear(network_init["i_size"], network_init["l1_size"])
        self.l2 = nn.Linear(network_init["l1_size"], network_init["l2_size"])
        self.l3 = nn.Linear(network_init["l2_size"], network_init["o_size"])
        self.softmax = nn.Softmax(dim=1)
        self.softmax_dim_0 = nn.Softmax(dim=0)
Example 14
    def __init__(self, backbone, num_classes, feature_dim, criterion=None):
        super(MultiTaskWithLoss, self).__init__()
        self.basemodel = backbones.__dict__[backbone]()
        self.criterion = criterion
        self.num_tasks = len(num_classes)
        # plain heads (no attention)
        self.fcs = nn.ModuleList([
            nn.Linear(feature_dim, num_classes[k])
            for k in range(self.num_tasks)
        ])

        # Spatial attention for the first task -- note: remove avg_pooling and the layers after it from the backbone and move them out here
        # self.global_avg_pooling = nn.AdaptiveAvgPool2d(1)
        # self.ca = ChannelAttention(feature_dim)
        # self.sa = SpatialAttention()
        # self.fcs = nn.ModuleList()
        # for k in range(self.num_tasks):
        #     # if k == 1:
        #     # self.fcs.append(nn.Sequential(self.sa,nn.Linear(feature_dim, num_classes[k]))) #sa
        #     self.fcs.append(nn.Sequential(self.ca,self.sa,nn.Linear(feature_dim, num_classes[k])))
        # else:
        # self.fcs.append(nn.Linear(feature_dim, num_classes[k]))
        self.layers = nn.AdaptiveMaxPool1d(10)
Example 15
    def __init__(self):
        super(CharEncoder, self).__init__()

        self.char_embedding = nn.Embedding(num_embeddings=2000,  # size of the character vocabulary
                                           embedding_dim=100,  # dimension of each character vector
                                           padding_idx=0)    # the vector at index 0 is zeroed (padding)
        nn.init.xavier_uniform_(self.char_embedding.weight, gain=nn.init.calculate_gain('relu'))
        # nn.init.uniform_(self.char_embedding.weight.data, -0.32, 0.32)

        self._win_sizes = [3, 4, 5]
        self._cnn_encoder = nn.ModuleList([
            nn.Sequential(
                nn.Conv1d(in_channels=100,  # character embedding dimension
                          out_channels=50,  # feature dimension of the convolution output
                          kernel_size=w),
                nn.ReLU(),
                nn.AdaptiveMaxPool1d(1)
            ) for w in self._win_sizes
        ])

        # densify the features extracted by the convolutions
        self.cnn_dense = nn.Linear(in_features=50 * len(self._win_sizes),
                                   out_features=150)

        self.bilstm = RNNNet(input_size=150,
                             hidden_size=200,
                             nb_layer=1,
                             dropout=0.0,
                             batch_first=True,
                             bidirectional=True)

        # classification head
        self.dense = nn.Sequential(
            nn.Linear(200, 100),
            nn.ReLU(),
            nn.Linear(100, 3)
        )
Example 16
    def __init__(self, pretrained=False, in_channel=1, out_channel=10):
        super(CNN, self).__init__()
        if pretrained == True:
            warnings.warn("Pretrained model is not available")

        self.layer1 = nn.Sequential(
            nn.Conv1d(in_channel, 16, kernel_size=15),  # 16, 26 ,26
            nn.BatchNorm1d(16),
            nn.ReLU(inplace=True))

        self.layer2 = nn.Sequential(
            nn.Conv1d(16, 32, kernel_size=3),  # 32, 24, 24
            nn.BatchNorm1d(32),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=2, stride=2),
        )  # 32, 12,12     (24-2) /2 +1

        self.layer3 = nn.Sequential(
            nn.Conv1d(32, 64, kernel_size=3),  # 64,10,10
            nn.BatchNorm1d(64),
            nn.ReLU(inplace=True))

        self.layer4 = nn.Sequential(
            nn.Conv1d(64, 128, kernel_size=3),  # 128,8,8
            nn.BatchNorm1d(128),
            nn.ReLU(inplace=True),
            nn.AdaptiveMaxPool1d(4))  # 128, 4,4

        self.layer5 = nn.Sequential(
            nn.Linear(128 * 4, 256),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(256, 256),
            nn.ReLU(inplace=True),
            nn.Dropout(),
        )
        self.fc = nn.Linear(256, out_channel)
Example 17
    def __init__(self, config):
        super().__init__()
        self.output_channel = config.output_channel
        self.mode = config.mode
        self.batchnorm = config.batchnorm
        self.beta_ema = config.beta_ema
        self.dynamic_pool = config.dynamic_pool
        self.dynamic_pool_length = config.dynamic_pool_length
        self.has_bottleneck = config.bottleneck_layer
        self.bottleneck_units = config.bottleneck_units
        self.filter_widths = 3

        config.vocab = config.dataset.CODE_FIELD.vocab.vectors
        self.collection_encoder = CollectionEncoder(config)

        self.dropout = nn.Dropout(config.dropout)

        if self.dynamic_pool:
            self.dynamic_pool = nn.AdaptiveMaxPool1d(self.dynamic_pool_length)  # Dynamic pooling
            if self.has_bottleneck:
                self.fc1 = nn.Linear(self.filter_widths * self.output_channel * self.dynamic_pool_length, self.bottleneck_units)
                self.fc2 = nn.Linear(self.bottleneck_units, config.target_class)
            else:
                self.fc1 = nn.Linear(self.filter_widths * self.output_channel * self.dynamic_pool_length, config.target_class)

        else:
            if self.has_bottleneck:
                self.fc1 = nn.Linear(self.filter_widths * self.output_channel, self.bottleneck_units)
                self.fc2 = nn.Linear(self.bottleneck_units, config.target_class)
            else:
                self.fc1 = nn.Linear(self.filter_widths * self.output_channel, config.target_class)

        if self.beta_ema > 0:
            self.avg_param = deepcopy([p.data for p in self.parameters()])
            if torch.cuda.is_available():
                self.avg_param = [a.cuda() for a in self.avg_param]
            self.steps_ema = 0.0
Example 18
    def __init__(self, max_features, token_len, embedding_size, weights):
        super(AvitorWord, self).__init__()
        self.max_features = max_features
        self.embedding_size = embedding_size
        self.token_len = token_len
        self.n_word_layer = len(token_len)

        self.word_layers = []

        for i, tkl in enumerate(token_len):
            embedding = nn.Embedding(self.max_features, self.embedding_size)
            embedding.weight = nn.Parameter(
                torch.from_numpy(np.array(weights)).double())
            embedding.weight.requires_grad = False

            word_layer = nn.Sequential(
                embedding,
                # FloatTensor(),
                # nn.Dropout(0.5),
                TensorRotate(),
                # BiRNN(self.embedding_size, 32, 2, 16),
                nn.BatchNorm1d(self.embedding_size),
                nn.Conv1d(in_channels=self.embedding_size,
                          out_channels=50,
                          kernel_size=3),
                nn.ReLU(),
                nn.AdaptiveMaxPool1d(1),
                Flatten(),
                nn.Dropout(0.5))
            self.add_module(f"word_layer_{i}", word_layer)
            self.word_layers.append(word_layer)

        self.out_features = 50

        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                nn.init.xavier_normal_(m.weight)
Example 19
    def __init__(self, vocab, device, num_classes=3, hidden_size=512, attention_dim=256, embeddings_dim=300, linear_pre_1dim=32, linear_pre_2dim=64, pad_token='<pad>', use_pretrained_embeddings=True, dropout_rate=0.1):

        super(RNN_Combined_Model, self).__init__()

        self.embedding_size =  (embeddings_dim if not use_pretrained_embeddings else vocab.vectors.shape[1])
        self.vocabulary_size = len(vocab)
        self.hidden_size=hidden_size
        self.device=device


        self.sentence1_embedding = nn.Embedding(self.vocabulary_size, self.embedding_size, padding_idx=vocab.stoi[pad_token])
        self.sentence2_embedding = nn.Embedding(self.vocabulary_size, self.embedding_size, padding_idx=vocab.stoi[pad_token])

        if use_pretrained_embeddings:
            self.sentence1_embedding = nn.Embedding.from_pretrained(vocab.vectors, freeze=False, padding_idx=vocab.stoi[pad_token])
            self.sentence2_embedding = nn.Embedding.from_pretrained(vocab.vectors, freeze=False, padding_idx=vocab.stoi[pad_token])

        self.sentence1_dropout = nn.Dropout(dropout_rate)
        self.sentence2_dropout = nn.Dropout(dropout_rate)

        self.sentence1_lstm=nn.LSTM(input_size=self.embedding_size, hidden_size=hidden_size, bidirectional=True, batch_first=True)
        self.sentence2_lstm = nn.LSTM(input_size=self.embedding_size, hidden_size=hidden_size, bidirectional=True, batch_first=True)

        self.linear_sentence1=nn.Linear(self.hidden_size * 2, attention_dim)
        self.linear_sentence2 = nn.Linear(self.hidden_size * 2, attention_dim)

        self.linear_combined = nn.Linear(3*attention_dim, attention_dim)
        self.pool = nn.AdaptiveAvgPool1d(1)
        self.pool1 = nn.AdaptiveMaxPool1d(1)

        self.linear_pre2_classifier=nn.Linear(attention_dim,linear_pre_2dim)
        self.bn_pre2_classifier=nn.BatchNorm1d(linear_pre_2dim)
        self.relu2 = nn.ReLU()
        self.linear_pre1_classifier=nn.Linear(linear_pre_2dim, linear_pre_1dim)
        self.bn_pre1_classifier = nn.BatchNorm1d(linear_pre_1dim)
        self.relu1 = nn.ReLU()
        self.classifier=nn.Linear(linear_pre_1dim, num_classes)
Example 20
    def forward(self, x):
        batch_size = x.size(0)

        x = self.conv1(
            x
        )  # (batch_size, 3*2, num_points, k) -> (batch_size, 64, num_points, k)
        x = self.conv1_2(x)
        x = self.conv2(
            x
        )  # (batch_size, 64, num_points, k) -> (batch_size, 128, num_points, k)
        m = nn.AdaptiveMaxPool2d((x.size(2), 1))  #.view(batch_size, -1)
        x = m(x).squeeze(-1)
        #x = x.max(dim=-1, keepdim=False)[0]     # (batch_size, 128, num_points, k) -> (batch_size, 128, num_points)

        x = self.conv3(
            x
        )  # (batch_size, 128, num_points) -> (batch_size, 1024, num_points)
        m = nn.AdaptiveMaxPool1d(1)  #.view(batch_size, -1)
        x = m(x).squeeze(-1)
        #x = x.max(dim=-1, keepdim=False)[0]     # (batch_size, 1024, num_points) -> (batch_size, 1024)

        x = F.leaky_relu(
            self.bn4(self.linear1(x)),
            negative_slope=0.2)  # (batch_size, 1024) -> (batch_size, 512)
        x = F.leaky_relu(
            self.bn5(self.linear2(x)),
            negative_slope=0.2)  # (batch_size, 512) -> (batch_size, 256)

        #initialize as identity
        init = torch.eye(self.k, requires_grad=True).repeat(batch_size, 1, 1)
        if x.is_cuda:
            init = init.cuda()
        x = self.transform(x)  # (batch_size, 256) -> (batch_size, 3*3)
        x = x.view(batch_size, self.k,
                   self.k) + init  # (batch_size, 3*3) -> (batch_size, 3, 3)

        return x
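The commented-out lines hint at the equivalence used here: for global pooling, instantiating nn.AdaptiveMaxPool1d(1) on the fly and squeezing the last dimension gives exactly the same values as a plain max over that dimension. A small check with arbitrary sizes:

    import torch
    import torch.nn as nn

    x = torch.randn(4, 1024, 300)                      # (batch_size, channels, num_points)
    pooled = nn.AdaptiveMaxPool1d(1)(x).squeeze(-1)    # (batch_size, 1024)
    assert torch.equal(pooled, x.max(dim=-1, keepdim=False)[0])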
Example 21
    def __init__(self, n_classes):
        super(Net2, self).__init__()
        self.n_classes = n_classes
        kernel_size = 5
        padding = (kernel_size - 1) // 2  # integer division so Conv1d receives an int
        stride = 1
        width = 32

        self.dropout = nn.Dropout(0.2)
        self.conv1 = nn.Conv1d(13,
                               width,
                               kernel_size,
                               stride,
                               padding,
                               1,
                               bias=False)
        self.bn1 = nn.BatchNorm1d(width)
        self.conv2 = nn.Conv1d(width,
                               2 * width,
                               kernel_size,
                               stride,
                               padding,
                               2,
                               bias=False)
        self.bn2 = nn.BatchNorm1d(2 * width)
        self.conv3 = nn.Conv1d(2 * width,
                               4 * width,
                               kernel_size,
                               stride,
                               padding,
                               4,
                               bias=False)
        self.bn3 = nn.BatchNorm1d(4 * width)

        self.pool = nn.MaxPool1d(2)
        self.globalpool = nn.AdaptiveMaxPool1d(1)
        self.fc = nn.Linear(4 * width, n_classes)
Example 22
    def __init__(self, num_classes: int, embedding_dim: int, k_max: int,
                 embedding_size: int) -> None:
        """Instantiating VDCNN class

        Args:
            num_classes (int): the number of classes
            embedding_dim (int): embedding dimension of token
            k_max (int): parameter of k-max pooling following last convolution block
            embedding_size (int): size of the token vocabulary
        """
        super(VDCNN, self).__init__()

        self._extractor = nn.Sequential(
            nn.Embedding(embedding_size, embedding_dim, 1), Permute(),
            nn.Conv1d(embedding_dim, 64, 3, 1, 1), ConvBlock(64, 64),
            ConvBlock(64, 64), nn.MaxPool1d(2, 2), ConvBlock(64, 128),
            ConvBlock(128, 128), nn.MaxPool1d(2, 2), ConvBlock(128, 256),
            ConvBlock(256, 256), nn.MaxPool1d(2, 2), ConvBlock(256, 512),
            ConvBlock(512, 512), nn.AdaptiveMaxPool1d(k_max), Flatten())

        self._classifier = nn.Sequential(nn.Linear(512 * k_max,
                                                   2048), nn.ReLU(),
                                         nn.Linear(2048, 2048), nn.ReLU(),
                                         nn.Linear(2048, num_classes))
Example 23
    def __init__(self, num_classes=40, use_normals=True, points=512,
                 blocks=[1, 2, 1, 1], embed_channel=32, k_neighbors=[16, 16, 16, 16],
                 heads=8, dim_head=16, expansion=2, reducer=4, pool="avg", **kwargs):
        super(Develop1, self).__init__()
        self.stages = len(blocks)
        self.num_classes = num_classes
        channel = 6 if use_normals else 3
        self.linear = nn.Linear(channel, embed_channel)
        self.transformer_stages = nn.ModuleList()
        self.transformer_downs = nn.ModuleList()
        self.groupers = nn.ModuleList()
        for stage, block_num in enumerate(blocks):
            # for appending transformer blocks
            factor = expansion ** stage
            factor_d = int(math.sqrt(factor))
            factor_h = factor // factor_d
            transformer_blocks = []
            for _ in range(block_num):
                transformer_blocks.append(
                    TransformerBlock(dim=embed_channel * factor, heads=heads * factor_h, dim_head=dim_head * factor_d)
                )
            transformer_blocks = nn.Sequential(*transformer_blocks)
            self.transformer_stages.append(transformer_blocks)

            # for appending transformer groups
            knn = k_neighbors[stage]
            self.groupers.append(FPSKNNGrouper(points=points // (reducer ** stage), knn=knn))

            # for appending transformer downs
            self.transformer_downs.append(
                TransformerDown(in_dim=embed_channel * factor, out_dim=embed_channel * factor * expansion,
                                hid_dim=embed_channel)
            )

        self.pool = nn.AdaptiveAvgPool1d(1) if pool=="avg" else nn.AdaptiveMaxPool1d(1)
        self.classify = nn.Linear(embed_channel * factor * expansion, num_classes)
Example 24
    def __init__(self):
        super(LSTMEncoder, self).__init__()
        self.input_dim = 1
        self.hidden_dim = 16
        self.nl = 1
        self.bidir = True
        self.direction = 1
        if self.bidir:
            self.direction = 2

        # Layers
        # self.pool0 = nn.MaxPool1d(10, 1)
        self.pool0 = nn.AdaptiveMaxPool1d(400)
        self.rnn = nn.LSTM(input_size=self.input_dim,
                           hidden_size=self.hidden_dim,
                           num_layers=self.nl,
                           bidirectional=self.bidir,
                           dropout=0.1,
                           batch_first=True)
        self.fc1 = Dense(32, 32)
        self.fc2 = Dense(32, 16)
        self.fc3 = Dense(16, 8)
        self.act = nn.ELU()
        self.fc_out = nn.Linear(16, 1, bias=True)
Example 25
    def __init__(self, word_dim, num_channels:list, kernel_sizes:list, is_batch_normal:bool):
        super(baseline_TextCNN_encoder, self).__init__()



        self.pool = nn.AdaptiveMaxPool1d(output_size=1)
        self.convs = nn.ModuleList()
        if is_batch_normal:
            for c, k in zip(num_channels, kernel_sizes):
                self.convs.append(
                    nn.Sequential(
                        nn.Conv1d(in_channels=word_dim,
                                  out_channels=c,
                                  kernel_size=k),
                        nn.BatchNorm1d(c)
                    )
                )
        else:
            for c, k in zip(num_channels, kernel_sizes):
                self.convs.append(
                    nn.Conv1d(in_channels=word_dim,
                              out_channels=c,
                              kernel_size=k)
                )
Example 26
 def __init__(self,
              input_size,
              filter_sizes,
              num_filter_per_size,
              activation,
              dropout=0,
              num_pool=0,
              channel_last=False):
     super(CNNEncoder, self).__init__()
     if not filter_sizes:
         raise ValueError(f'CNNEncoder expect non-empty filter_sizes. '
                          f'Got: {filter_sizes}')
     self.channel_last = channel_last
     self.convs = nn.ModuleList()
     for filter_size in filter_sizes:
         conv = nn.Conv1d(in_channels=input_size,
                          out_channels=num_filter_per_size,
                          kernel_size=filter_size)
         self.convs.append(conv)
     self.num_pool = num_pool
     if num_pool > 1:
         self.pool = nn.AdaptiveMaxPool1d(num_pool)
     self.activation = getattr(torch, activation, getattr(F, activation))
     self.dropout = nn.Dropout(dropout)
Example 27
 def __init__(self):
     super(NetworkCnn, self).__init__()
     """
     Create and initialise weights and biases for the layers.
     """
     self.conv1 = tnn.Conv1d(in_channels=50,
                             kernel_size=8,
                             padding=5,
                             out_channels=50)
     self.ReLU1 = tnn.ReLU()
     self.maxpool1 = tnn.MaxPool1d(4)
     self.conv2 = tnn.Conv1d(in_channels=50,
                             kernel_size=8,
                             padding=5,
                             out_channels=50)
     self.ReLU2 = tnn.ReLU()
     self.maxpool2 = tnn.MaxPool1d(4)
     self.conv3 = tnn.Conv1d(in_channels=50,
                             kernel_size=8,
                             padding=5,
                             out_channels=50)
     self.ReLU3 = tnn.ReLU()
     self.maxpoolglobal = tnn.AdaptiveMaxPool1d(50)
     self.fc1 = tnn.Linear(50 * 50, 1)
Example 28
    def __init__(self, word_vecs, model_type, h, feature, k, p, max_l):
        super(CNN, self).__init__()
        v = word_vecs.size()[0]

        # Embedding Layer
        self.ch = 1
        self.emb = nn.Embedding(v, k, padding_idx=0)
        if model_type != "rand":
            self.emb.weight.data.copy_(word_vecs)
            if model_type == "static":
                self.emb.weight.requires_grad = False
            elif model_type == "multichannel":
                self.emb_multi = nn.Embedding(v, k, padding_idx=0)
                self.emb_multi.weight.data.copy_(word_vecs)
                self.emb_multi.weight.requires_grad = False
                self.ch = 2

        # Convolutional Layer
        for w in h:
            conv = nn.Conv1d(self.ch, feature, w * k, stride=k)
            setattr(self, 'conv%d' % w, conv)

        # Pooling Layer
        self.pool = nn.AdaptiveMaxPool1d(1)

        # FC Layer
        self.fc = nn.Linear(len(h) * feature, 2)

        # Other Layers
        self.dropout = nn.Dropout(p)
        self.relu = nn.ReLU()

        self.h = h
        self.feature = feature
        self.k = k
        self.max_l = max_l
Example 29
    def __init__(self):
        super(NetworkCnn, self).__init__()
        """
        TODO:
        Create and initialise weights and biases for the layers.
        """

        self.conv1 = tnn.Conv1d(in_channels=50,
                                out_channels=50,
                                kernel_size=8,
                                padding=5)
        self.conv2 = tnn.Conv1d(in_channels=50,
                                out_channels=50,
                                kernel_size=8,
                                padding=5)
        self.conv3 = tnn.Conv1d(in_channels=50,
                                out_channels=50,
                                kernel_size=8,
                                padding=5)
        self.dropout = tnn.Dropout(p=0.3)
        self.maxpool = tnn.MaxPool1d(kernel_size=4)
        self.relu = tnn.ReLU()
        self.globalmaxpool = tnn.AdaptiveMaxPool1d(1)
        self.fc1 = tnn.Linear(50, 1)
Example 30
    def __init__(self, input_size,
                 output_size,
                 kernel_sizes=(1, 3, 5),
                 filter_nums=(50, 100, 150),
                 dropout=0.0):
        super(MWCNN, self).__init__()

        self.dropout = dropout

        for k in kernel_sizes:
            assert k % 2 == 1

        self.convs = nn.ModuleList([
            nn.Sequential(
                nn.Conv1d(in_channels=input_size,
                          out_channels=filter_nums[i],
                          kernel_size=k,
                          padding=k // 2),
                nn.ReLU(),
                nn.AdaptiveMaxPool1d(1)
            ) for i, k in enumerate(kernel_sizes)
        ])

        self.fc = nn.Linear(sum(filter_nums), output_size)
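A hedged forward sketch for MWCNN above (assumed shapes, not from the original project): each odd kernel width with padding k // 2 preserves the sequence length, the adaptive max pool reduces every branch to one value per filter, and concatenation yields the sum(filter_nums) features the final linear layer expects.

    import torch
    import torch.nn.functional as F

    # Hypothetical forward pass; `model` is an instance of MWCNN above.
    def forward_sketch(model, x):                                # x: (batch, seq_len, input_size)
        x = x.transpose(1, 2)                                    # (batch, input_size, seq_len)
        pooled = [conv(x).squeeze(-1) for conv in model.convs]   # each: (batch, filter_nums[i])
        features = torch.cat(pooled, dim=1)                      # (batch, sum(filter_nums))
        features = F.dropout(features, p=model.dropout, training=model.training)
        return model.fc(features)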