Example #1
 def forward(self, sent):
     embeds = self.word_embeddings(sent)
     '''The LSTM input has shape [seqlen, batch, input_size].'''
     lstm_out, (hn, cn) = self.lstm(embeds.view(len(sent), 1, -1))  # [seqlen, 1, embedding_dim]
     tag_space = self.hidden2tag(lstm_out.view(len(sent), -1))
     tag_scores = F.log_softmax(tag_space, dim=1)
     return tag_scores
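This forward pass follows the PyTorch sequence-tagger pattern: embed the sentence, run it through an LSTM, project to tag space, and return log-probabilities (which pair naturally with nn.NLLLoss). A minimal sketch of a module it could belong to; the constructor and layer sizes are assumptions, not part of the example:

import torch.nn as nn

class LSTMTagger(nn.Module):
    # Hypothetical constructor providing the attributes used in forward() above.
    def __init__(self, embedding_dim, hidden_dim, vocab_size, tagset_size):
        super().__init__()
        self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim)       # input: [seqlen, batch, input_size]
        self.hidden2tag = nn.Linear(hidden_dim, tagset_size)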
Example #2
    def forward(self, features, adj):
        x = self.conv1(features, adj)
        x = F.relu(x)
        x = F.dropout(x, self.dropout, self.training)
        x = self.conv2(x, adj)

        return F.log_softmax(x, dim=1)
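Because this two-layer GCN forward returns per-node log-probabilities, training typically pairs it with F.nll_loss. A minimal usage sketch; model, features, adj, labels and train_mask are assumed placeholders:

import torch.nn.functional as F

log_probs = model(features, adj)        # [num_nodes, num_classes] log-probabilities
loss = F.nll_loss(log_probs[train_mask], labels[train_mask])
loss.backward()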
Example #3
    def forward(self, x: Tensor) -> Tensor:
        x = x.view(x.size(0), -1)  # Make it flat
        x = self.linear(x)
        x = torch.relu(x)

        output = F.log_softmax(x, dim=1)

        return output  # return the log-probabilities computed above
Example #4
 def forward(self, x):
     x = F.relu(F.max_pool2d(self.conv1(x), 2))
     x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
     x = x.view(-1, 320)
     x = F.relu(self.fc1(x))
     x = F.dropout(x, training=self.training)
     x = self.fc2(x)
     return F.log_softmax(x, dim=1)  # specify dim explicitly; calling without dim is deprecated
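Returning F.log_softmax and training with F.nll_loss is numerically equivalent to calling F.cross_entropy on the raw logits, which is an easy sanity check for a head like this one (the shapes below are illustrative only):

import torch
import torch.nn.functional as F

logits = torch.randn(8, 10)                 # batch of 8, 10 classes
target = torch.randint(0, 10, (8,))
nll = F.nll_loss(F.log_softmax(logits, dim=1), target)
ce = F.cross_entropy(logits, target)
assert torch.allclose(nll, ce)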
Example #5
    def forward(self, x):
        x = self.conv(x)

        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)

        return F.log_softmax(x, dim=1)
Example #6
 def forward(self, x):
     x = F.relu(self.conv1(x))
     x = F.max_pool2d(x, 2, 2)
     x = F.relu(self.conv2(x))
     x = F.max_pool2d(x, 2, 2)
     x = x.view(-1, 4 * 4 * 50)
     x = F.relu(self.fc1(x))
     x = self.fc2(x)
     return F.log_softmax(x, dim=1)
Example #7
 def forward(self, x):
     x = x.unsqueeze(1)
     in_spec = self.speclayer(x)
     out = self.initial_layer(in_spec)
     out_residual = self.layer2(out)
     batch, time = out_residual.size()[:2]
     out = out_residual.reshape(batch, time, -1)
     lstm_out, hidden = self.lstm(out)
     in_ffn = self.flatten(lstm_out)
     output = self.fc(in_ffn)
     return F.log_softmax(output, dim=1)
Example #8
 def forward(self,x):
     x = self.conv1(x)
     x = self.conv2(x)
     x = self.conv3(x)
     x = self.conv4(x)
     x = self.conv5(x)
     x = self.conv6(x)
     x = self.gap(x)
     x = self.conv7(x)
     x = x.view(-1,10)
     return F.log_softmax(x, dim=-1)
Example #9
    def forward(self, x):
        x = self.block1(x)
        x = self.block2(x)
        x = self.dpthwise_sep3(x)
        # y.view(y.size(0), -1)
        # print('before dil',x.shape)
        x = torch.cat((self.get_seperable(x, sep=True), self.dilation(x)), 1)
        # print('after dil',x.shape)
        x = self.block4(x)
        x = self.block5(x)

        x = x.view(-1, 10)
        return F.log_softmax(x, dim=-1)
Example #10
 def forward(self, x: Tensor) -> Tensor:
     x = self.conv1(x)
     x = F.relu(x)
     x = self.conv2(x)
     x = F.relu(x)
     x = F.max_pool2d(x, 2)
     x = self.dropout1(x)
     x = torch.flatten(x, 1)
     x = self.fc1(x)
     x = F.relu(x)
     x = self.dropout2(x)
     x = self.fc2(x)
     output = F.log_softmax(x, dim=1)
     return output  # return the log-probabilities computed above
Example #11
    def forward(self, inputs):
        #  inputs = inputs.view(-1, 28, 28)

        embedded = self.embedding(inputs)

        self.rnn.flatten_parameters()
        out, _ = self.rnn(embedded)

        out = self.out(out)
        out = out[:, -1, :]  # at last timestep

        out = F.log_softmax(out, dim=1)

        return out
Example #12
    def forward(self, sentence):
        embeds = self.word_embedding(sentence)
        # print("embeds: ", embeds)
        # print("size of embeds: ", embeds.size())
        lstm_out, (hn, cn) = self.lstm(embeds, (self.init_hidden()))
        # print("lstm_out: ", lstm_out)
        # print("size of lstm_out: ", lstm_out.size())
        tag_space = self.hidden2tag(lstm_out)
        # print("tag_space: ", tag_space)
        # print("size of tag_space: ", tag_space.size())
        tag_scores = F.log_softmax(tag_space, dim=1)
        # print("tag_scores: ", tag_scores)
        # print("size of tag_score: ", tag_scores.size())

        return tag_scores
Example #13
    def forward(self, x):
        x = self.conv1(x)
        x = self.fc2(x)
        x = self.bn1(x)
        x = self.relu2(x)

        x = self.sig(x)

        x = self.fc3(x)
        x = self.pool1(x)

        bSz, _, _, _ = x.shape
        x = x.view(bSz, -1)

        return F.log_softmax(x, dim=1)
Example #14
	def forward(self, x):
		x = self.conv1(x)
		x = self.bn1(x)
		x = self.relu(x)

		x = self.layer1(x)
		x = self.layer2(x)
		x = self.layer3(x)
		x = self.layer4(x)

		x = self.avgpool(x)
		x = x.view(x.size(0), -1)
		x = self.fc(x)
		x = fun.log_softmax(x, dim=1)

		return x
Example #15
    def forward(self, x):
        x = x.unsqueeze(1)
        in_spec = self.speclayer(x)
        out = self.layer1(in_spec)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = out.transpose(1, -1)

        # out -> (batch, time, channel*freq)
        batch, time = out.size()[:2]
        out = out.reshape(batch, time, -1)
        gru_out, hidden = self.GRU(out)
        in_dense = self.flatten(gru_out)
        out_dense = self.fc1(in_dense)
        return F.log_softmax(out_dense, dim=1)
Example #16
 def forward(self,x):
     x1 = self.block1(x)
     x2 = self.block2(x1)
     x3 = self.block3(x1+x2)
     x4 = x1+x2+x3
     x4 = self.pool(x4)
     x5 = self.block4(x4)
     x6 = self.block5(x4+x5)
     x7 = self.block6(x4 + x5 + x6)
     x8 = x5 + x6 + x7
     x8 = self.pool(x8)
     x9 = self.block7(x8)
     x10 = self.block8(x8 + x9)
     x11 = self.block9(x8 + x9 + x10)
     x12 = self.block10(x11)
     x = x12.view(-1,10)
     return F.log_softmax(x, dim=-1)
Example #17
    def forward(self, sent, words):
        embeds = self.word_embedding(sent)

        char_hidden_final = []

        for word in words:
            char_embeds = self.char_embedding(word)
            char_out, (char_hn, char_cn) = self.char_lstm(
                char_embeds.view(len(word), 1, -1))
            char_hidden_state_of_word = char_hn.view(-1)
            char_hidden_final.append(char_hidden_state_of_word)
        char_hidden_final = torch.stack(tuple(char_hidden_final))
        combined = torch.cat((embeds, char_hidden_final), 1)
        lstm_out, _ = self.lstm(combined.view(len(sent), 1, -1))
        tag_space = self.hidden2tag(lstm_out.view(len(sent), -1))
        tag_scores = F.log_softmax(tag_space, dim=1)
        return tag_scores
Example #18
    def forward(self, inputs, hidden, encoder_outputs):
        embedded = self.embedding(inputs).view(1, 1, -1)
        embedded = self.dropout(embedded)

        attn_weights = F.softmax(self.attn(
            torch.cat((embedded[0], hidden[0]), 1)),
                                 dim=1)
        attn_applied = torch.bmm(attn_weights.unsqueeze(0),
                                 encoder_outputs.unsqueeze(0))

        output = torch.cat((embedded[0], attn_applied[0]), 1)
        output = self.attn_combine(output).unsqueeze(0)

        output = F.relu(output)
        output, hidden = self.gru(output, hidden)

        output = F.log_softmax(self.out(output[0]), dim=1)
        return output, hidden, attn_weights
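This is the one-step attention decoder pattern from the PyTorch seq2seq tutorial: it consumes a single token id plus the previous hidden state and returns log-probabilities over the vocabulary together with the attention weights. A minimal call sketch; decoder, SOS_token, hidden_size and encoder_outputs are assumed to exist in the surrounding script:

import torch

decoder_input = torch.tensor([[SOS_token]])          # one start-of-sentence token id
decoder_hidden = torch.zeros(1, 1, hidden_size)      # initial hidden state
output, decoder_hidden, attn_weights = decoder(
    decoder_input, decoder_hidden, encoder_outputs)  # output: [1, vocab] log-probabilities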
Example #19
    def rnn_sample(self, controller: Controller, **kwargs):
        list_result = []
        sum_log_proba = 0

        emb = controller.start_of_sequence
        h, c = controller.lstm_cell(emb)

        for i in range(self.space):
            logits = controller.decoders[i](h).squeeze(0)
            probas = F.softmax(logits, dim=0)
            log_probas = F.log_softmax(logits, dim=0)

            # result = torch.argmax(probas)
            result = torch.multinomial(probas, num_samples=1)[0]

            emb = controller.encoders[i](result.unsqueeze(0))
            h, c = controller.lstm_cell(emb, (h, c))

            list_result.append(self.ind_to_val[result.item()])
            sum_log_proba += log_probas[result.item()]

        return list_result, sum_log_proba
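Sampling an index from the softmax and then looking up its log-probability, as the loop above does with torch.multinomial and F.log_softmax, can be written equivalently with torch.distributions.Categorical. A minimal sketch, assuming logits is the same 1-D tensor as in the loop:

from torch.distributions import Categorical

dist = Categorical(logits=logits)   # normalizes the logits internally
result = dist.sample()              # draws an index with the softmax probabilities
log_proba = dist.log_prob(result)   # log-probability of the sampled index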
Example #20
def cal_loss(pred, gold, PAD, smoothing='1'):
    ''' Calculate cross entropy loss, apply label smoothing if needed. '''

    gold = gold.contiguous().view(-1)

    if smoothing == '0':
        eps = 0.1
        n_class = pred.size(1)

        one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)
        one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
        log_prb = F.log_softmax(pred, dim=1)

        non_pad_mask = gold.ne(0)
        loss = -(one_hot * log_prb).sum(dim=1)
        loss = loss.masked_select(non_pad_mask).sum()  # average later
    elif smoothing == '1':
        loss = F.cross_entropy(pred, gold, ignore_index=PAD)
    else:
        # loss = F.cross_entropy(pred, gold, ignore_index=PAD)
        loss = F.cross_entropy(pred, gold)

    return loss
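A small usage sketch for cal_loss; the shapes are assumptions, with pred holding raw logits and gold holding flattened target indices, and PAD set to the padding index (the smoothing branch above hard-codes gold.ne(0), so it assumes padding index 0). Recent PyTorch releases also expose a label_smoothing argument on F.cross_entropy, though its smoothing distribution differs slightly from the manual one-hot mixing used here:

import torch

pred = torch.randn(32, 1000)             # [batch * seq_len, vocab_size] logits
gold = torch.randint(1, 1000, (32,))     # flattened target indices (0 reserved for padding)
loss = cal_loss(pred, gold, PAD=0, smoothing='0')   # label-smoothed branch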
Example #21
 def forward(self, x):
     return F.log_softmax(self.proj(x), dim=-1)
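This one-liner is the usual generator head: a linear projection to vocabulary size followed by log_softmax over the last dimension. A minimal sketch of a module it could sit in; the names and dimensions are assumptions:

import torch.nn as nn
import torch.nn.functional as F

class Generator(nn.Module):
    # Hypothetical projection head: model dimension -> vocabulary log-probabilities.
    def __init__(self, d_model, vocab_size):
        super().__init__()
        self.proj = nn.Linear(d_model, vocab_size)

    def forward(self, x):
        return F.log_softmax(self.proj(x), dim=-1)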
Example #22
 def distillation(self, y, labels, teacher_scores, temp, alpha):
     return self.KLDivLoss(F.log_softmax(y / temp, dim=1),
                           F.softmax(teacher_scores / temp, dim=1)) * (
                               temp * temp * 2.0 * alpha) + F.cross_entropy(
                                   y, labels) * (1. - alpha)
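The method above is the standard soft-target distillation loss: KL divergence between temperature-scaled student and teacher distributions plus a weighted cross-entropy on the hard labels. self.KLDivLoss is expected to be an nn.KLDivLoss instance; its documentation recommends reduction='batchmean' so the term matches the mathematical KL definition. A minimal usage sketch, with teacher_model, student_model, images and labels as assumed placeholders:

import torch

with torch.no_grad():
    teacher_scores = teacher_model(images)    # teacher logits, no gradients needed
student_logits = student_model(images)
loss = student_model.distillation(student_logits, labels, teacher_scores,
                                  temp=4.0, alpha=0.7)
loss.backward()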
Example #23
    def _beam_decode(self, encoder_contexts, target_tensor):
        '''
        :param target_tensor: target indexes tensor of shape [B, T] where B is the batch size and T is the maximum length of the output sentence
        :param decoder_hidden: input tensor of shape [1, B, H] for start of the decoding
        :param encoder_outputs: if you are using an attention mechanism you can pass the encoder outputs, [T, B, H] where T is the maximum length of the input sentence
        :return: decoded_batch
        '''

        beam_width = self._beam_width
        topk = 1  # how many sentences we want to generate
        decoded_batch = []
        batch_size, target_length = target_tensor.shape

        # decoding goes sentence by sentence
        for idx in range(batch_size):
            encoder_context = encoder_contexts[idx].unsqueeze(0)
            decoder_hidden = encoder_context

            # Start with the start of the target sentence
            decoder_input = target_tensor[idx][0].unsqueeze(0)

            # Number of sentences to generate
            endnodes = []
            number_required = 1  # min((topk + 1), topk - len(endnodes))

            # starting node
            node = BeamSearchNode(hiddenstate=decoder_hidden,
                                  previousNode=None,
                                  wordId=decoder_input,
                                  logProb=0,
                                  length=1,
                                  decoder_output=None)

            nodes: PriorityQueue[BeamSearchNode] = PriorityQueue()

            # start the queue
            nodes.put((node.eval(), node))
            qsize = 1

            # start beam search
            while True:
                # give up when decoding takes too long
                if qsize > 2000:
                    break

                # fetch the best node
                current_node_score, current_node = nodes.get()
                if current_node.length > target_length:
                    break

                decoder_input = current_node.word_id
                decoder_hidden = current_node.h

                if (current_node.word_id.item() == self._eos_token
                        and current_node.previous_node is not None):
                    endnodes.append((current_node_score, current_node))
                    # if we reached maximum # of sentences required
                    if len(endnodes) >= number_required:
                        break
                    else:
                        continue

                # decode for one step using decoder
                decoder_output, decoder_hidden = self._internal_forward(
                    decoder_input, decoder_hidden, encoder_context)

                # PUT HERE REAL BEAM SEARCH OF TOP
                log_output = F.log_softmax(
                    decoder_output, dim=1) + current_node.log_probability
                log_probabilities, indexes = torch.topk(log_output, beam_width)
                for new_k in range(beam_width):
                    decoded_t = indexes[0][new_k].reshape(1)
                    log_probability = log_probabilities[0][new_k].item()

                    node = BeamSearchNode(hiddenstate=decoder_hidden,
                                          previousNode=current_node,
                                          wordId=decoded_t,
                                          logProb=log_probability,
                                          length=current_node.length + 1,
                                          decoder_output=decoder_output)

                    current_node_score = node.eval()
                    nodes.put((current_node_score, node))

                # increase qsize
                qsize += beam_width - 1

            # choose nbest paths, back trace them
            if len(endnodes) == 0:
                endnodes = [nodes.get() for _ in range(topk)]

            current_node_score, current_node = endnodes[0]
            utterance = torch.zeros((1, current_node.length - 1,
                                     current_node.decoder_output.shape[1]),
                                    device=self._device)
            utterance[0][-1] = current_node.decoder_output
            counter = 2
            # back trace
            while current_node.previous_node is not None:
                current_node = current_node.previous_node
                if current_node.decoder_output is not None:
                    utterance[0][-counter] = current_node.decoder_output

                counter += 1

            decoded_batch.append(utterance)

        sequence_lengths = [x.shape[1] for x in decoded_batch]
        padded_decoded_batch = torch.zeros(
            (batch_size, max(sequence_lengths), self._vocabulary_size),
            dtype=torch.float,
            device=self._device)
        for i, sequence_length in enumerate(sequence_lengths):
            padded_decoded_batch[i][:sequence_length] = decoded_batch[i]

        return padded_decoded_batch, target_tensor[:, 1:]
Example #24
 def forward(self, x, A):
     x = F.relu(self.conv1(x, A))
     x = self.conv2(x, A)
     return F.log_softmax(x, dim=1)
Example #25
 def forward(self, x):
     # Then apply log_softmax (i.e., take the log of the softmax output)
     return F.log_softmax(self.proj(x), dim=-1)