Example #1
    def forward(self, features):
        def FC(inputs, name, i, act):
            return L.fc(inputs,
                        self.hidden_size,
                        act=act,
                        param_attr=F.ParamAttr(
                            name='%s.fc.w_%d' % (name, i),
                            initializer=F.initializer.XavierInitializer(
                                fan_in=self.hidden_size,
                                fan_out=self.hidden_size)),
                        bias_attr=F.ParamAttr(
                            name='%s.fc.b_%d' % (name, i),
                            initializer=F.initializer.Constant(0.)))

        title_ids, comment_ids = features

        embedding_attr = F.ParamAttr(
            name='emb',
            initializer=F.initializer.XavierInitializer(
                fan_in=self.vocab_size, fan_out=self.embedding_size))

        title_encoded = L.embedding(title_ids,
                                    [self.vocab_size, self.embedding_size],
                                    param_attr=embedding_attr)
        comment_encoded = L.embedding(comment_ids,
                                      [self.vocab_size, self.embedding_size],
                                      param_attr=embedding_attr)

        # Vsum: masked sum pooling over the token axis, followed by softsign
        zero = L.fill_constant(shape=[1], dtype='int64', value=0)
        title_pad = L.cast(L.logical_not(L.equal(title_ids, zero)), 'float32')
        comment_pad = L.cast(L.logical_not(L.equal(comment_ids, zero)),
                             'float32')

        title_encoded = L.reduce_sum(title_encoded * title_pad, dim=1)
        title_encoded = L.softsign(title_encoded)
        comment_encoded = L.reduce_sum(comment_encoded * comment_pad, dim=1)
        comment_encoded = L.softsign(comment_encoded)

        for i in range(self.num_layers):
            title_encoded = FC(title_encoded, 'title', i, 'tanh')

        for i in range(self.num_layers):
            comment_encoded = FC(comment_encoded, 'comment', i, 'tanh')

        score = L.reduce_sum(title_encoded * comment_encoded,
                             dim=1,
                             keep_dim=True) / np.sqrt(self.hidden_size)
        if self.mode is propeller.RunMode.PREDICT:
            probs = L.sigmoid(score)
            return probs
        else:
            return score
Example #2
 def test_softsign(self):
     program = Program()
     with program_guard(program):
         input = layers.data(name="input", shape=[16], dtype="float32")
         out = layers.softsign(input, name='softsign')
         self.assertIsNotNone(out)
     print(str(program))
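
For reference, softsign computes the elementwise map x / (1 + |x|), which bounds activations to the open interval (-1, 1) and saturates more gently than tanh. A minimal NumPy sketch (independent of any Paddle version; the names here are illustrative) shows the function the `layers.softsign` op above implements:

import numpy as np

def softsign(x):
    # elementwise x / (1 + |x|); output lies in the open interval (-1, 1)
    return x / (1.0 + np.abs(x))

x = np.array([-10.0, -1.0, 0.0, 1.0, 10.0])
print(softsign(x))  # [-0.9091 -0.5  0.  0.5  0.9091]
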
Example #3
    def add_input(self, x_t, speaker_embed=None):
        """
        Takes a step of inputs and returns a step of outputs. It works similarly to the `forward` method, but in a `step-in-step-out` fashion.

        Args:
            x_t (Variable): shape(B, C_in, T=1), dtype float32, the input of the Conv1DGLU layer, where B means batch_size and C_in means the input channels.
            speaker_embed (Variable): shape(B, C_sp), dtype float32, the speaker embedding, where C_sp means the speaker embedding size.

        Returns:
            x (Variable): shape(B, C_out), the output of Conv1DGLU, where C_out means the `num_filter`.
        """
        residual = x_t
        x_t = F.dropout(x_t,
                        self.dropout,
                        dropout_implementation="upscale_in_train")
        x_t = self.conv.add_input(x_t)
        content_t, gate_t = F.split(x_t, num_or_sections=2, dim=1)

        if speaker_embed is not None:
            sp = F.softsign(self.fc(speaker_embed))
            content_t = F.elementwise_add(content_t, sp, axis=0)

        # glu
        x_t = F.sigmoid(gate_t) * content_t

        if self.residual:
            x_t = F.scale(x_t + residual, np.sqrt(0.5))
        return x_t
Example #4
    def forward(self, x, speaker_embed=None):
        """
        Args:
            x (Variable): shape(B, C_in, T), dtype float32, the input of the Conv1DGLU layer, where B means batch_size, C_in means the input channels, and T means the number of input time steps.
            speaker_embed (Variable): shape(B, C_sp), dtype float32, the speaker embedding, where C_sp means the speaker embedding size.

        Returns:
            x (Variable): shape(B, C_out, T), the output of Conv1DGLU, where
                C_out means the `num_filters`.
        """
        residual = x
        x = F.dropout(x,
                      self.dropout,
                      dropout_implementation="upscale_in_train")
        x = self.conv(x)
        content, gate = F.split(x, num_or_sections=2, dim=1)

        if speaker_embed is not None:
            sp = F.softsign(self.fc(speaker_embed))
            content = F.elementwise_add(content, sp, axis=0)

        # glu
        x = F.sigmoid(gate) * content

        if self.residual:
            x = F.scale(x + residual, np.sqrt(0.5))
        return x
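
Examples #3 and #4 share one conditioning pattern: the convolution output is split in half along the channel axis, a softsign-normalized projection of the speaker embedding is added to the content half, and the other half gates it (a GLU). Below is a self-contained NumPy sketch of that gating, with shapes and names assumed for illustration rather than taken from the Parakeet API:

import numpy as np

def softsign(x):
    return x / (1.0 + np.abs(x))

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def glu_with_speaker(conv_out, speaker_proj=None):
    # conv_out: (B, 2*C, T); speaker_proj: (B, C), already projected by an FC layer
    content, gate = np.split(conv_out, 2, axis=1)
    if speaker_proj is not None:
        # bound the speaker bias to (-1, 1) before adding it to the content half
        content = content + softsign(speaker_proj)[:, :, None]
    return sigmoid(gate) * content

B, C, T = 2, 4, 5
out = glu_with_speaker(np.random.randn(B, 2 * C, T), np.random.randn(B, C))
print(out.shape)  # (2, 4, 5)
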
Example #5
    def forward(self, input, bias=None, padding=None):
        """
        input: input feature, shape (B, T, C)
        padding: only used with causal convolution, where we pad manually
        """
        input_dropped = F.dropout(input,
                                  1. - self.keep_prob,
                                  dropout_implementation="upscale_in_train")
        if self.causal:
            assert padding is not None
            input_dropped = F.concat([padding, input_dropped], axis=1)
        hidden = self.conv(input_dropped)

        if self.has_bias:
            assert bias is not None
            transformed_bias = F.softsign(self.bias_affine(bias))
            hidden_embedded = hidden + F.unsqueeze(transformed_bias, [1])
        else:
            hidden_embedded = hidden

        # glu: the gate comes from the raw conv output, while the content half
        # is taken from the bias-embedded features
        content, gate = F.split(hidden, num_or_sections=2, dim=-1)
        content = hidden_embedded[:, :, :self.in_channel]
        hidden = F.sigmoid(gate) * content

        # residual connection, scaled to preserve variance
        hidden = F.scale(input + hidden, math.sqrt(0.5))
        return hidden
Example #6
    def forward(self, text, text_lengths, speakers=None, mel=None, frame_lengths=None, 
                force_monotonic_attention=None, window=None):
        # encode
        text_embed = self.char_embedding(text)  # no stress embedding here
        speaker_embed = (F.softsign(self.speaker_embedding(speakers))
                         if self.speaker_embedding is not None else None)
        keys, values = self.encoder(text_embed, speaker_embed)

        if mel is not None:
            return self.teacher_forced_train(keys, values, text_lengths, speaker_embed, mel)
        else:
            return self.inference(keys, values, text_lengths, speaker_embed, force_monotonic_attention, window)
Example #7
 def forward(self, input, bias=None):
     """
     input -> (affine + weight_norm) -> hidden
     bias -> (affine) -> softsign -> transformed_bias
     hidden += transformed_bias
     """
     hidden = self.affine(input)
     if self.has_bias:
         assert bias is not None
         transformed_bias = F.softsign(self.bias_affine(bias))
         hidden += F.unsqueeze(transformed_bias, [1])
     return hidden
Example #8
        def bow(ids):
            embed = L.embedding(
                input=ids,
                size=[self.config.vocab_size, self.config.emb_size],
                dtype=self._emb_dtype,
                param_attr=F.ParamAttr(name=self._word_emb_name,
                                       initializer=self._param_initializer),
                is_sparse=False)

            zero = L.fill_constant(shape=[1], dtype='int64', value=0)
            pad = L.cast(L.logical_not(L.equal(ids, zero)), 'float32')
            sumed = L.reduce_sum(embed * pad, dim=1)
            sumed = L.softsign(sumed)
            return sumed
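
Examples #1, #8, #9, and #12 all use the same pooling recipe: zero out padding positions (token id 0), sum the embeddings over the sequence axis, and apply softsign so the magnitude of the pooled vector stays bounded regardless of sequence length. A hedged NumPy sketch of that recipe (the shapes and the pad id of 0 are carried over from the examples; the function name is illustrative):

import numpy as np

def masked_bow_pool(embeddings, ids, pad_id=0):
    # embeddings: (B, T, D) token embeddings; ids: (B, T) token ids, pad_id marks padding
    mask = (ids != pad_id).astype('float32')[:, :, None]  # (B, T, 1)
    pooled = (embeddings * mask).sum(axis=1)              # (B, D) masked sum
    return pooled / (1.0 + np.abs(pooled))                # softsign keeps values in (-1, 1)

B, T, D = 2, 6, 8
ids = np.array([[5, 3, 9, 0, 0, 0], [7, 2, 0, 0, 0, 0]])
print(masked_bow_pool(np.random.randn(B, T, D), ids).shape)  # (2, 8)
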
Example #9
    def forward(self, ids, labels=None):
        embbed = self.emb(ids)
        pad_mask = L.unsqueeze(L.cast(ids != 0, 'float32'), [-1])

        embbed = L.reduce_sum(embbed * pad_mask, 1)
        embbed = L.softsign(embbed)
        logits = self.fc(embbed)
        if labels is not None:
            if len(labels.shape) == 1:
                labels = L.reshape(labels, [-1, 1])
            loss = L.softmax_with_cross_entropy(logits, labels)
            loss = L.reduce_mean(loss)
        else:
            loss = None
        return loss, logits
Example #10
    def listwise_loss(self, args):
        """listwise model"""
        self.logits = L.matmul(
            self.query_repr, self.poi_repr, transpose_y=True)
        if self.norm_score:
            self.logits = L.softsign(self.logits)

        if args.scale_softmax:
            scale = L.create_parameter(
                shape=[1],
                dtype="float32",
                name="final_scale",
                default_initializer=F.initializer.ConstantInitializer(value=1.0))
            bias = L.create_parameter(
                shape=[1],
                dtype="float32",
                name="final_bias",
                default_initializer=F.initializer.ConstantInitializer(value=0.0))
            self.logits = self.logits * scale * scale + bias

        self.score = L.softmax(self.logits)
        self.loss = L.softmax_with_cross_entropy(self.logits, self.labels)
        self.loss = L.reduce_mean(self.loss)
        self.acc = L.accuracy(L.softmax(self.logits), self.labels)
        self.metrics = [self.loss, self.acc]
Example #11
 def forward(self, input, bias=None):
     """
     input -> (dropout) -> hidden
     bias -> (affine) -> softsign -> transformed_bias
     hidden += transformed_bias
     hidden -> (affine + weight_norm) -> relu -> hidden
     """
     hidden = input
     if self.dropout:
         hidden = F.dropout(hidden, 1. - self.keep_prob,
                            dropout_implementation="upscale_in_train")
     if self.has_bias:
         assert bias is not None
         transformed_bias = F.softsign(self.bias_affine(bias))
         hidden += F.unsqueeze(transformed_bias, [1])
     hidden = F.relu(self.affine(hidden))
     return hidden
Example #12
 def forward(self, ids, labels=None):
     embbed = self.emb(ids)
     #d_batch, d_seqlen = ids.shape
     hidden = embbed
     hidden = L.transpose(hidden, [0, 2, 1])  # (B, T, C) -> (B, C, T)
     hidden = L.unsqueeze(hidden, [2])  # -> (B, C, 1, T), i.e. NCHW with H=1
     hidden = self.cnn(hidden)
     hidden = self.pool(hidden)
     hidden = L.squeeze(hidden, [2])
     hidden = L.transpose(hidden, [0, 2, 1])
     pad_mask = L.unsqueeze(L.cast(ids != 0, 'float32'), [-1])
     hidden = L.softsign(L.reduce_sum(hidden * pad_mask, 1))
     logits = self.fc(hidden)
     if labels is not None:
         if len(labels.shape) == 1:
             labels = L.reshape(labels, [-1, 1])
         loss = L.softmax_with_cross_entropy(logits, labels)
         loss = L.reduce_mean(loss)
     else:
         loss = None
     return loss, logits
Example #13
    def prepare_emb(self, feature_group, feature_info, out_size=0):
        """
        look up an embedding for each feature, concatenate them, apply softsign,
        and optionally project to `out_size` with a ReLU fc layer
        """
        embs = []
        for (i, feature) in enumerate(feature_info):
            emb = layers.embedding(input=feature_group[i],
                                   param_attr=fluid.ParamAttr(name='%s_emb' %
                                                              feature[1]),
                                   size=self.feature_voc_num_dict[feature[1]],
                                   is_sparse=True)
            embs.append(emb)
        concat_emb = layers.concat(embs, axis=1)
        concat_emb = layers.softsign(concat_emb)

        if out_size > 0:
            concat_emb = layers.fc(
                input=concat_emb,
                size=out_size,
                param_attr=fluid.ParamAttr(learning_rate=self.fc_lr),
                act='relu')

        return concat_emb