Example #1
 def build_model(self):
     # Remark: sharing weights across embedding layers is not supported,
     # so the model takes one concatenated input and slices it into the two texts.
     input = Input(name='input',
                   shape=(self.text1_length + self.text2_length, ))
     embedding = Embedding(self.vocab_size,
                           self.embed_size,
                           weights=self.embed_weights,
                           trainable=self.train_embed)(input)
     query_embed = embedding.slice(1, 0, self.text1_length)
     doc_embed = embedding.slice(1, self.text1_length, self.text2_length)
     mm = A.batch_dot(query_embed, doc_embed,
                      axes=[2, 2])  # Translation Matrix.
     KM = []
     for i in range(self.kernel_num):
         mu = 1. / (self.kernel_num - 1) + (2. * i) / (self.kernel_num - 1) - 1.0
         sigma = self.sigma
         if mu > 1.0:  # Exact match.
             sigma = self.exact_sigma
             mu = 1.0
         mm_exp = A.exp(-0.5 * (mm - mu) * (mm - mu) / sigma / sigma)
         mm_doc_sum = A.sum(mm_exp, 2)
         mm_log = A.log(mm_doc_sum + 1.0)
         # Remark: keep the reduced dimension for the last sum and squeeze after
         # stacking; otherwise, with batch=1 the output collapses to a scalar,
         # which cannot be stacked.
         mm_sum = A.sum(mm_log, 1, keepDims=True)
         KM.append(mm_sum)
     Phi = Squeeze(2)(A.stack(KM, 1))
     output = Dense(1, init="uniform", activation="sigmoid")(Phi)
     model = Model(input=input, output=output)
     return model
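For kernel_num = K, the loop above spaces the kernel means evenly over [-1, 1] with step 2/(K-1) and clamps the last kernel to mu = 1.0, the exact-match kernel, which gets the narrower exact_sigma. A minimal standalone sketch of that schedule (plain Python; the kernel_num/sigma values are illustrative, not from the source):

kernel_num, sigma, exact_sigma = 11, 0.1, 0.001  # illustrative values
kernels = []
for i in range(kernel_num):
    mu = 1. / (kernel_num - 1) + (2. * i) / (kernel_num - 1) - 1.0
    s = sigma
    if mu > 1.0:  # the last kernel is clamped to handle exact matches
        mu, s = 1.0, exact_sigma
    kernels.append((round(mu, 2), s))
print(kernels)
# [(-0.9, 0.1), (-0.7, 0.1), ..., (0.9, 0.1), (1.0, 0.001)]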
Example #2
 def _to_tensor(self):
     input = self.model_inputs[0].zvalue
     assert len(self.onnx_attr['axes']) == 1, \
         "we only support axes with 1 element for now"
     axes = self.onnx_attr['axes'][0]
     keepdims = self.onnx_attr['keepdims'] == 1
     return autograd.sum(input, axis=int(axes), keepDims=keepdims)
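The ONNX keepdims attribute decides whether the reduced axis survives with size 1, which is what the flag above is forwarded into autograd.sum for. A quick NumPy illustration of the two resulting shapes (NumPy stands in for the Zoo op here):

import numpy as np

x = np.ones((2, 3, 4))
print(np.sum(x, axis=1).shape)                 # (2, 4)    keepdims == 0
print(np.sum(x, axis=1, keepdims=True).shape)  # (2, 1, 4) keepdims == 1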
Example #3
    def build(self):
        def Kernel_layer(mu, sigma):
            # RBF kernel centered at mu with width sigma.
            def kernel(x):
                return A.exp(-0.5 * (x - mu) * (x - mu) / sigma / sigma)

            return A.Lambda(kernel)  # Activation(kernel)

        input = klayers1.Input(name='input', shape=(50, ))
        # slice(dim, start, length): split the concatenated input into the two texts.
        query = input.slice(1, 0, 10)  # klayers1.Input(name='query', shape=(self.config['text1_maxlen'],))
        # show_layer_info('Input', query)
        doc = input.slice(1, 10, 40)  # klayers1.Input(name='doc', shape=(self.config['text2_maxlen'],))
        # show_layer_info('Input', doc)
        embedding = klayers1.Embedding(
            self.config['vocab_size'],
            self.config['embed_size'],
            name="embedding")  # weights=[self.config['embed']], trainable=self.config['train_embed'],
        q_embed, d_embed = self.share(embedding, query, doc)
        #show_layer_info('Embedding', q_embed)
        #show_layer_info('Embedding', d_embed)
        mm = A.batch_dot(q_embed, d_embed, axes=[2, 2], normalize=False)
        #show_layer_info('Dot', mm)

        KM = []
        for i in range(self.config['kernel_num']):
            mu = (1. / (self.config['kernel_num'] - 1)
                  + (2. * i) / (self.config['kernel_num'] - 1) - 1.0)
            sigma = self.config['sigma']
            if mu > 1.0:
                sigma = self.config['exact_sigma']
                mu = 1.0
            mm_exp = Kernel_layer(mu, sigma)(mm)
            #show_layer_info('Exponent of mm:', mm_exp)
            mm_doc_sum = A.Lambda(lambda x: A.sum(x, 2))(mm_exp)  # reduce_sum(x, 2)
            #show_layer_info('Sum of document', mm_doc_sum)
            mm_log = A.Lambda(lambda x: A.log(x + 1.0))(mm_doc_sum)
            #show_layer_info('Logarithm of sum', mm_log)
            mm_sum = A.Lambda(lambda x: A.sum(x, 1))(mm_log)
            #show_layer_info('Sum of all exponent', mm_sum)
            KM.append(mm_sum)

        KMStack = A.stack(KM, 1)
        Phi = A.Lambda(lambda x: x)(KMStack)  # identity wrapper around the stacked kernel features
        #show_layer_info('Stack', Phi)
        if self.config['target_mode'] == 'classification':
            out_ = Dense(2,
                         activation='softmax',
                         bias_initializer='zeros',
                         name="dense")(Phi)
        elif self.config['target_mode'] in ['regression', 'ranking']:
            out_ = Dense(1, bias_initializer='zeros', name="dense")(Phi)
        #show_layer_info('Dense', out_)

        #timedistributed = layer1.TimeDistributed(out_, input_shape=(10, 12))
        model = Model(input=input, output=[out_])
        return model
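As a sanity check on the kernel-pooling pipeline above (translation matrix -> per-kernel soft match counts -> log -> sum over query terms), the same math can be written in NumPy. This is an illustrative reference under assumed shapes and kernel settings, not the Zoo execution path:

import numpy as np

batch, q_len, d_len, kernel_num = 2, 10, 40, 11  # assumed shapes
rng = np.random.default_rng(0)
# Stand-in translation matrix with entries in (-1, 1), as cosine-like scores.
mm = np.tanh(rng.standard_normal((batch, q_len, d_len)))

KM = []
for i in range(kernel_num):
    mu = 1. / (kernel_num - 1) + (2. * i) / (kernel_num - 1) - 1.0
    sigma = 0.1
    if mu > 1.0:  # exact-match kernel: clamp mu, narrow the width
        mu, sigma = 1.0, 0.001
    mm_exp = np.exp(-0.5 * (mm - mu) ** 2 / sigma ** 2)
    mm_doc_sum = mm_exp.sum(axis=2)    # soft-TF per query term
    mm_log = np.log(mm_doc_sum + 1.0)
    KM.append(mm_log.sum(axis=1))      # pool over query terms
Phi = np.stack(KM, axis=1)
print(Phi.shape)  # (2, 11): one soft-match feature per kernel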