Example #1
    def test_logsigmoid(self):
        program = Program()
        with program_guard(program):
            input = layers.data(name="input", shape=[16], dtype="float32")
            out = layers.logsigmoid(input, name='logsigmoid')
            self.assertIsNotNone(out)
        print(str(program))
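The test above only constructs the program. A minimal runnable sketch, assuming the Paddle 1.x fluid API (with layers being paddle.fluid.layers), that also feeds data and fetches the activation could look like this:

import numpy as np
import paddle.fluid as fluid
import paddle.fluid.layers as layers

# sketch: build a program containing logsigmoid and run it once on random input
main_prog, startup_prog = fluid.Program(), fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
    x = layers.data(name="input", shape=[16], dtype="float32")
    out = layers.logsigmoid(x, name="logsigmoid")

exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup_prog)
feed = {"input": np.random.rand(4, 16).astype("float32")}
result, = exe.run(main_prog, feed=feed, fetch_list=[out])
print(result.shape)  # (4, 16); values are log(sigmoid(x)), so always <= 0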
Example #2
def build_model(args, graph):
    """Build LINE model.

    Args:
        args: The hyperparameters for configure.
    
        graph: The :code:`Graph` data object.
        
    """
    u_i = fl.data(name='u_i',
                  shape=[None, 1],
                  dtype='int64',
                  append_batch_size=False)
    u_j = fl.data(name='u_j',
                  shape=[None, 1],
                  dtype='int64',
                  append_batch_size=False)

    label = fl.data(name='label',
                    shape=[None],
                    dtype='float32',
                    append_batch_size=False)

    lr = fl.data(name='learning_rate',
                 shape=[1],
                 dtype='float32',
                 append_batch_size=False)

    u_i_embed = fl.embedding(input=u_i,
                             size=[graph.num_nodes, args.embed_dim],
                             param_attr='shared_w')

    if args.order == 'first_order':
        u_j_embed = fl.embedding(input=u_j,
                                 size=[graph.num_nodes, args.embed_dim],
                                 param_attr='shared_w')
    elif args.order == 'second_order':
        u_j_embed = fl.embedding(input=u_j,
                                 size=[graph.num_nodes, args.embed_dim],
                                 param_attr='context_w')
    else:
        raise ValueError(
            "order should be first_order or second_order, not %s" %
            (args.order))

    inner_product = fl.reduce_sum(u_i_embed * u_j_embed, dim=1)

    # negative-sampling objective: -E[log sigmoid(label * <u_i, u_j>)], with label in {+1, -1}
    loss = -1 * fl.reduce_mean(fl.logsigmoid(label * inner_product))
    optimizer = fluid.optimizer.RMSPropOptimizer(learning_rate=lr)
    # minimize() appends the backward and optimization ops to the current program
    optimizer.minimize(loss)

    return loss, optimizer
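A hedged driver sketch for build_model follows; the Args and FakeGraph objects are stand-ins invented for illustration (the real example passes its own hyperparameters and a PGL Graph), and fl is assumed to be paddle.fluid.layers:

import numpy as np
import paddle.fluid as fluid

class Args:            # stand-in hyperparameters, not from the source
    embed_dim = 16
    order = 'first_order'

class FakeGraph:       # stand-in for the PGL graph object, not from the source
    num_nodes = 100

main_prog, startup_prog = fluid.Program(), fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
    loss, _ = build_model(Args(), FakeGraph())

exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup_prog)

feed = {
    'u_i': np.random.randint(0, 100, (32, 1)).astype('int64'),
    'u_j': np.random.randint(0, 100, (32, 1)).astype('int64'),
    'label': np.random.choice([1.0, -1.0], 32).astype('float32'),
    'learning_rate': np.array([0.025], dtype='float32'),
}
loss_val, = exe.run(main_prog, feed=feed, fetch_list=[loss])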
Example #3
    def forward(self):
        """Build the skipgram model.
        """
        initrange = 1.0 / self.config['embed_dim']
        embed_init = fluid.initializer.UniformInitializer(low=-initrange,
                                                          high=initrange)
        weight_init = fluid.initializer.TruncatedNormal(
            scale=1.0 / math.sqrt(self.config['embed_dim']))

        embed_src = fl.embedding(
            input=self.train_inputs,
            size=[self.num_nodes, self.config['embed_dim']],
            param_attr=fluid.ParamAttr(name='content', initializer=embed_init))

        weight_pos = fl.embedding(
            input=self.train_labels,
            size=[self.num_nodes, self.config['embed_dim']],
            param_attr=fluid.ParamAttr(name='weight', initializer=weight_init))

        weight_negs = fl.embedding(
            input=self.train_negs,
            size=[self.num_nodes, self.config['embed_dim']],
            param_attr=fluid.ParamAttr(name='weight', initializer=weight_init))

        pos_logits = fl.matmul(embed_src, weight_pos,
                               transpose_y=True)  # [batch_size, 1, 1]

        pos_score = fl.squeeze(pos_logits, axes=[1])
        pos_score = fl.clip(pos_score, min=-10, max=10)
        pos_score = -self.neg_num * fl.logsigmoid(pos_score)

        neg_logits = fl.matmul(embed_src, weight_negs,
                               transpose_y=True)  # [batch_size, 1, neg_num]
        neg_score = fl.squeeze(neg_logits, axes=[1])
        neg_score = fl.clip(neg_score, min=-10, max=10)
        neg_score = -1.0 * fl.logsigmoid(-1.0 * neg_score)
        neg_score = fl.reduce_sum(neg_score, dim=1, keep_dim=True)

        self.loss = fl.reduce_mean(pos_score + neg_score) / self.neg_num / 2
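For reference, a NumPy sketch of the same negative-sampling loss (not part of the source); np.logaddexp gives a numerically stable log-sigmoid:

import numpy as np

def log_sigmoid(x):
    # log(sigmoid(x)) = -log(1 + exp(-x)), computed stably via logaddexp
    return -np.logaddexp(0.0, -x)

def skipgram_loss(pos_score, neg_score, neg_num):
    # pos_score: [batch, 1], neg_score: [batch, neg_num], both already clipped to [-10, 10]
    pos = -neg_num * log_sigmoid(pos_score)
    neg = -log_sigmoid(-neg_score).sum(axis=1, keepdims=True)
    return np.mean(pos + neg) / neg_num / 2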
Example #4
    def forward(self):
        """Build the GATNE net.
        """
        param_attr_init = fluid.initializer.Uniform(
            low=-1.0, high=1.0, seed=np.random.randint(100))
        embed_param_attrs = fluid.ParamAttr(name='Base_node_embed',
                                            initializer=param_attr_init)

        # node_embeddings
        base_node_embed = fl.embedding(
            input=fl.reshape(self.train_inputs, shape=[-1, 1]),
            size=[self.num_nodes, self.embedding_size],
            param_attr=embed_param_attrs)

        node_features = []
        for edge_type in self.edge_types:
            param_attr_init = fluid.initializer.Uniform(
                low=-1.0, high=1.0, seed=np.random.randint(100))
            embed_param_attrs = fluid.ParamAttr(name='%s_node_embed' %
                                                edge_type,
                                                initializer=param_attr_init)

            features = fl.embedding(
                input=self.gw[edge_type].node_feat['index'],
                size=[self.num_nodes, self.embedding_u_size],
                param_attr=embed_param_attrs)

            node_features.append(features)

        # mp_output: list of embedding(self.num_nodes, dim)
        mp_output = self.message_passing(self.gw, self.edge_types,
                                         node_features)

        # U: [num_edge_types (m), num_nodes, embedding_u_size (s)]
        node_type_embed = fl.stack(mp_output, axis=0)

        # U: [num_nodes, num_edge_types (m), embedding_u_size (s)]
        node_type_embed = fl.transpose(node_type_embed, perm=[1, 0, 2])

        # gather node_type_embed for train_inputs
        node_type_embed = fl.gather(node_type_embed, self.train_inputs)

        # M_r
        trans_weights = fl.create_parameter(
            shape=[
                self.edge_type_count, self.embedding_u_size,
                self.embedding_size // self.att_head
            ],
            attr=fluid.initializer.TruncatedNormalInitializer(
                loc=0.0, scale=1.0 / math.sqrt(self.embedding_size)),
            dtype='float32',
            name='trans_w')

        # W_r
        trans_weights_s1 = fl.create_parameter(
            shape=[self.edge_type_count, self.embedding_u_size, self.dim_a],
            attr=fluid.initializer.TruncatedNormalInitializer(
                loc=0.0, scale=1.0 / math.sqrt(self.embedding_size)),
            dtype='float32',
            name='trans_w_s1')

        # w_r
        trans_weights_s2 = fl.create_parameter(
            shape=[self.edge_type_count, self.dim_a, self.att_head],
            attr=fluid.initializer.TruncatedNormalInitializer(
                loc=0.0, scale=1.0 / math.sqrt(self.embedding_size)),
            dtype='float32',
            name='trans_w_s2')

        trans_w = fl.gather(trans_weights, self.train_types)
        trans_w_s1 = fl.gather(trans_weights_s1, self.train_types)
        trans_w_s2 = fl.gather(trans_weights_s2, self.train_types)

        attention = self.attention(node_type_embed, trans_w_s1, trans_w_s2)
        node_type_embed = fl.matmul(attention, node_type_embed)
        node_embed = base_node_embed + fl.reshape(
            fl.matmul(node_type_embed, trans_w), [-1, self.embedding_size])

        self.last_node_embed = fl.l2_normalize(node_embed, axis=1)

        nce_weight_initializer = fluid.initializer.TruncatedNormalInitializer(
            loc=0.0, scale=1.0 / math.sqrt(self.embedding_size))
        nce_weight_attrs = fluid.ParamAttr(name='nce_weight',
                                           initializer=nce_weight_initializer)

        weight_pos = fl.embedding(input=self.train_labels,
                                  size=[self.num_nodes, self.embedding_size],
                                  param_attr=nce_weight_attrs)
        weight_neg = fl.embedding(input=self.train_negs,
                                  size=[self.num_nodes, self.embedding_size],
                                  param_attr=nce_weight_attrs)
        tmp_node_embed = fl.unsqueeze(self.last_node_embed, axes=[1])
        pos_logits = fl.matmul(tmp_node_embed, weight_pos,
                               transpose_y=True)  # [B, 1, 1]

        neg_logits = fl.matmul(tmp_node_embed, weight_neg,
                               transpose_y=True)  # [B, 1, neg_num]

        pos_score = fl.squeeze(pos_logits, axes=[1])
        pos_score = fl.clip(pos_score, min=-10, max=10)
        pos_score = -1.0 * fl.logsigmoid(pos_score)

        neg_score = fl.squeeze(neg_logits, axes=[1])
        neg_score = fl.clip(neg_score, min=-10, max=10)
        neg_score = -1.0 * fl.logsigmoid(-1.0 * neg_score)

        neg_score = fl.reduce_sum(neg_score, dim=1, keep_dim=True)
        self.loss = fl.reduce_mean(pos_score + neg_score)
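The self.attention method is not shown in this example. A hypothetical NumPy sketch of what it plausibly computes, following the GATNE-style softmax(w_s2^T tanh(U W_s1)) readout and matching the shapes used above (the function name and details are assumptions, not the library's API):

import numpy as np

def attention_sketch(node_type_embed, trans_w_s1, trans_w_s2):
    # node_type_embed: [B, m, s], trans_w_s1: [B, s, dim_a], trans_w_s2: [B, dim_a, att_head]
    scores = np.matmul(np.tanh(np.matmul(node_type_embed, trans_w_s1)), trans_w_s2)  # [B, m, att_head]
    scores = np.transpose(scores, (0, 2, 1))                                         # [B, att_head, m]
    scores = np.exp(scores - scores.max(axis=-1, keepdims=True))
    return scores / scores.sum(axis=-1, keepdims=True)  # softmax over the m edge types

Multiplying these weights with node_type_embed, as in fl.matmul(attention, node_type_embed) above, would give a [B, att_head, s] summary per node, which trans_w then projects and reshapes to [-1, self.embedding_size] before being added to base_node_embed.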