Example 1
 def build_graph(self, tensor):
     with tf.variable_scope('linear1'):
         tensor = utils.linear_layer(tensor, 128)
     with tf.variable_scope('linear2'):
         tensor = utils.linear_layer(tensor, 1, activate=tf.nn.sigmoid)
         # tensor = utils.linear_layer(tensor, 1, activate=lambda x:x)
     return tensor
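
All of these examples call a utils.linear_layer helper that the page does not show. A minimal TF1-style sketch that is consistent with the call sites above (the signature, default activation, and initializer here are assumptions, not the original implementation):

    import tensorflow as tf  # TensorFlow 1.x

    def linear_layer(tensor, units, activate=tf.nn.relu, name=''):
        # Fully connected layer; variables are created inside the enclosing
        # tf.variable_scope, and `name` disambiguates a second layer living
        # in the same scope (e.g. name='ac' in Example 6 below).
        in_dim = int(tensor.shape[-1])
        weight = tf.get_variable('weight' + name, shape=[in_dim, units],
                                 initializer=tf.glorot_uniform_initializer())
        bias = tf.get_variable('bias' + name, shape=[units],
                               initializer=tf.zeros_initializer())
        return activate(tf.matmul(tensor, weight) + bias)
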
Example 2
 def build_graph(self, tensor):
     with tf.variable_scope('linear1'):
         tensor = utils.linear_layer(tensor, 128)
         # with tf.variable_scope('linear2'):
         #     tensor = utils.linear_layer(tensor, 500)
     with tf.variable_scope('linear3'):
         tensor = utils.linear_layer(tensor, self.example_shape, tf.sigmoid)
     return tensor
Example 3
 def build_graph(self, tensor):
     with tf.variable_scope('linear1'):
         tensor = utils.linear_layer(tensor, 128)
     # with tf.variable_scope('linear2'):
     #     tensor = utils.linear_layer(tensor,500)
     with tf.variable_scope('linear3'):
         tensor = utils.linear_layer(tensor, FLAGS.latent_dims, lambda x: x)
     return tensor
Example 4
 def build_graph(self, tensor):
     with tf.variable_scope('linear1'):
         tensor = utils.linear_layer(tensor, 128)
     with tf.variable_scope('linear2'):
         tensor = utils.linear_layer(tensor, self.example_shape,
                                     activate=tf.nn.sigmoid)
     image = tf.reshape(tensor,
                        shape=[self.batch_size, self.image_w, self.image_h,
                               self.channel])
     tf.summary.image('out_image', image)
     return tensor
Example 5
 def build_graph(self, tensor):
     with tf.variable_scope('linear1'):
         tensor = utils.linear_layer(tensor, 128)
     with tf.variable_scope('linear2'):
         tensor = utils.linear_layer(tensor,
                                     self.cat_num,
                                     activate=lambda x: x)
     return tensor
Example 6
 def build_graph(self, tensor):
     with tf.variable_scope('linear1'):
         features = utils.linear_layer(tensor, 128)
     with tf.variable_scope('linear'):
         tensor = utils.linear_layer(features, 1, activate=tf.nn.sigmoid)
         # In wiseodd's GitHub, the ac_layer didn't use sigmoid as an
         # activation function, but in my experiment the network would not
         # train without it; I don't know why.
         # The auxiliary classifier branches off the shared 128-dim
         # features, not off the 1-unit discriminator output above.
         ac_tensor = utils.linear_layer(features, 10, activate=tf.nn.sigmoid, name='ac')
     return tensor, ac_tensor
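
The comment above refers to wiseodd's reference implementation, where the auxiliary-classifier head returns raw logits and the softmax is applied inside the loss op instead of on the layer. A hedged sketch of that pairing (features and class_labels are illustrative names, not from the example):

    # with an identity AC head, the activation moves into the loss:
    ac_logits = utils.linear_layer(features, 10, activate=lambda x: x, name='ac')
    ac_loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(labels=class_labels,
                                                   logits=ac_logits))
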
Example 7
    def build_decoder(self, tensor):
        with tf.variable_scope('decode_linear_1'):
            tensor = linear_layer(tensor, self.hidden_dims[0], tf.nn.softplus)
        with tf.variable_scope('decode_linear_2'):
            tensor = linear_layer(tensor, self.hidden_dims[1], tf.nn.softplus)
        with tf.variable_scope('decoder_linear_4'):
            tensor = linear_layer(tensor,
                                  self.image_size,
                                  activate=tf.nn.sigmoid)

        return tensor
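
Since the last decoder layer uses a sigmoid, the output lies in [0, 1] and is typically paired with a Bernoulli (binary cross-entropy) reconstruction loss. A sketch of that downstream usage under assumed names (x, z; the loss is not part of the example):

    recon = self.build_decoder(z)  # values in [0, 1]
    recon_loss = -tf.reduce_sum(
        x * tf.log(recon + 1e-8) + (1. - x) * tf.log(1. - recon + 1e-8),
        axis=1)
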
Example 8
    def build_encode(self, inputs):
        # output shape 64*28*28*64
        inputs = tf.reshape(
            inputs,
            shape=[self.batch_size, self.image_w, self.image_h, self.channel])
        tf.summary.image('input_image', inputs)
        with tf.variable_scope('conv_layer1'):
            tensor = conv_layer(inputs,
                                filters=self.filters[0],
                                k_size=self.kernel[0],
                                strides=1)

        # output shape = 64*14*14*64
        with tf.variable_scope('max_pool_layer_1'):
            ksize = [1, 5, 5, 1]
            strides = [1, 2, 2, 1]
            tensor = tf.nn.max_pool(tensor,
                                    ksize=ksize,
                                    strides=strides,
                                    padding='SAME')
        # output shape = 64*14*14*64
        with tf.variable_scope('conv_layer2'):
            tensor = conv_layer(tensor,
                                self.filters[1],
                                self.kernel[1],
                                strides=1)
        # output shape = 64*7*7*64
        with tf.variable_scope('max_pool_layer_2'):
            ksize = [1, 5, 5, 1]
            strides = [1, 2, 2, 1]
            tensor = tf.nn.max_pool(tensor,
                                    ksize=ksize,
                                    strides=strides,
                                    padding='SAME')
            # output_size = tf.shape(pool)[1,2]
        tensor = tf.reshape(tensor, [self.batch_size, -1])

        with tf.variable_scope('linear'):
            tensor = linear_layer(tensor, self.linear_units)

        with tf.variable_scope('latent'):
            # The mean and log-variance heads must both branch from the
            # shared hidden representation, not from each other.
            mean = linear_layer(tensor, self.variation_dim, lambda x: x)
            log_var = linear_layer(tensor,
                                   self.variation_dim,
                                   lambda x: x,
                                   name='2')
        return mean, log_var
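
The two tensors returned here act as the mean and log-variance of the approximate posterior. A sketch of the reparameterization step that typically consumes them (variable names are assumptions):

    mean, log_var = self.build_encode(inputs)
    eps = tf.random_normal(tf.shape(mean))
    z = mean + tf.exp(0.5 * log_var) * eps  # differentiable sample from q(z|x)
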
Example 9
    def build_graph(self, tensor):
        with tf.variable_scope('G_linear1'):
            tensor = utils.linear_layer(tensor, self.linear_units[0])
            # tensor = tf.reshape(tensor, [self.batch_size, 7, 7, 1])
        # with tf.variable_scope('G_deconv_1'):
        #     tensor = utils.de_conv_layer(tensor, 32, [3, 3], 2)
        # with tf.variable_scope('G_deconv_2'):
        #     tensor = utils.de_conv_layer(tensor, 1, [3, 3], 2, activate=tf.nn.sigmoid)

        # with tf.variable_scope('G_linear2'):
        #     tensor = utils.linear_layer(tensor, self.linear_units[1])
        with tf.variable_scope('G_linear3'):
            tensor = utils.linear_layer(tensor,
                                        self.image_w * self.image_h * self.channel,
                                        activate=tf.nn.sigmoid)
        image = tf.reshape(tensor, shape=(self.batch_size, self.image_w,
                                          self.image_h, self.channel))
        tf.summary.image('output_image', image)
        tensor = tf.reshape(tensor, [tensor.shape[0], -1])
        return tensor
Example 10
 def build_graph(self, tensor):
     with tf.variable_scope('D_linear1', reuse=tf.AUTO_REUSE):
         tensor = utils.linear_layer(tensor, self.linear_units[0],
                                     lambda x: tf.nn.leaky_relu(x, 0.5))
     with tf.variable_scope('D_linear2', reuse=tf.AUTO_REUSE):
         tensor = utils.linear_layer(tensor, self.linear_units[1],
                                     lambda x: tf.nn.leaky_relu(x, 0.5))
     # with tf.variable_scope('D_soft_max', reuse=tf.AUTO_REUSE):
     #     logits = utils.linear_layer(tensor, 1, activate=tf.nn.sigmoid)
     # tensor = tf.reshape(tensor, [tensor.shape[0], FLAGS.image_w, FLAGS.image_h, 1])
     # with tf.variable_scope('conv1'):
     #     tensor = utils.conv_layer(tensor, 64, [3, 3], 1)
     # with tf.variable_scope('conv2'):
     #     tensor = utils.conv_layer(tensor, 64, [3, 3], 1)
     # tensor = tf.reshape(tensor, [tensor.shape[0], -1])
     with tf.variable_scope('linear'):
         # logits = utils.linear_layer(tensor, 1, lambda x: x)
         logits = utils.linear_layer(tensor, 1, tf.nn.sigmoid)
     return logits
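
Every scope in this example sets reuse=tf.AUTO_REUSE, so the same discriminator weights are shared when build_graph is applied to both the real and the generated batch: variables are created on the first call and reused on the second. A usage sketch with illustrative names (discriminator, generator, real_images, noise):

    d_real = discriminator.build_graph(real_images)
    d_fake = discriminator.build_graph(generator.build_graph(noise))
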
Example 11
    def build_encode(self, inputs):
        with tf.variable_scope('linear1'):
            tensor = linear_layer(inputs, self.hidden_dims[0], tf.nn.softplus)
            # tensor2 = linear_layer(inputs, 500, tf.nn.softplus, name='2')
            # tensor2 = linear_layer(input,500,tf.nn.softplus,name='2')
        with tf.variable_scope('linear2'):
            tensor = linear_layer(tensor, self.hidden_dims[1], tf.nn.softplus)
            # tensor2 =linear_layer(tensor2,500,tf.nn.softplus,name='2')
        # with tf.variable_scope('linear3'):
        #     tensor = linear_layer(tensor, 198)
        #     tensor2 = linear_layer(tensor2, 198, name='2')

        with tf.variable_scope('linear4'):
            # Both latent heads branch from the shared hidden representation.
            mean = linear_layer(tensor, self.variation_dim, lambda x: x)
            log_var = linear_layer(tensor,
                                   self.variation_dim,
                                   lambda x: x,
                                   name='2')
        return mean, log_var
Example 12
    def build_decoder(self, z):

        # output_shape = 64*14*14*6
        tensor = z
        # with tf.variable_scope('decoder_linear'):
        #     tensor = linear_layer(tensor,self.linear_units)

        with tf.variable_scope('decoder_linear_1'):
            tensor = linear_layer(
                tensor, self.e_size[1] * self.e_size[2] * self.e_size[3])

        tensor = tf.reshape(tensor, self.e_size)
        with tf.variable_scope('deconv_layer1'):
            filter_shape = [
                self.kernel[0][0], self.kernel[0][1], self.filters[0],
                self.in_channel
            ]
            # note: random_normal_initializer takes (mean, stddev), so this
            # draws from N(-0.06, 0.06); a uniform initializer over
            # [-0.06, 0.06] may have been intended
            weights = tf.get_variable(name='weight',
                                      shape=filter_shape,
                                      dtype=tf.float32,
                                      initializer=tf.random_normal_initializer(
                                          -0.06, 0.06))
            strides = [1, 2, 2, 1]
            tensor = tf.nn.conv2d_transpose(tensor,
                                            weights,
                                            output_shape=self.deconv_size[0],
                                            strides=strides)
            bias = tf.get_variable(name='bias',
                                   shape=[self.deconv_size[0][-1]])
            tensor = tf.nn.relu(tf.nn.bias_add(tensor, bias))
        with tf.variable_scope('deconv_layer2'):
            filter_shape = [
                self.kernel[1][0], self.kernel[1][1], self.filters[1],
                self.filters[0]
            ]
            weights = tf.get_variable(name='weight',
                                      shape=filter_shape,
                                      dtype=tf.float32,
                                      initializer=tf.random_normal_initializer(
                                          -0.06, 0.06))
            strides = [1, 2, 2, 1]
            tensor = tf.nn.conv2d_transpose(tensor,
                                            weights,
                                            output_shape=self.deconv_size[1],
                                            strides=strides)
            bias = tf.get_variable(name='bias',
                                   shape=[self.deconv_size[1][-1]])
            image = tf.nn.sigmoid(tf.nn.bias_add(tensor, bias))
            # print(image.shape)

        return image
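
tf.nn.conv2d_transpose requires the output_shape of each layer to be given explicitly, which is what self.deconv_size supplies. For the MNIST-scale 7x7 -> 28x28 path suggested by the comments, the entries would plausibly look like this (hypothetical values, not taken from the source):

    # [batch, height, width, channels] after each stride-2 deconvolution
    self.deconv_size = [
        [self.batch_size, 14, 14, self.filters[0]],  # after deconv_layer1
        [self.batch_size, 28, 28, self.filters[1]],  # after deconv_layer2
    ]
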
Example 13
    def __init__(self, user_num, item_num, entity_num, relation_num, n_layer,
                 embed_dim, hidden_layers, dropouts, output_rec):

        super(MultiKR, self).__init__()
        self.user_embed = nn.Embedding(user_num, embed_dim)
        self.item_embed = nn.Embedding(item_num, embed_dim)
        self.entity_embed = nn.Embedding(entity_num, embed_dim)
        self.relation_embed = nn.Embedding(relation_num, embed_dim)
        self.n_layer = n_layer
        # Register the compress weights as nn.Parameter so the optimizer and
        # model.to(device) see them; a bare torch.rand(..., requires_grad=True)
        # followed by .to(device) yields a non-leaf tensor whose gradients
        # never reach the tensor being updated.
        self.w_vv = nn.Parameter(torch.rand(embed_dim, 1))
        self.w_ev = nn.Parameter(torch.rand(embed_dim, 1))
        self.w_ve = nn.Parameter(torch.rand(embed_dim, 1))
        self.w_ee = nn.Parameter(torch.rand(embed_dim, 1))
        self.bias_v = nn.Parameter(torch.rand(1))
        self.bias_e = nn.Parameter(torch.rand(1))

        # MLPs for the low layers
        self.user_low_mlp_layer = linear_layer(embed_dim, embed_dim, dropout=0.5)
        self.relation_low_mlp_layer = linear_layer(embed_dim, embed_dim, dropout=0.5)
        # MLP for the KGE sub-model
        self.kg_layer = nn.Sequential()
        layers = [2 * embed_dim] + hidden_layers
        for i in range(len(layers) - 1):
            self.kg_layer.add_module(
                'kg_hidden_layer_{}'.format(i + 1),
                linear_layer(layers[i], layers[i + 1], dropouts[i]))
        self.kg_layer.add_module('kg_last_layer',
                                 linear_layer(layers[-1], embed_dim))
        # MLP for the recommendation sub-model
        self.rec_layer = nn.Sequential()
        layers = [2 * embed_dim] + hidden_layers
        for i in range(len(layers) - 1):
            self.rec_layer.add_module(
                'rec_hidden_layer_{}'.format(i + 1),
                linear_layer(layers[i], layers[i + 1], dropouts[i]))
        self.rec_layer.add_module('rec_last_layer',
                                  linear_layer(layers[-1], output_rec))
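
The linear_layer used by these two PyTorch examples is a small block factory, unlike the TF1 helper earlier on the page. A minimal sketch matching the call sites (the ReLU activation and layer ordering are assumptions):

    import torch
    import torch.nn as nn

    def linear_layer(in_dim, out_dim, dropout=0.0):
        # Linear -> ReLU -> optional Dropout block
        layers = [nn.Linear(in_dim, out_dim), nn.ReLU()]
        if dropout > 0:
            layers.append(nn.Dropout(dropout))
        return nn.Sequential(*layers)
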
Example 14
    def __init__(self, user_num, item_num, entity_num, relation_num, n_layer,
                 embed_dim, hidden_layers, dropouts, output_rec):
        """

        :param user_num:
        :param item_num:
        :param entity_num:
        :param relation_num:
        :param n_layer:
        :param embed_dim:
        :param hidden_layers:
        :param dropouts:
        """
        super(MultiKR, self).__init__()

        # user embedding
        self.user_embed = nn.Embedding(user_num, embed_dim)

        # item embedding
        self.item_embed = nn.Embedding(item_num, embed_dim)

        # entity embedding
        self.entity_embed = nn.Embedding(entity_num, embed_dim)

        # relation embedding
        self.relation_embed = nn.Embedding(relation_num, embed_dim)

        # low mlp layer number
        self.n_layer = n_layer

        # compress vectors, registered as nn.Parameter so that they appear in
        # model.parameters() and follow model.to(device); plain requires_grad
        # tensors stored on a module are skipped by both
        self.compress_weight_vv = nn.Parameter(torch.rand(embed_dim, 1))
        self.compress_weight_ev = nn.Parameter(torch.rand(embed_dim, 1))
        self.compress_weight_ve = nn.Parameter(torch.rand(embed_dim, 1))
        self.compress_weight_ee = nn.Parameter(torch.rand(embed_dim, 1))
        self.compress_bias_v = nn.Parameter(torch.rand(1))
        self.compress_bias_e = nn.Parameter(torch.rand(1))

        # mlp for low layer
        self.user_low_mlp_layer = linear_layer(embed_dim,
                                               embed_dim,
                                               dropout=0.5)
        self.relation_low_mlp_layer = linear_layer(embed_dim,
                                                   embed_dim,
                                                   dropout=0.5)

        # mlp for kg sub model
        self.kg_layers = nn.Sequential()
        layers = [2 * embed_dim] + hidden_layers
        for i in range(len(layers) - 1):
            self.kg_layers.add_module(
                'kg_hidden_layer_{}'.format(i + 1),
                linear_layer(layers[i], layers[i + 1], dropouts[i]))
        self.kg_layers.add_module('kg_last_layer',
                                  linear_layer(layers[-1], embed_dim))

        # mlp for recommend sub model
        self.rec_layers = nn.Sequential()
        layers = [2 * embed_dim] + hidden_layers
        for i in range(len(layers) - 1):
            self.rec_layers.add_module(
                'rec_hidden_layer_{}'.format(i + 1),
                linear_layer(layers[i], layers[i + 1], dropouts[i]))
        self.rec_layers.add_module('rec_last_layer',
                                   linear_layer(layers[-1], output_rec))
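
The compress weights and biases registered above implement the cross&compress unit from the MKR paper. A hedged sketch of the forward computation they support (the method name and exact formulation are assumptions; the model's forward() is not shown on this page):

    def cross_compress(self, v, e):
        # v, e: [batch, embed_dim] item and entity vectors
        cross = torch.bmm(v.unsqueeze(2), e.unsqueeze(1))  # [batch, d, d]
        v_next = (cross @ self.compress_weight_vv).squeeze(2) \
            + (cross.transpose(1, 2) @ self.compress_weight_ev).squeeze(2) \
            + self.compress_bias_v
        e_next = (cross @ self.compress_weight_ve).squeeze(2) \
            + (cross.transpose(1, 2) @ self.compress_weight_ee).squeeze(2) \
            + self.compress_bias_e
        return v_next, e_next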