Example no. 1
def general_lnl_aggregation(self, h, intra, inter, local):
    name = 'scaling_beta'
    output = tf.multiply((1 - self.W[name]), local) + \
             tf.multiply(self.W[name], tf.add(intra, inter))
    local[:, int(self.depth / 2)] = output
    output = h + local
    return output
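Here `self.W['scaling_beta']` acts as a learned elementwise gate: the method computes output = (1 - beta) * local + beta * (intra + inter), writes the gated value into the middle slice of `local`, and then adds the residual `h`. Examples 2 and 4 below add a second gate, `scaling_alpha`, that weights the local term independently.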
Example no. 2
def general_lnl_aggregation2(self, h, intra, inter, local):
    name = 'scaling_beta'
    name_2 = 'scaling_alpha'
    output = tf.multiply(self.W[name_2], local) + tf.multiply(
        self.W[name], inter) + tf.multiply(1 - self.W[name], intra)
    local[:, int(self.depth / 2)] = output
    output = h + local
    return output
Example no. 3
def goal_decoder(flat_goal, dec_arch, state_dim, act_fun):
    initiation_hat = flat_goal
    for idx, layer_size in enumerate(dec_arch):
        initiation_hat = act_fun(
            linear(initiation_hat,
                   "goal_dec_fc{}".format(idx),
                   layer_size,
                   init_scale=np.sqrt(2)))

    mn = act_fun(
        linear(initiation_hat,
               "initiation_mean",
               state_dim,
               init_scale=np.sqrt(2)))
    sd = 0.5 * act_fun(
        linear(
            initiation_hat, "initiation_std", state_dim,
            init_scale=np.sqrt(2)))
    eps = tf.random_normal(shape=[tf.shape(initiation_hat)[0], state_dim],
                           mean=0.0,
                           stddev=1.0)
    state_I_hat = mn + tf.multiply(eps, tf.exp(sd))

    return state_I_hat, mn, sd
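A minimal usage sketch for Example no. 3, assuming a TF1-style graph and that `linear(input, scope, n_hidden, init_scale=...)` is a dense-layer helper available in scope (it is not defined in the snippet); the final line of the decoder is the reparameterization trick, sampling state = mean + eps * exp(log_std):

import numpy as np
import tensorflow as tf

# Hypothetical shapes: a 64-d flattened goal decoded into a 32-d state sample.
flat_goal = tf.placeholder(tf.float32, shape=[None, 64], name='flat_goal')
state_I_hat, mn, sd = goal_decoder(flat_goal,
                                   dec_arch=[128, 128],
                                   state_dim=32,
                                   act_fun=tf.nn.relu)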
Example no. 4
def PAP_lnl_aggregation2(self, intra, inter, local):
    name = 'scaling_beta'
    name_2 = 'scaling_alpha'
    output = tf.multiply(self.W[name_2], local) + \
             tf.multiply(self.W[name], inter) + \
             tf.multiply(1 - self.W[name], intra)
    return output
Example no. 5
def PAP_lnl_aggregation(self, intra, inter, local):
    name = 'scaling_beta'
    output = tf.multiply((1 - self.W[name]), local) + \
             tf.multiply(self.W[name], tf.add(intra, inter))
    return output
    def _init_graph(self):
        self.graph = tf.Graph()

        with self.graph.as_default():

            tf.set_random_seed(self.random_seed)
            self.feat_index=tf.placeholder(tf.int32,shape=[None,None],name='feat_index')
            self.feat_value=tf.placeholder(tf.float32,shape=[None,None],name='feat_value')
            self.label=tf.placeholder(tf.float32,shape=[None,1],name='label')
            self.dropout_keep_fm=tf.placeholder(tf.float32,shape=[None],name='dropout_keep_fm')
            self.dropout_keep_deep=tf.placeholder(tf.float32,shape=[None],name='dropout_keep_deep')
            self.train_phase=tf.placeholder(tf.bool,name='train_phase')
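            # Expected feed shapes (inferred from the placeholders above; batch size is None):
            #   feat_index:        [batch, field_size] int32   -- one feature id per field
            #   feat_value:        [batch, field_size] float32 -- raw value of each feature
            #   label:             [batch, 1] float32
            #   dropout_keep_fm:   1-D keep probabilities for the FM terms
            #   dropout_keep_deep: 1-D keep probabilities for the deep layers
            #   train_phase:       scalar bool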

            self.weights = self._initialize_weights()

            # model
            self.embeddings = tf.nn.embedding_lookup(self.weights['feature_embeddings'],self.feat_index) # None*F*K
            feat_value = tf.reshape(self.feat_value,shape=[-1,self.field_size,1])
            self.embeddings = tf.multiply(self.embeddings,feat_value)

            # first order term
            self.y_first_order = tf.nn.embedding_lookup(self.weights['feature_bias'],self.feat_index)
            self.y_first_order = tf.reduce_sum(tf.multiply(self.y_first_order,feat_value),2)
            self.y_first_order = tf.nn.dropout(self.y_first_order,self.dropout_keep_fm[0])

            # second order term
            # sum-square-part
            self.summed_features_emb = tf.reduce_sum(self.embeddings,1)
            self.summed_features_emb_square = tf.square(self.summed_features_emb)

            # square-sum-part
            self.squared_features_emb = tf.square(self.embeddings)
            self.squared_sum_features_emb = tf.reduce_sum(self.squared_features_emb,1)

            # second order
            self.y_second_order = 0.5*tf.subtract(self.summed_features_emb_square,self.squared_sum_features_emb)
            self.y_second_order = tf.nn.dropout(self.y_second_order,self.dropout_keep_fm[1])
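            # The sum-square / square-sum terms above implement the standard FM identity
            #   sum_{i<j} <v_i, v_j> x_i x_j = 0.5 * [(sum_i v_i x_i)^2 - sum_i (v_i x_i)^2]
            # which reduces pairwise interactions from O(F^2 * K) to O(F * K).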

            # Deep component
            self.y_deep = tf.reshape(self.embeddings,shape = [-1,self.field_size*self.embedding_size])
            self.y_deep = tf.nn.dropout(self.y_deep,self.dropout_keep_deep[0])

            for i in range(0,len(self.deep_layers)):
                self.y_deep = tf.add(tf.matmul(self.y_deep,self.weights["layer_%d"%i]),self.weights["bias_%d"%i])
                self.y_deep = self.deep_layers_activation(self.y_deep)
                self.y_deep = tf.nn.dropout(self.y_deep,self.dropout_keep_deep[1+i])

            #DeepFM
            if self.use_fm and self.use_deep:
                concat_input = tf.concat([self.y_first_order,self.y_second_order,self.y_deep],axis=1)
            elif self.use_fm:
                concat_input = tf.concat([self.y_first_order,self.y_second_order],axis=1)
            elif self.use_deep:
                concat_input = self.y_deep
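            # concat_input width: field_size (first order) + embedding_size (second order)
            # + deep_layers[-1] (deep component), when both branches are enabled.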
            self.out = tf.add(tf.matmul(concat_input,self.weights["concat_projection"]),self.weights["concat_bias"])

            # loss
            if self.loss_type == "logloss":
                self.out = tf.nn.sigmoid(self.out)
                self.loss = tf.losses.log_loss(self.label,self.out)
            elif self.loss_type == "mse":
                self.loss = tf.nn.l2_loss(tf.subtract(self.label,self.out))

            # L2 regularization on weights
            if self.l2_reg > 0:
                self.loss += tf.contrib.layers.l2_regularizer(self.l2_reg)(self.weights["concat_projection"])
                if self.use_deep:
                    for i in range(len(self.deep_layers)):
                        self.loss += tf.contrib.layers.l2_regularizer(self.l2_reg)(self.weights["layer_%d"%i])
            # optimizer
            if self.optimizer_type =="adam":
                self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate,beta1=0.9,beta2=0.999,epsilon=1e-8).minimize(self.loss)
            elif self.optimizer_type == "adagrad":
                self.optimizer = tf.train.AdagradOptimizer(learning_rate=self.learning_rate,initial_accumulator_value=1e-8).minimize(self.loss)
            elif self.optimizer_type == "gd":
                self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
            elif self.optimizer_type == "momentum":
                self.optimizer = tf.train.MomentumOptimizer(learning_rate=self.learning_rate,momentum=0.95).minimize(self.loss)
            elif self.optimizer_type == "yellowfin":
                self.optimizer = YFOptimizer(learning_rate=self.learning_rate, momentum=0.0).minimize(self.loss)
            # init
            self.saver = tf.train.Saver()
            init = tf.global_variables_initializer()
            self.sess = self._init_session()
            self.sess.run(init)
            #number of params
            total_parameters = 0
            for variable in self.weights.values():
                shape = variable.get_shape()
                variable_parameters = 1
                for dim in shape:
                    variable_parameters *= dim.value
                total_parameters += variable_parameters
            if self.verbose > 0:
                print("#params:%d"%total_parameters)
    def _init_session(self):
        config = tf.ConfigProto(device_count={"GPU": 0})
        config.gpu_options.allow_growth = True
        return tf.Session(config=config)

    def _initialize_weights(self):
        weights = dict()
        # embeddings
        weights["feature_embeddings"] = tf.Variable(tf.random_normal([self.feature_size,self.embedding_size],0.0,0.01),name="feature_embeddings")
        weights["feature_bias"] = tf.Variable(tf.random_normal([self.feature_size,1],0.0,1.0),name="feature_bias")
        # deep layers
        num_layer = len(self.deep_layers)