    def build(self):
        print("build PGS_NET_WB")
        params = self.params
        N = params.batch_size  # number of groups/display_ids per batch
        A = params.max_ads  # maximum number of Ads per display_id
        F = params.meta_features  # number of meta features per Ad

        yb = tf.placeholder('float32', shape=[N, A], name='yb')
        X = tf.placeholder('float32', shape=[N, A, F],
                           name='x')  # zero padding
        Xmask = tf.placeholder(
            'float32', shape=[N, A], name='xmask'
        )  # values in {-1e10, 1}: 1 for real Ads, -1e10 for padding Ads
        #Xads = tf.placeholder('float32', shape=[N], name='xads') # number of Ads per display_id
        y = tf.placeholder('float32', shape=[N, A],
                           name='y')  # y in {0, 1} with zero padding
        is_training = tf.placeholder(dtype=tf.bool)

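        # When softmax_transform is enabled, the additive mask drives padded Ads
        # toward exp(-1e10) ~ 0, and the sum over the A axis renormalises the
        # remaining Ads, so each feature becomes a softmax across the Ads of a
        # single display_id.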
        if self.params.softmax_transform:
            print("softmax_transform")
            Xtmp = X + tf.reshape(Xmask, [N, A, 1])
            Xtmp = tf.exp(Xtmp)
            stmp = tf.reduce_sum(Xtmp, 1, keep_dims=True) + 1e-5
            Xtmp = Xtmp / stmp
        else:
            Xtmp = X

        with tf.name_scope("Fully-connected"):
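            # Flatten [N, A, F] to [N*A, F] so each row is a single Ad, then
            # score every Ad with a shared 50 -> 25 -> 1 MLP.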
            Xtmp = tf.reshape(Xtmp, [N * A, F])
            with tf.variable_scope("Layer1"):
                ytmp = fully_connected(Xtmp,
                                       num_neurons=50,
                                       name='W1',
                                       is_training=is_training,
                                       use_batch_norm=True,
                                       use_drop_out=False,
                                       keep_prob=0.7,
                                       activation='relu',
                                       default_batch=params.default_batch)
            with tf.variable_scope("Layer2"):
                ytmp = fully_connected(ytmp,
                                       num_neurons=25,
                                       name='W2',
                                       is_training=is_training,
                                       use_batch_norm=True,
                                       use_drop_out=False,
                                       keep_prob=1.0,
                                       activation='relu',
                                       default_batch=params.default_batch)
            #with tf.variable_scope("Layer4"):
            #    ytmp = fully_connected(ytmp, num_neurons=10, name='W4', is_training = is_training, use_batch_norm=True, use_drop_out=False, keep_prob = 1.0, activation = 'relu', default_batch = params.default_batch)

            with tf.variable_scope("Layer3"):
                ytmp = fully_connected(ytmp,
                                       num_neurons=1,
                                       name='W3',
                                       is_training=is_training,
                                       use_batch_norm=True,
                                       use_drop_out=False,
                                       keep_prob=1,
                                       activation='None',
                                       default_batch=params.default_batch)

        # ytmp is [N*A, 1] now

        yp = tf.reshape(ytmp, [N, A]) * yb + Xmask  # additive mask suppresses the padding Ads
        # yp is [N, A] now

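        # Each display_id is treated as one softmax over its A slots: the
        # cross-entropy below compares the masked logits against the clicked-Ad
        # indicators in y, with padded slots suppressed by the -1e10 mask.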
        with tf.name_scope('Loss'):
            # Cross-Entropy loss
            cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
                logits=yp, labels=y)
            loss = tf.reduce_mean(cross_entropy)
            total_loss = loss + params.weight_decay * tf.add_n(
                tf.get_collection('l2'))

        with tf.name_scope('Predict'):
            pred = tf.nn.softmax(yp)

        if self.params.opt == 'adam':
            optimizer = tf.train.AdamOptimizer(params.learning_rate)
        elif self.params.opt == 'sgd':
            optimizer = tf.train.GradientDescentOptimizer(params.learning_rate)
        elif self.params.opt == 'ada':
            optimizer = tf.train.AdagradOptimizer(params.learning_rate)
        elif self.params.opt == 'rmsprop':
            optimizer = tf.train.RMSPropOptimizer(params.learning_rate)
        else:
            raise ValueError('unknown optimizer: %s' % self.params.opt)
        opt_op = optimizer.minimize(total_loss, global_step=self.global_step)

        self.predictions = pred
        self.loss = cross_entropy
        self.total_loss = total_loss
        self.opt_op = opt_op

        self.yb = yb
        self.x = X
        self.y = y
        self.xmask = Xmask
        self.is_train = is_training
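
# --- Hedged usage sketch (not part of the original source) ------------------
# One plausible way to drive the graph above, assuming a constructed instance
# `net` of this model and NumPy batches with matching shapes; the array names
# below are illustrative only:
#
#     import numpy as np
#     with tf.Session() as sess:
#         sess.run(tf.global_variables_initializer())
#         feed = {net.x: x_batch,          # [N, A, F] meta features
#                 net.xmask: mask_batch,   # 1 for real Ads, -1e10 for padding
#                 net.yb: yb_batch,        # per-Ad multiplier on the logits
#                 net.y: y_batch,          # clicked-Ad indicators, zero padded
#                 net.is_train: True}
#         _, batch_loss = sess.run([net.opt_op, net.total_loss],
#                                  feed_dict=feed)
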
# Example #2
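# This variant scores each Ad independently: sigmoid hidden layers, a
# multiplicative mask instead of the additive one, and an element-wise binary
# cross-entropy in place of the per-display softmax loss.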
    def build(self):
        params = self.params
        N = params.batch_size  # number of groups/display_ids per batch
        A = params.max_ads  # maximum number of Ads per display_id
        F = params.meta_features  # number of meta features per Ad

        X = tf.placeholder('float32', shape=[N, A, F],
                           name='x')  # zero padding
        Xmask = tf.placeholder(
            'float32', shape=[N, A], name='xmask'
        )  # values in {-1e10, 1}: 1 for real Ads, -1e10 for padding Ads
        #Xads = tf.placeholder('float32', shape=[N], name='xads') # number of Ads per display_id
        y = tf.placeholder('float32', shape=[N, A],
                           name='y')  # y in {0, 1} with zero padding
        is_training = tf.placeholder(tf.bool)

        if self.params.softmax_transform:
            print("softmax_transform")
            Xtmp = X + tf.reshape(Xmask, [N, A, 1])
            Xtmp = tf.exp(Xtmp)
            stmp = tf.reduce_sum(Xtmp, 1, keep_dims=True) + 1e-5
            Xtmp = Xtmp / stmp
        else:
            Xtmp = X

        with tf.name_scope("Fully-connected"):
            Xtmp = tf.reshape(Xtmp, [N * A, F])
            with tf.variable_scope("Layer1"):
                ytmp = fully_connected(Xtmp,
                                       num_neurons=50,
                                       name='W1',
                                       is_training=is_training,
                                       use_batch_norm=True,
                                       use_drop_out=False,
                                       keep_prob=0.7,
                                       activation='sigmoid')
            with tf.variable_scope("Layer2"):
                ytmp = fully_connected(ytmp,
                                       num_neurons=25,
                                       name='W2',
                                       is_training=is_training,
                                       use_batch_norm=True,
                                       use_drop_out=False,
                                       keep_prob=1.0,
                                       activation='sigmoid')
            with tf.variable_scope("Layer3"):
                ytmp = fully_connected(ytmp,
                                       num_neurons=1,
                                       name='W3',
                                       is_training=is_training,
                                       use_batch_norm=True,
                                       use_drop_out=False,
                                       keep_prob=1,
                                       activation='None')

        # ytmp is [N*A, 1] now

        yp = tf.reshape(ytmp, [N, A]) * Xmask  # masking the padding Ads
        # yp is [N, A] now

        with tf.name_scope('Loss'):
            # Cross-Entropy loss
            #yp = ytmp * tf.reshape(Xmask,[N*A, 1])
            #y = tf.reshape(y,)
            #cross_entropy = tf.nn.softmax_cross_entropy_with_logits(yp, y)
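            # clamp the masked scores away from 0 and 1 so the logs stay finite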
            yp = tf.maximum(yp, 1e-5)
            yp = tf.minimum(yp, 1 - 1e-5)
            cross_entropy = -(y * tf.log(yp) + (1 - y) * tf.log(1 - yp))
            loss = tf.reduce_mean(cross_entropy)
            total_loss = loss + params.weight_decay * tf.add_n(
                tf.get_collection('l2'))

        with tf.name_scope('Predict'):
            pred = yp  #tf.nn.softmax(yp)

        optimizer = tf.train.AdamOptimizer(params.learning_rate)
        opt_op = optimizer.minimize(total_loss, global_step=self.global_step)

        self.predictions = pred
        self.loss = cross_entropy
        self.total_loss = total_loss
        self.opt_op = opt_op

        self.x = X
        self.y = y
        self.xmask = Xmask
        self.is_train = is_training
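

# --- Hedged sketch (not from the original source) ---------------------------
# Both build() variants call a `fully_connected` helper that is defined
# elsewhere in the repository.  The version below is only one plausible TF 1.x
# implementation matching the call signature used above; in particular the
# handling of `default_batch` (ignored here) and of the string activations
# 'relu' / 'sigmoid' / 'None' is assumed, not taken from the original code.
import tensorflow as tf  # already imported by the original module


def fully_connected(x, num_neurons, name, is_training,
                    use_batch_norm=False, use_drop_out=False,
                    keep_prob=1.0, activation='relu', default_batch=None):
    in_dim = x.get_shape().as_list()[-1]
    W = tf.get_variable(name, shape=[in_dim, num_neurons],
                        initializer=tf.truncated_normal_initializer(stddev=0.05))
    b = tf.get_variable(name + '_b', shape=[num_neurons],
                        initializer=tf.zeros_initializer())
    # register the weight for the L2 term read back via tf.get_collection('l2')
    tf.add_to_collection('l2', tf.nn.l2_loss(W))
    out = tf.matmul(x, W) + b
    if use_batch_norm:
        # the usual UPDATE_OPS handling for batch norm is omitted in this sketch
        out = tf.layers.batch_normalization(out, training=is_training)
    if activation == 'relu':
        out = tf.nn.relu(out)
    elif activation == 'sigmoid':
        out = tf.nn.sigmoid(out)
    # activation == 'None' keeps the output linear
    if use_drop_out:
        out = tf.cond(is_training,
                      lambda: tf.nn.dropout(out, keep_prob=keep_prob),
                      lambda: out)
    return out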