Example #1
    def implicitReflection(self, inp):
        # Binary mask marking the observed entries of the interaction vector.
        inpMask = tf.sign(inp)
        V = NNs.defineParam('V', [self.inputDim, LATENT_DIM], reg=True)
        T = NNs.defineParam('T', [LATENT_DIM, LATENT_DIM], reg=False)
        # Encode the input, then reflect it through T, normalized by the
        # number of observed entries.
        h1 = inp @ V
        h2 = h1 @ T * ENHANCE / (tf.reduce_sum(inpMask, axis=-1, keepdims=True) + 1e-6)
        h3 = tf.nn.sigmoid(Bias(h1 + h2))
        pred = FC(h3, self.inputDim, useBias=True, reg=True, activation='sigmoid')
        # Tie the reflection matrix to the encoder: T is pulled toward V^T V.
        self.refLoss = tf.reduce_sum(tf.square(T - tf.transpose(V) @ V))
        return pred
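
A quick way to see what `refLoss` enforces: when `T` equals `V^T V` exactly, the penalty vanishes, so training pulls the reflection matrix toward the Gram matrix of the encoder. A minimal NumPy sketch with toy shapes (not the model's real dimensions):

import numpy as np

rng = np.random.default_rng(0)
V = rng.normal(size=(6, 3))          # stands in for the [inputDim, LATENT_DIM] encoder
T = V.T @ V                          # the value refLoss pulls T toward
print(np.sum((T - V.T @ V) ** 2))    # 0.0: the penalty vanishes when T = V^T V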
Example #2
    def userToItem(self, utlats):
        # Propagate time-aware (sub-)user latents back to items, one behavior
        # type at a time.
        embeds = []
        paramId = 'dfltP%d' % NNs.getParamId()
        for i in range(args.behNums):
            adj = self.i_ut_adjs[i]
            rows, cols = adj['rows'], adj['cols']
            colLats = tf.nn.embedding_lookup(utlats, cols)
            # Append a dummy zero entry for item id itmNum - 1 so segment_sum
            # emits exactly itmNum rows even when trailing items have no
            # neighbors.
            ones = tf.concat([tf.ones_like(rows), [0]], axis=0)
            rows = tf.concat([rows, [self.itmNum - 1]], axis=0)
            colLats = tf.concat([colLats, tf.zeros([1, args.latdim])], axis=0)
            # Mean-pool the neighboring user latents for each item.
            embed = tf.math.segment_sum(colLats, rows) / (tf.cast(
                tf.expand_dims(tf.math.segment_sum(ones, rows), axis=-1),
                tf.float32) + 1e-6)
            # Generate a behavior-specific latdim x latdim transformation from
            # the behavior embedding; `reuse` shares these weights across
            # layers.
            behTrans_w = FC(tf.expand_dims(self.behEmbeds[i], axis=0),
                            args.memosize,
                            activation='relu',
                            useBias=True,
                            reg=True,
                            name=paramId + '_a',
                            reuse=True)
            behTrans = tf.reshape(
                FC(behTrans_w,
                   args.latdim**2,
                   reg=True,
                   name=paramId + '_b',
                   reuse=True), [args.latdim, args.latdim])
            embed = Activate(embed @ behTrans, self.actFunc)
            embeds.append(embed)
        return self.handleMultBehEmbeds(embeds)
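
The padding on `rows`/`colLats` matters because `segment_sum` only emits rows up to the largest segment id it sees; appending a zero entry for id `itmNum - 1` guarantees the pooled tensor covers every item. A minimal NumPy re-enactment of that mean-pooling, with toy sizes:

import numpy as np

itmNum, latdim = 4, 2
rows = np.array([0, 0, 2])                       # only items 0 and 2 have neighbors
colLats = np.ones((3, latdim))
rows = np.append(rows, itmNum - 1)               # dummy segment for the last item
colLats = np.vstack([colLats, np.zeros((1, latdim))])
ones = np.append(np.ones(3), 0.0)
sums = np.zeros((itmNum, latdim))
np.add.at(sums, rows, colLats)                   # segment_sum over rows
cnts = np.zeros(itmNum)
np.add.at(cnts, rows, ones)
print(sums / (cnts[:, None] + 1e-6))             # items 1 and 3 stay all-zero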
Example #3
    def itemToUser(self, ilats):
        # Aggregate item latents into sub-user latents with one masked mean
        # per behavior type.
        embeds = []
        paramId = 'dfltP%d' % NNs.getParamId()
        for i in range(args.behNums):
            # Behaviors are 1-indexed, so the padding value 0 maps to -1 and
            # never matches any behavior index i.
            mask = tf.cast(
                tf.expand_dims(tf.equal(self.ut_i_beh - 1, i), axis=-1),
                tf.float32)
            embed = tf.reduce_sum(
                mask * ilats, axis=1) / (tf.reduce_sum(mask, axis=1) + 1e-6)
            # Behavior-specific transformation, shared across layers via
            # `reuse`.
            behTrans_w = FC(tf.expand_dims(self.behEmbeds[i], axis=0),
                            args.memosize,
                            activation='relu',
                            useBias=True,
                            reg=True,
                            name=paramId + '_a',
                            reuse=True)
            behTrans = tf.reshape(
                FC(behTrans_w,
                   args.latdim**2,
                   reg=True,
                   name=paramId + '_b',
                   reuse=True), [args.latdim, args.latdim])
            embed = Activate(embed @ behTrans, self.actFunc)
            embeds.append(embed)
        return self.handleMultBehEmbeds(embeds)
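
A small NumPy check of that masked mean, showing why the 1-indexing keeps padding out of every behavior's average:

import numpy as np

ut_i_beh = np.array([[1, 2, 1, 0]])               # 0 = padding; behaviors are 1-indexed
ilats = np.arange(8, dtype=float).reshape(1, 4, 2)
i = 0                                             # look at behavior 1
mask = (ut_i_beh - 1 == i).astype(float)[..., None]
embed = (mask * ilats).sum(axis=1) / (mask.sum(axis=1) + 1e-6)
print(embed)                                      # mean of rows 0 and 2 only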
Example #4
    def mine(self, interaction):
        activation = 'relu'
        V = NNs.defineParam('v', [self.inpDim, args.latdim], reg=True)
        # Split the interaction vector by interaction type and embed each part.
        divideLst = self.divide(interaction)
        catlat1 = []
        for dividInp in divideLst:
            catlat1.append(dividInp @ V)
        # Let the per-type embeddings attend to each other.
        catlat2 = self.selfAttention(catlat1,
                                     number=self.intTypes,
                                     inpDim=args.latdim)
        catlat3 = list()
        self.memoAtt = []
        for i in range(self.intTypes):
            # Residual connection around the self-attention.
            resCatlat = catlat2[i] + catlat1[i]
            # Memory network: attention over memosize slots yields a
            # per-instance latdim x latdim transformation.
            memoatt = FC(resCatlat,
                         args.memosize,
                         activation='relu',
                         reg=True,
                         useBias=True)
            memoTrans = tf.reshape(
                FC(memoatt, args.latdim**2, reg=True, name='memoTrans'),
                [-1, args.latdim, args.latdim])
            self.memoAtt.append(memoatt)

            # Apply the per-instance transformation via a batched matmul.
            tem = tf.reshape(resCatlat, [-1, 1, args.latdim])
            transCatlat = tf.reshape(tem @ memoTrans, [-1, args.latdim])
            catlat3.append(transCatlat)

        stkCatlat3 = tf.stack(catlat3, axis=1)

        # Learned softmax fusion over interaction types; the zero init makes
        # the initial fusion uniform, and the factor 2 sharpens it.
        weights = NNs.defineParam('fuseAttWeight', [1, self.intTypes, 1],
                                  reg=True,
                                  initializer='zeros')
        sftW = tf.nn.softmax(weights * 2, axis=1)
        fusedLat = tf.reduce_sum(sftW * stkCatlat3, axis=1)
        self.memoAtt = tf.stack(self.memoAtt, axis=1)

        # Two residual dense layers refine the fused representation.
        lat = fusedLat
        for i in range(2):
            lat = FC(lat,
                     args.latdim,
                     useBias=True,
                     reg=True,
                     activation=activation) + lat
        return lat
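
The memory-network step in the middle is easy to misread: `memoTrans` is a separate latdim x latdim matrix per instance, applied through a batched matmul. A toy NumPy version of just that step, with identity transforms standing in for the FC output:

import numpy as np

batch, latdim = 2, 3
resCatlat = np.arange(batch * latdim, dtype=float).reshape(batch, latdim)
memoTrans = np.tile(np.eye(latdim), (batch, 1, 1))   # one matrix per instance
tem = resCatlat[:, None, :]                          # [batch, 1, latdim]
transCatlat = (tem @ memoTrans).reshape(-1, latdim)
print(np.allclose(transCatlat, resCatlat))           # True for identity transforms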
Example #5
    def ours(self):
        self.generateEmbeds()
        alluEmbed0 = NNs.defineParam('uEmbed0', [args.user, args.latdim],
                                     reg=True)
        alliEmbed0 = NNs.defineParam('iEmbed0', [args.item, args.latdim],
                                     reg=True)
        uEmbed0 = tf.nn.embedding_lookup(alluEmbed0, self.allUsrs)
        iEmbed0 = tf.nn.embedding_lookup(alliEmbed0, self.allItms)

        ulats = [uEmbed0]
        ilats = [iEmbed0]
        utlats = []
        self.atts = []
        # Each GNN layer propagates item -> (sub-)user, then user -> item.
        for i in range(args.gnn_layer):
            # i to u: sequence-model each sub-user's item sequence, then pool
            # sub-user latents into user latents.
            ilat0, att = self.sequenceModeling(ilats[-1])
            self.atts.append(tf.squeeze(att))
            utlat = self.itemToUser(ilat0)
            ulat = self.aggregateSubUsers(utlat)
            ulats.append(ulat)

            # u to i: make the previous layer's user latents time-aware, then
            # propagate them back to items.
            utlat0 = self.getTimeAwareULats(ulats[-2])
            utlats.append(utlat0)
            ilat = self.userToItem(utlat0)
            ilats.append(ilat)
        if args.gnn_layer == 0:
            utlats.append(self.getTimeAwareULats(ulats[0]))
        # Sum the representations across all layers (layer 0 included).
        utlat = tf.add_n(utlats)
        ulat = tf.add_n(ulats)
        ilat = tf.add_n(ilats)
        pckULat = tf.nn.embedding_lookup(utlat, self.utids)
        pckILat = tf.nn.embedding_lookup(ilat, self.iids)

        # Element-wise interaction, refined by residual dense layers.
        predLat = pckULat * pckILat * args.mult

        for i in range(args.deep_layer):
            predLat = FC(predLat,
                         args.latdim,
                         reg=True,
                         useBias=True,
                         activation=self.actFunc) + predLat
        pred = tf.squeeze(FC(predLat, 1, reg=True, useBias=True))
        return pred
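
The prediction head combines the layer-summed latents element-wise and then refines the product with residual dense layers before a final 1-d scoring layer. A minimal NumPy sketch of that head, with a hypothetical `dense` helper standing in for FC:

import numpy as np

def dense(x, w):                                  # hypothetical stand-in for FC
    return np.maximum(x @ w, 0.0)

latdim, mult = 4, 1.0
rng = np.random.default_rng(1)
pckULat = rng.normal(size=(2, latdim))
pckILat = rng.normal(size=(2, latdim))
predLat = pckULat * pckILat * mult                # element-wise interaction
for w in [rng.normal(size=(latdim, latdim)) * 0.1 for _ in range(2)]:
    predLat = dense(predLat, w) + predLat         # residual deep layers
pred = predLat @ rng.normal(size=(latdim, 1))     # final 1-d scoring layer
print(pred.squeeze().shape)                       # (2,)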
Example #6
    def sequenceModeling(self, lats):
        # Item ids and behaviors are 1-indexed; 0 marks padding.
        itemEmbeds = tf.nn.embedding_lookup(lats, self.ut_i_item - 1)
        posEmbeds = tf.nn.embedding_lookup(self.timeEmbeds, self.ut_i_time)
        behEmbeds = tf.nn.embedding_lookup(self.behEmbeds, self.ut_i_beh - 1)
        # Project the (latdim * 2)-dimensional time embedding down to latdim.
        posEmbeds = tf.reshape(
            FC(tf.reshape(posEmbeds, [-1, args.latdim * 2]),
               args.latdim,
               reg=True,
               useBias=True,
               activation=self.actFunc), [-1, args.subUsrSize, args.latdim])
        behEmbeds = tf.reshape(
            FC(tf.reshape(behEmbeds, [-1, args.latdim]),
               args.latdim,
               reg=True,
               useBias=True,
               activation=self.actFunc), [-1, args.subUsrSize, args.latdim])
        biasEmbed = posEmbeds + behEmbeds
        # Zero out padded positions so they contribute nothing downstream.
        embeds = (itemEmbeds + biasEmbed) * tf.expand_dims(
            tf.cast(tf.sign(self.ut_i_item), tf.float32), axis=-1)
        # Multi-head self-attention over each sub-user's item sequence; the
        # values reuse the input embeddings instead of a separate V projection.
        Q = NNs.defineRandomNameParam(
            [args.latdim, args.att_head, args.latdim // args.att_head],
            reg=True)
        K = NNs.defineRandomNameParam(
            [args.latdim, args.att_head, args.latdim // args.att_head],
            reg=True)
        q = tf.expand_dims(tf.einsum('ijk,klm->ijlm', embeds, Q), axis=2)
        k = tf.expand_dims(tf.einsum('ijk,klm->ijlm', embeds, K), axis=1)
        v = tf.reshape(embeds, [
            -1, 1, args.subUsrSize, args.att_head, args.latdim // args.att_head
        ])
        logits = tf.reduce_sum(q * k, axis=-1, keepdims=True)
        # Masked softmax: padded positions produce logits of exactly 0 and are
        # excluded from the normalization.
        exp = tf.math.exp(logits) * (
            1.0 - tf.cast(tf.equal(logits, 0.0), tf.float32))
        norm = tf.reduce_sum(exp, axis=2, keepdims=True) + 1e-6
        att = exp / norm
        # Residual connection, averaged with the attention output.
        ret = tf.reshape(tf.reduce_sum(att * v, axis=2),
                         [-1, args.subUsrSize, args.latdim]) + embeds
        return ret / 2, att
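
The softmax here is masked implicitly: padded positions were zeroed earlier, so their attention logits come out exactly 0 and the `1 - equal(logits, 0)` factor drops them from the normalization (a real logit that happens to land exactly on 0 would be dropped too, which is vanishingly unlikely with float inputs). A toy NumPy check:

import numpy as np

logits = np.array([2.0, 0.5, 0.0, 0.0])          # last two positions are padding
exp = np.exp(logits) * (1.0 - (logits == 0.0))
att = exp / (exp.sum() + 1e-6)
print(att)                                       # padding gets exactly zero weight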
Example #7
    def prepareModel(self):
        self.intTypes = 4
        self.interaction = tf.placeholder(dtype=tf.int32,
                                          shape=[None, self.inpDim],
                                          name='interaction')
        self.posLabel = tf.placeholder(dtype=tf.int32,
                                       shape=[None, None],
                                       name='posLabel')
        self.negLabel = tf.placeholder(dtype=tf.int32,
                                       shape=[None, None],
                                       name='negLabel')
        intEmbed = tf.reshape(self.mine(self.interaction),
                              [-1, 1, args.latdim])
        self.learnedEmbed = tf.reshape(intEmbed, [-1, args.latdim])

        # Score the sampled positive/negative items against the learned
        # embedding.
        W = NNs.defineParam('W', [self.inpDim, args.latdim], reg=True)
        posEmbeds = tf.transpose(tf.nn.embedding_lookup(W, self.posLabel),
                                 [0, 2, 1])
        negEmbeds = tf.transpose(tf.nn.embedding_lookup(W, self.negLabel),
                                 [0, 2, 1])
        sampnum = tf.shape(self.posLabel)[1]

        posPred = tf.reshape(intEmbed @ posEmbeds, [-1, sampnum])
        negPred = tf.reshape(intEmbed @ negEmbeds, [-1, sampnum])
        self.posPred = posPred

        # Pairwise hinge loss: each positive should outscore its paired
        # negative by a margin of 1.
        self.preLoss = tf.reduce_mean(
            tf.reduce_sum(tf.maximum(0.0, 1.0 - (posPred - negPred)), axis=-1))
        self.regLoss = args.reg * Regularize(method='L2')
        self.loss = self.preLoss + self.regLoss

        globalStep = tf.Variable(0, trainable=False)
        learningRate = tf.train.exponential_decay(args.lr,
                                                  globalStep,
                                                  args.decay_step,
                                                  args.decay,
                                                  staircase=True)
        self.optimizer = tf.train.AdamOptimizer(learningRate).minimize(
            self.loss, global_step=globalStep)
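
`preLoss` is a pairwise hinge (margin-ranking) loss: each sampled positive should outscore its paired negative by at least 1, and pairs already satisfying the margin contribute nothing. A two-pair NumPy example:

import numpy as np

posPred = np.array([[2.0, 0.3]])
negPred = np.array([[0.5, 0.8]])
preLoss = np.maximum(0.0, 1.0 - (posPred - negPred)).sum(axis=-1).mean()
print(preLoss)   # 1.5: only the second pair violates the margin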
Example #8
    def generateEmbeds(self):
        # Behavior embeddings are kept out of regularization (reg=False);
        # time embeddings come from a fixed construction.
        self.behEmbeds = NNs.defineParam('behEmbeds',
                                         [args.behNums, args.latdim],
                                         reg=False)
        self.timeEmbeds = self.makeTimeEmbed()
Example #9
    def prepareModel(self):
        self.rows = tf.constant(self.handler.rows)
        self.cols = tf.constant(self.handler.cols)
        self.vals = tf.reshape(
            tf.constant(self.handler.vals, dtype=tf.float32), [-1, 1, 1, 1])
        # Learnable hypergraph adjacency connecting hyper-edges to areas.
        self.hyperAdj = defineParam('hyperAdj', [args.hyperNum, args.areaNum],
                                    reg=True)
        self.feats = tf.placeholder(
            name='feats',
            dtype=tf.float32,
            shape=[args.areaNum, args.temporalRange, args.offNum])
        self.dropRate = tf.placeholder(name='dropRate',
                                       dtype=tf.float32,
                                       shape=[])

        self.labels = tf.placeholder(name='labels',
                                     dtype=tf.float32,
                                     shape=[args.areaNum, args.offNum])
        self.preds, embed = self.ours()

        if args.task == 'c':
            # Classification: labels > 0 are positives, labels < 0 negatives,
            # and labels == 0 are ignored.
            posInd = tf.cast(tf.greater(self.labels, 0), tf.float32)
            negInd = tf.cast(tf.less(self.labels, 0), tf.float32)
            posPred = tf.cast(tf.greater_equal(self.preds, args.border),
                              tf.float32)
            negPred = tf.cast(tf.less(self.preds, args.border), tf.float32)
            NNs.addReg('embed',
                       embed * tf.expand_dims(posInd + negInd, axis=-1))
            # Cross-entropy averaged over labeled cells only.
            self.preLoss = tf.reduce_sum(
                -(posInd * tf.log(self.preds + 1e-8) +
                  negInd * tf.log(1 - self.preds + 1e-8))) / (
                      tf.reduce_sum(posInd) + tf.reduce_sum(negInd))
            # Per-category confusion counts for precision/recall.
            self.truePos = tf.reduce_sum(posPred * posInd, axis=0)
            self.falseNeg = tf.reduce_sum(negPred * posInd, axis=0)
            self.trueNeg = tf.reduce_sum(negPred * negInd, axis=0)
            self.falsePos = tf.reduce_sum(posPred * negInd, axis=0)
        elif args.task == 'r':
            # Regression: the mask selects which cells are evaluated.
            self.mask = tf.placeholder(name='mask',
                                       dtype=tf.float32,
                                       shape=[args.areaNum, args.offNum])
            self.preLoss = tf.reduce_sum(
                tf.square(self.preds - self.labels) *
                self.mask) / tf.reduce_sum(self.mask)
            self.sqLoss = tf.reduce_sum(tf.square(self.preds - self.labels) *
                                        self.mask,
                                        axis=0)
            self.absLoss = tf.reduce_sum(tf.abs(self.preds - self.labels) *
                                         self.mask,
                                         axis=0)
            self.tstNums = tf.reduce_sum(self.mask, axis=0)
            # Absolute percentage error is only computed where labels exceed
            # 0.5, to avoid division by near-zero labels.
            posMask = self.mask * tf.cast(tf.greater(self.labels, 0.5),
                                          tf.float32)
            self.apeLoss = tf.reduce_sum(tf.abs(self.preds - self.labels) /
                                         (self.labels + 1e-8) * posMask,
                                         axis=0)
            self.posNums = tf.reduce_sum(posMask, axis=0)
            NNs.addReg('embed', embed * tf.expand_dims(self.mask, axis=-1))

        # L1 penalty on the hypergraph adjacency encourages sparsity.
        self.regLoss = args.reg * Regularize() + args.spreg * tf.reduce_sum(
            tf.abs(self.hyperAdj))
        self.loss = self.preLoss + self.regLoss

        globalStep = tf.Variable(0, trainable=False)
        learningRate = tf.train.exponential_decay(args.lr,
                                                  globalStep,
                                                  args.decay_step,
                                                  args.decay,
                                                  staircase=True)
        self.optimizer = tf.train.AdamOptimizer(learningRate).minimize(
            self.loss, global_step=globalStep)
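
In the regression branch the per-column metrics are mask-weighted sums, left un-normalized so they can be accumulated across batches and divided by `tstNums`/`posNums` at evaluation time. A small NumPy version of `sqLoss` and `apeLoss` on toy values:

import numpy as np

preds  = np.array([[1.0, 3.0]])
labels = np.array([[2.0, 0.0]])
mask   = np.array([[1.0, 1.0]])
posMask = mask * (labels > 0.5)
sqLoss  = ((preds - labels) ** 2 * mask).sum(axis=0)
apeLoss = (np.abs(preds - labels) / (labels + 1e-8) * posMask).sum(axis=0)
print(sqLoss, apeLoss)   # [1. 9.] and [0.5 0. ]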