def update(self, instance):
     """
     One SGD step from a single training instance for the degenerate
     bilinear model: only the bilinear weight W1bi and bias B1 feed the
     softmax, operating on slice 0 of the u/v embeddings while slice 1
     receives the raw softmax gradient directly.
     instance: [UserId, ItemId, LabelId]
     Returns self (fluent style).
     """
     uid, iid, lid = instance
     ## calculate single gradient ##
     # intermediate #
     L1 = self._L1(uid, iid)
     L1grad = softmaxGradient(L1, lid)
     # gradient #
     delt_W1bi = TensorOuterFull([L1grad, self.u[uid][0], self.v[iid][0]])
     delt_B1 = L1grad * 1.0
     delt_u_1 = L1grad * 1.0
     delt_v_1 = L1grad * 1.0
     # contract the softmax gradient with the bilinear form to get the
     # embedding gradients (axes -1 / -2 pick the v- / u-facing mode)
     delt_u_0 = np.tensordot(a = L1grad, axes = (0,0),
                           b = np.tensordot(self.W1bi, self.v[iid][0], axes=(-1,0)))
     delt_v_0 = np.tensordot(a = L1grad, axes = (0,0),
                           b = np.tensordot(self.W1bi, self.u[uid][0], axes=(-2,0)))
     # update #  (SGD with L2 weight decay `lamda`)
     self.W1bi += (self.SGDstep * (delt_W1bi - self.lamda * self.W1bi))
     self.B1 += (self.SGDstep * (delt_B1 - self.lamda * self.B1))
     self.u[uid][0] += (self.SGDstep * (delt_u_0 - self.lamda * self.u[uid][0]))
     self.v[iid][0] += (self.SGDstep * (delt_v_0 - self.lamda * self.v[iid][0]))
     self.u[uid][1] += (self.SGDstep * (delt_u_1 - self.lamda * self.u[uid][1]))
     self.v[iid][1] += (self.SGDstep * (delt_v_1 - self.lamda * self.v[iid][1]))
     # ### test ###
     # # reduce to MultiMA #
     # self.W1bi[:,:,:] = 0.0
     # BUG FIX: the next line was left active, zeroing the trained bias after
     # every update. It belongs to the disabled test harness above (compare
     # the sibling update() where the whole harness is commented out).
     # self.B1[:] = 0.0
     return self
 def update(self, instance):
     """
     One SGD step from a single training triple for the model with a
     dense output layer on top of the bilinear/linear hidden layer.
     instance: [UserId, ItemId, LabelId]
     Returns self (fluent style).
     """
     user, item, label = instance
     ## forward pass ##
     pre_act = self._L1(user, item)
     hidden = self._outL1(pre_act)
     logits = denseLayer(hidden, self.W2, self.B2)
     ## backward pass ##
     logit_grad = softmaxGradient(logits, label)
     hidden_grad = np.multiply(denseLayerGradBP(logit_grad, self.W2),
                               self._gradL1(pre_act))
     ## parameter gradients ##
     g_W2 = TensorOuterFull([logit_grad, hidden])
     g_B2 = 1.0 * logit_grad
     g_W1bi = TensorOuterFull([hidden_grad, self.u[user], self.v[item]])
     g_W1u = TensorOuterFull([hidden_grad, self.u[user]])
     g_W1v = TensorOuterFull([hidden_grad, self.v[item]])
     g_B1 = 1.0 * hidden_grad
     # embedding gradients: contract the hidden gradient with the linear
     # term plus the bilinear form applied to the opposite embedding
     g_u = np.tensordot(
         hidden_grad,
         self.W1u + np.tensordot(self.W1bi, self.v[item], axes=(-1, 0)),
         axes=(0, 0))
     g_v = np.tensordot(
         hidden_grad,
         self.W1v + np.tensordot(self.W1bi, self.u[user], axes=(-2, 0)),
         axes=(0, 0))
     ## SGD step with L2 weight decay ##
     step, decay = self.SGDstep, self.lamda
     self.W2 += step * (g_W2 - decay * self.W2)
     self.B2 += step * (g_B2 - decay * self.B2)
     self.W1bi += step * (g_W1bi - decay * self.W1bi)
     self.W1u += step * (g_W1u - decay * self.W1u)
     self.W1v += step * (g_W1v - decay * self.W1v)
     self.B1 += step * (g_B1 - decay * self.B1)
     self.u[user] += step * (g_u - decay * self.u[user])
     self.v[item] += step * (g_v - decay * self.v[item])
     return self
# Example #3 (0 votes) — scraper artifact, converted to a comment so the file parses
 def update(self, instance):
     """
     One SGD step from a single training triple. The output layer here is a
     per-label weighted sum (elementwise W2 over the hidden activation)
     rather than a dense layer.
     instance: [UserId, ItemId, LabelId]
     Returns self (fluent style).
     """
     user, item, label = instance
     ## forward pass ##
     pre_act = self._L1(user, item)
     hidden = self._outL1(pre_act)
     scores = np.sum(np.multiply(self.W2, hidden), axis=1)
     ## backward pass ##
     score_grad = softmaxGradient(scores, label)
     hidden_grad = transMultiply(np.multiply(self._gradL1(pre_act), self.W2), score_grad)
     ## parameter gradients ##
     g_W2 = transMultiply(hidden, score_grad)
     g_W1bi = TensorOuterFull([hidden_grad, self.u[user], self.v[item]])
     g_W1u = TensorOuterFull([hidden_grad, self.u[user]])
     g_W1v = TensorOuterFull([hidden_grad, self.v[item]])
     g_B1 = 1.0 * hidden_grad
     # flatten the (L, k) hidden gradient so a single contraction yields
     # the d-dimensional embedding gradients
     flat = self.L * self.k
     g_u = np.tensordot(
         hidden_grad.reshape(flat),
         (np.tensordot(self.W1bi, self.v[item], axes=(-1, 0))
          + self.W1u).reshape([flat, self.d]),
         axes=(0, 0))
     g_v = np.tensordot(
         hidden_grad.reshape(flat),
         (np.tensordot(self.W1bi, self.u[user], axes=(-2, 0))
          + self.W1v).reshape([flat, self.d]),
         axes=(0, 0))
     ## SGD step with L2 weight decay ##
     step, decay = self.SGDstep, self.lamda
     self.W2 += step * (g_W2 - decay * self.W2)
     self.W1bi += step * (g_W1bi - decay * self.W1bi)
     self.W1u += step * (g_W1u - decay * self.W1u)
     self.W1v += step * (g_W1v - decay * self.W1v)
     self.B1 += step * (g_B1 - decay * self.B1)
     self.u[user] += step * (g_u - decay * self.u[user])
     self.v[item] += step * (g_v - decay * self.v[item])
     return self
    def update(self, instance):
        """
        Update embeddings according to a single instance with SGD.
        instance: [UserId, ItemId, LabelId]
        Returns self (fluent style).
        """
        uid, iid, lid = instance
        ## calculate update step ##
        # per-label score: row-wise dot product of user and item embeddings
        scores = np.sum(self.u[uid] * self.v[iid], axis=1)
        score_grad = softmaxGradient(scores, lid)
        # scale each row of the opposite embedding by its softmax gradient
        delt_u = (self.v[iid].T * score_grad).T
        delt_v = (self.u[uid].T * score_grad).T

        self.u[uid] += self.SGDstep * (delt_u - self.lamda * self.u[uid])
        self.v[iid] += self.SGDstep * (delt_v - self.lamda * self.v[iid])
        return self
# Example #5 (0 votes) — scraper artifact, converted to a comment so the file parses
    def update(self, instance):
        """
        Update embeddings according to a single instance with SGD.
        instance: [UserId, ItemId, LabelId]
        Returns self (fluent style).
        """
        uid, iid, lid = instance
        ## calculate update step ##
        # additive score: sum of user and item embeddings; the score is
        # linear in each embedding, so both gradients equal the softmax
        # gradient itself
        grad = softmaxGradient(self.u[uid] + self.v[iid], lid)

        self.u[uid] += self.SGDstep * (grad - self.lamda * self.u[uid])
        self.v[iid] += self.SGDstep * (grad - self.lamda * self.v[iid])
        return self
 def update(self, instance):
     """
     One SGD step for the full bilinear model: the bilinear term W1bi plus
     the linear terms W1u / W1v and bias B1 feed the softmax directly.
     instance: [UserId, ItemId, LabelId]
     Returns self (fluent style).
     """
     user, item, label = instance
     ## forward / backward ##
     logits = self._L1(user, item)
     grad = softmaxGradient(logits, label)
     ## parameter gradients ##
     g_W1bi = TensorOuterFull([grad, self.u[user], self.v[item]])
     g_W1u = TensorOuterFull([grad, self.u[user]])
     g_W1v = TensorOuterFull([grad, self.v[item]])
     g_B1 = 1.0 * grad
     # embedding gradients: contract the softmax gradient with the linear
     # term plus the bilinear form applied to the opposite embedding
     g_u = np.tensordot(
         grad,
         self.W1u + np.tensordot(self.W1bi, self.v[item], axes=(-1, 0)),
         axes=(0, 0))
     g_v = np.tensordot(
         grad,
         self.W1v + np.tensordot(self.W1bi, self.u[user], axes=(-2, 0)),
         axes=(0, 0))
     ## SGD step with L2 weight decay ##
     step, decay = self.SGDstep, self.lamda
     self.W1bi += step * (g_W1bi - decay * self.W1bi)
     self.W1u += step * (g_W1u - decay * self.W1u)
     self.W1v += step * (g_W1v - decay * self.W1v)
     self.B1 += step * (g_B1 - decay * self.B1)
     self.u[user] += step * (g_u - decay * self.u[user])
     self.v[item] += step * (g_v - decay * self.v[item])
     # Debug harness (disabled): zeroing W1bi/B1 and pinning W1u/W1v to the
     # identity reduces this model to MultiMA.
     # assert self.W1u.shape[0] == self.W1u.shape[1]
     # self.W1bi[:,:,:] = 0.0
     # for i in range(self.L):
     #     self.W1u[i,:] = 0.0
     #     self.W1u[i,i] = 1.0
     #     self.W1v[i,:] = 0.0
     #     self.W1v[i,i] = 1.0
     # self.B1[:] = 0.0
     return self
# Example #7 (0 votes) — scraper artifact, converted to a comment so the file parses
    def update(self, instance):
        """
        One SGD step for the Tucker-decomposition model: updates the user and
        item embeddings, the core tensor c, and the output factor r.
        instance: [UserId, ItemId, LabelId] where LabelID should be index in range(self.L)
        Returns self (fluent style).
        """
        uid, iid, lid = instance
        ## calculate single gradient ##
        # intermediate #
        # score vector reconstructed from the core tensor and the factors
        m = TDreconstruct(self.c, self.u[uid], self.v[iid], self.r)
        mgrad = softmaxGradient(m, lid)
        # gradient for embeddings #
        # NOTE(review): the axes choices below presume c is a 3-way core whose
        # last axis contracts with r's second axis, and that u[uid]/v[iid]
        # contract with c's first/second axes — TODO confirm against __init__.
        delt_u = np.tensordot(a=mgrad, axes=(0, 1),
                              b=np.tensordot(a=self.v[iid], axes=(0, 1),
                                             b=np.tensordot(a=self.c, axes=(2, 1),
                                                            b=self.r)
                                             )
                              )
        delt_v = np.tensordot(a=mgrad, axes=(0, 1),
                              b=np.tensordot(a=self.u[uid], axes=(0, 0),
                                             b=np.tensordot(a=self.c, axes=(2, 1),
                                                            b=self.r)
                                             )
                              )
        # core gradient: outer product of the two embeddings and the
        # r-contracted softmax gradient
        delt_c = matrixTool.TensorOuter([self.u[uid], self.v[iid], np.tensordot(mgrad, self.r, axes=(0, 0))])
        delt_r = np.outer(mgrad, np.tensordot(a=self.u[uid], axes=(0, 0),
                                              b=np.tensordot(a=self.v[iid], axes=(0, 1),
                                                             b=self.c)))
        # update #
        # SGD with L2 weight decay (lamda) on every parameter
        self.u[uid] += (self.SGDstep * (delt_u - self.lamda * self.u[uid]))
        self.v[iid] += (self.SGDstep * (delt_v - self.lamda * self.v[iid]))
        self.c += (self.SGDstep * (delt_c - self.lamda * self.c))
        self.r += (self.SGDstep * (delt_r - self.lamda * self.r))

        ### test ###
        # m = TDreconstruct(self.c, self.u[uid], self.v[iid], self.r)
        # print "m after update", m
        return self