Example #1 (score: 0)
File: knn.py — Project: kgl-prml/CUV
 def run(self, test):
     """Return the training label of the nearest neighbor of each test row.

     The distance matrix is negated so the ARGMAX reduction selects the
     minimum distance (the library offers no ARGMIN reduction).
     """
     neg_dist = self.get_distance_matrix(test)
     neg_dist *= -1.0  # argmax over negated distances == argmin
     nearest = cp.dev_tensor_uint(test.shape[0])
     cp.reduce_to_row(nearest, neg_dist, cp.reduce_functor.ARGMAX)
     # Pull the index vector back to the host as a flat numpy array.
     host_nearest = nearest.np.reshape(nearest.shape[0])
     return self.data_l.reshape(self.data.shape[0])[host_nearest]
Example #2 (score: 0)
File: knn.py — Project: stjordanis/CUV
 def run(self, test):
     """Label each test sample with the label of its nearest training sample."""
     dist = self.get_distance_matrix(test)
     # The library lacks an ARGMIN reduction, so negate and use ARGMAX.
     dist *= -1.
     argmin_idx = cp.dev_tensor_uint(test.shape[0])
     cp.reduce_to_row(argmin_idx, dist, cp.reduce_functor.ARGMAX)
     on_host = argmin_idx.np.reshape(argmin_idx.shape[0])
     labels = self.data_l.reshape(self.data.shape[0])
     return labels[on_host]
Example #3 (score: 0)
File: mlp.py — Project: 0rchard/CUV
  def delta_outputSoftMax(self, calculated, correct):
    """Return the output-layer delta for a softmax output.

    Exponentiates a copy of ``calculated`` in place, normalizes each
    column by its (smoothed) sum to form a softmax, then blends in
    ``correct`` through an AXPBY(-1., 1.) binary functor.
    """
    delta = calculated.copy()
    cp.apply_scalar_functor(delta, cp.scalar_functor.EXP)
    col_sums = cp.dev_tensor_float(calculated.shape[1])
    cp.fill(col_sums, 0)
    cp.reduce_to_row(col_sums, delta, cp.reduce_functor.ADD)
    # Smooth the normalizer by 0.1/n_outputs before dividing.
    cp.apply_scalar_functor(col_sums, cp.scalar_functor.ADD,
                            0.1 / delta.shape[0])
    transposed = cp.transposed_view(delta)
    cp.matrix_divide_col(transposed, col_sums)

    # Combine with the targets: AXPBY with coefficients (-1., 1.).
    cp.apply_binary_functor(delta, correct, cp.binary_functor.AXPBY, -1., 1.)
    col_sums.dealloc()  # release the temporary device vector eagerly

    return delta
Example #4 (score: 0)
File: mlp.py — Project: stjordanis/CUV
    def delta_outputSoftMax(self, calculated, correct):
        """Compute the output delta of a softmax layer.

        Builds a column-normalized softmax from ``calculated`` (with a
        small 0.1/n smoothing term added to each column sum), then
        combines it with ``correct`` via an AXPBY(-1., 1.) functor.
        """
        softmaxed = calculated.copy()
        cp.apply_scalar_functor(softmaxed, cp.scalar_functor.EXP)
        norm = cp.dev_tensor_float(calculated.shape[1])
        cp.fill(norm, 0)
        cp.reduce_to_row(norm, softmaxed, cp.reduce_functor.ADD)
        smoothing = 0.1 / softmaxed.shape[0]
        cp.apply_scalar_functor(norm, cp.scalar_functor.ADD, smoothing)
        view = cp.transposed_view(softmaxed)
        cp.matrix_divide_col(view, norm)

        cp.apply_binary_functor(softmaxed, correct,
                                cp.binary_functor.AXPBY, -1., 1.)
        norm.dealloc()  # free the device-side normalizer eagerly

        return softmaxed
Example #5 (score: 0)
File: ais.py — Project: stjordanis/CUV
    def p_k(self,beta,tmp,tmp2,collect):
        """Collect the log-probability terms of the intermediate
        distribution at inverse temperature ``beta``.

        ``tmp`` and ``tmp2`` are caller-provided device buffers that are
        overwritten; each partial term is handed to ``collect``.
        NOTE(review): presumably part of an AIS partition-function
        estimate (file ais.py) — confirm against caller.
        """
        # (1 - beta)-weighted base-rate term: v^T * baserate_bias.
        cp.prod(tmp,self.v,self.baserate_bias,'t','n')
        cp.apply_scalar_functor(tmp,cp.scalar_functor.MULT,(1-beta))
        collect(tmp)
        # Hidden-unit input at temperature beta: beta * (w^T v + bias_hi).
        cp.prod(tmp2,self.w,self.v,'t','n')
        cp.matrix_plus_col(tmp2,self.bias_hi)

        cp.apply_scalar_functor(tmp2,cp.scalar_functor.MULT,beta)

        # RECT computes log(1+exp(x)) -- softplus, i.e. the hiddens summed out.
        cp.apply_scalar_functor(tmp2,cp.scalar_functor.RECT,1)

        cp.reduce_to_row(tmp.T,tmp2,cp.reduce_functor.ADD) # tmp.T is an evil hack. it makes tmp into row major, which doesn't change anything since it's a vector any way. But vectors are always assumed to be row major.
        collect(tmp)
        # beta-weighted visible bias term: beta * (v^T * bias_lo).
        cp.prod(tmp,self.v,self.bias_lo.T,'t','n')
        cp.apply_scalar_functor(tmp,cp.scalar_functor.MULT,beta)
        collect(tmp)
Example #6 (score: 0)
    def partialsumV(self, actv, acth, row):
        """
         sums out hidden variables for given v
          exp( log(exp(bh + actv*W)+1).sum(axis=0) + (v*bv).sum(axis=0) )

         NOTE(review): unlike ``partialsum``, no exponential is applied
         before the final summation below — confirm whether the exp in
         the formula above is intentional or handled by the caller.
        """
        # acth = bh + W^T * actv  (the original comment said bv; code uses bh)
        cp.prod(acth, self.weight, actv, "t", "n")
        cp.matrix_plus_col(acth, self.bh)

        # acth = log(exp(acth)+1)  (RECT is a softplus)
        cp.apply_scalar_functor(acth, cp.scalar_functor.RECT, 1.0)

        # row = acth.sum(axis=0)
        cp.reduce_to_row(row, acth, cp.reduce_functor.ADD)

        # row += (actv * bv).sum(axis=0); note: actv is clobbered here.
        cp.matrix_times_col(actv, self.bv)
        cp.reduce_to_row(row, actv, cp.reduce_functor.ADD, 1.0, 1.0)

        # Copy to host in double precision for an accurate fsum.
        m = row.np.astype("float64")

        return math.fsum(m.flatten()) / actv.shape[1]
Example #7 (score: 0)
    def partialsumV(self, actv, acth, row):
        """
         sums out hidden variables for given v
          exp( log(exp(bh + actv*W)+1).sum(axis=0) + (v*bv).sum(axis=0) )

         NOTE(review): the exp in the formula above is never applied in
         the code below (compare ``partialsum``, which uses np.exp) —
         verify the intended behavior.
        """
        # acth = bh + W^T * actv  (comment corrected: code adds bh, not bv)
        cp.prod(acth, self.weight, actv, 't', 'n')
        cp.matrix_plus_col(acth, self.bh)

        # acth = log(exp(acth)+1)  (softplus via RECT)
        cp.apply_scalar_functor(acth, cp.scalar_functor.RECT, 1.0)

        # row = acth.sum(axis=0)
        cp.reduce_to_row(row, acth, cp.reduce_functor.ADD)

        # row += (actv * bv).sum(axis=0); actv is destroyed in the process.
        cp.matrix_times_col(actv, self.bv)
        cp.reduce_to_row(row, actv, cp.reduce_functor.ADD, 1.0, 1.0)

        # Transfer to host as float64 so math.fsum stays accurate.
        m = row.np.astype("float64")

        return math.fsum(m.flatten()) / actv.shape[1]
Example #8 (score: 0)
    def partialsum(self, acth, actv, row):
        """Sum out the visible units for the given hidden activations.

        Evaluates, summed over samples:
          exp( softplus(bv + W*acth).sum(axis=0) + (acth*bh).sum(axis=0) )
        ``actv``, ``acth`` and ``row`` are scratch buffers and are overwritten.
        """
        # actv <- bv + W * acth
        cp.prod(actv, self.weight, acth, "n", "n")
        cp.matrix_plus_col(actv, self.bv)

        # actv <- log(exp(actv) + 1)  (softplus via RECT)
        cp.apply_scalar_functor(actv, cp.scalar_functor.RECT, 1.0)

        # row <- column sums of the softplus term
        cp.reduce_to_row(row, actv, cp.reduce_functor.ADD)

        # row += column sums of acth * bh (acth is scaled in place)
        cp.matrix_times_col(acth, self.bh)
        cp.reduce_to_row(row, acth, cp.reduce_functor.ADD, 1.0, 1.0)

        # Move to host as float64, exponentiate, and take an accurate sum.
        host_row = row.np.astype("float64")
        return math.fsum(np.exp(host_row).flatten())
Example #9 (score: 0)
    def partialsum(self, acth, actv, row):
        """Marginalize the visible units for fixed hidden activations.

        Returns the sum over samples of
          exp( softplus(bv + W*acth).sum(0) + (acth*bh).sum(0) ).
        All three tensor arguments are used as scratch space.
        """
        cp.prod(actv, self.weight, acth, 'n', 'n')  # actv = W * acth
        cp.matrix_plus_col(actv, self.bv)           # actv += bv (per column)

        # softplus: actv = log(exp(actv) + 1)
        cp.apply_scalar_functor(actv, cp.scalar_functor.RECT, 1.0)

        # column-wise sum of the softplus term
        cp.reduce_to_row(row, actv, cp.reduce_functor.ADD)

        # add column-wise sums of acth * bh (destroys acth)
        cp.matrix_times_col(acth, self.bh)
        cp.reduce_to_row(row, acth, cp.reduce_functor.ADD, 1.0, 1.0)

        # Host-side float64 copy keeps exp + fsum numerically accurate.
        as_np = row.np.astype("float64")
        return math.fsum(np.exp(as_np).flatten())