Example #1
    def caps_conv(self,
                  ksize,
                  outdim,
                  outcaps,
                  stride=1,
                  activation='l2',
                  usebias=True):
        print('Caps_conv_bias:', usebias)
        # input is expected to be [BSIZE, height, width, capsnum, vecdim]
        capsnum = self.inpsize[3]
        vecdim = self.inpsize[4]
        stride_ = [1, stride, stride, capsnum, 1]
        with tf.variable_scope('CapsConv_' + str(self.layernum)):
            res = []
            for i in range(outcaps):
                with tf.variable_scope('CapsConv_3dConv_' + str(i)):
                    k = L.weight([ksize, ksize, capsnum, vecdim, outdim])
                    buff = tf.nn.conv3d(self.result, k, stride_, 'SAME')
                    res.append(buff)
            self.result = tf.concat(res, axis=3)
            if usebias:
                b = L.bias([1, 1, 1, outcaps, outdim])
                self.result += b
            if activation == 'l2':
                self.result = tf.nn.l2_normalize(self.result, -1)
        self.layernum += 1
        self.inpsize = self.result.get_shape().as_list()

        return self.result
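All of these snippets rely on a small layer-helper module `L` whose implementation is not shown. The sketch below is a minimal stand-in so that `L.weight(shape)` and `L.bias(shape)` can be read as ordinary trainable variables; the variable names and initializers are assumptions, not the original code.

import tensorflow as tf

def weight(shape):
    # Trainable kernel, created in the caller's variable scope.
    return tf.get_variable('weight', shape,
                           initializer=tf.truncated_normal_initializer(stddev=0.05))

def bias(shape):
    # Trainable bias, initialized to zero.
    return tf.get_variable('bias', shape,
                           initializer=tf.constant_initializer(0.0))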
Example #2
def enforcedClassifier(featurelayer, lbholder, dropout=1, multi=None, L2norm=False, L2const=10.0):
    with tf.variable_scope('Enforced_Softmax'):
        inp_shape = featurelayer.get_shape().as_list()
        inputdim = inp_shape[1]
        featurelayer = tf.nn.dropout(featurelayer, dropout)
        CLASS = lbholder.get_shape().as_list()[-1]
        w = L.weight([inputdim, CLASS])
        if L2norm:
            # cosine similarity between normalized features and normalized weights
            nfl = tf.nn.l2_normalize(featurelayer, 1)
            buff = tf.matmul(nfl, tf.nn.l2_normalize(w, 0))
            evallayer = tf.scalar_mul(L2const, buff)
        else:
            buff = tf.matmul(featurelayer, w)
            evallayer = buff
        floatlb = tf.cast(lbholder, tf.float32)
        lbc = tf.ones_like(floatlb) - floatlb
        filteredmtx = tf.multiply(lbc, buff)  # non-target logits, kept unchanged
        #filteredmtx = tf.maximum(filteredmtx*1.2,filteredmtx*0.8)
        cosmtx = tf.multiply(floatlb, buff)  # target-class logits
        if multi is not None:
            # penalize the target logit: shrink it when positive, enlarge its magnitude when negative
            cosmtx = (tf.minimum(cosmtx * multi[0], cosmtx * multi[1])) * floatlb
        lstlayer = cosmtx + filteredmtx
        if L2norm:
            lstlayer = tf.scalar_mul(L2const, lstlayer)
    return lstlayer, evallayer
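A minimal usage sketch for the classifier above, with assumed placeholder shapes (512-dimensional features, 10 classes) and keep-probability; none of these values come from the original code. The first output feeds the training loss, the second gives plain or L2-scaled logits for evaluation.

feat = tf.placeholder(tf.float32, [None, 512])
lb = tf.placeholder(tf.float32, [None, 10])  # one-hot labels
logits, eval_logits = enforcedClassifier(feat, lb, dropout=0.8, L2norm=True)
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=lb, logits=logits))
accuracy = tf.reduce_mean(tf.cast(
    tf.equal(tf.argmax(eval_logits, 1), tf.argmax(lb, 1)), tf.float32))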
Example #3
    def capsLayer(self, outchn, vdim2, iter_num, BSIZE=None):
        if BSIZE is None:
            BSIZE = self.result.get_shape().as_list()[0]
        with tf.variable_scope('capLayer_' + str(self.layernum)):
            # input size: [BSIZE, capin, 1, vdim1, 1]
            _, capin, _, vdim1, _ = self.inpsize
            W = L.weight([1, capin, outchn, vdim1, vdim2])
            W = tf.tile(W, [BSIZE, 1, 1, 1, 1])
            # routing logits, one per (input capsule, output capsule) pair
            b = tf.constant(0,
                            dtype=tf.float32,
                            shape=[BSIZE, capin, outchn, 1, 1])
            res_tile = tf.tile(self.result, [1, 1, outchn, 1, 1])
            res = tf.matmul(
                W, res_tile,
                transpose_a=True)  # [BSIZE, capin, capout, vdim2, 1]
            for i in range(iter_num):
                with tf.variable_scope('Routing_' + str(self.layernum) + '_' +
                                       str(i)):
                    c = tf.nn.softmax(b, dim=2)
                    self.result = tf.reduce_sum(
                        c * res, 1,
                        keep_dims=True)  # [BSIZE, 1, capout, vdim2, 1]
                    self.squash()
                    if i != iter_num - 1:
                        # agreement between predictions and the routed output
                        b = tf.reduce_sum(self.result * res,
                                          -2,
                                          keep_dims=True)
            self.result = tf.einsum('ijklm->ikjlm', self.result)
            self.inpsize = [None, outchn, 1, vdim2, 1]
            self.layernum += 1
        return self.result
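`capsLayer` calls `self.squash()`, which is not included in these snippets. The sketch below is a standalone version of the standard capsule squash non-linearity (Sabour et al.) that the call presumably applies to `self.result` along the vector dimension; the axis and epsilon are assumptions.

def squash(v, axis=-2, eps=1e-8):
    # Shrink each capsule vector's length into (0, 1) while keeping its direction.
    sq_norm = tf.reduce_sum(tf.square(v), axis=axis, keep_dims=True)
    return (sq_norm / (1.0 + sq_norm)) * v / tf.sqrt(sq_norm + eps)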
Example #4
    def dyn_route(self, feature, iter_num, is_squash=True):
        with tf.variable_scope('route_merging_' + str(self.layernum)):
            v_dim = feature.get_shape().as_list()[-1]
            W = L.weight([v_dim, v_dim])

        def fusion(feat):
            if is_squash:
                feat = self.squash_2d(feat)
            res = tf.matmul(feat, W)
            b = tf.zeros(tf.shape(res)[0])
            for i in range(iter_num):
                with tf.variable_scope('Routing_' + str(self.layernum) + '_' +
                                       str(i)):
                    c = tf.nn.softmax(b)
                    c = tf.stop_gradient(c)  # unclear whether this helps
                    feat = tf.reduce_mean(c * res, 0)
                    # feat = self.squash_2d(feat)
                    if i != iter_num - 1:
                        b = tf.reduce_sum(tf.matmul(res,
                                                    self.result,
                                                    transpose_b=True),
                                          1,
                                          keep_dims=True)
            return feat

        self.result = tf.map_fn(fusion, self.result)
        self.inpsize = self.result.get_shape().as_list()
        self.layernum += 1
        return self.result
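`dyn_route` also depends on a `squash_2d` helper that is not shown. A plausible stand-in, applying the same squash row-wise over a [num_features, v_dim] matrix, is sketched below; this is a guess at the missing helper, not the original implementation.

def squash_2d(feat, eps=1e-8):
    # feat: [num_features, v_dim]; squash each row's norm into (0, 1).
    sq_norm = tf.reduce_sum(tf.square(feat), axis=-1, keep_dims=True)
    return (sq_norm / (1.0 + sq_norm)) * feat / tf.sqrt(sq_norm + eps)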
Example #5
def enforcedClassfier(featurelayer,
                      inputdim,
                      lbholder,
                      BSIZE,
                      CLASS,
                      enforced=False,
                      dropout=1):
    featurelayer = tf.nn.dropout(featurelayer, dropout)
    w = L.weight([inputdim, CLASS])
    nfl = tf.nn.l2_normalize(featurelayer, 1)
    buff = tf.matmul(nfl, tf.nn.l2_normalize(w, 0))
    evallayer = tf.matmul(featurelayer, w)
    if enforced:
        floatlb = tf.cast(lbholder, tf.float32)
        lbc = tf.ones([BSIZE, CLASS], dtype=tf.float32) - floatlb
        cosmtx = tf.multiply(floatlb, buff)
        filteredmtx = tf.multiply(lbc, buff)
        cosmtx2 = (tf.minimum(cosmtx * 0.9, cosmtx * 1.)) * floatlb
        #cosmtx2 = tf.multiply(cosmtx,floatlb)
        lstlayer = cosmtx2 + filteredmtx
        # lstlayer = tf.matmul(featurelayer,w)*lstlayer
        nb = tf.norm(w, axis=0, keep_dims=True)
        nf = tf.norm(featurelayer, axis=1, keep_dims=True)
        lstlayer = nb * lstlayer
        lstlayer = nf * lstlayer
    else:
        lstlayer = evallayer
    return lstlayer, evallayer
Example #6
def enforcedClassfier2(featurelayer,
                       inputdim,
                       lbholder,
                       BSIZE,
                       CLASS,
                       enforced=False,
                       dropout=1):
    with tf.variable_scope('Enforced_Softmax'):
        if enforced:
            print('Enforced softmax loss is enabled.')
        featurelayer = tf.nn.dropout(featurelayer, dropout)
        w = L.weight([inputdim, CLASS])
        nfl = tf.nn.l2_normalize(featurelayer, 1)
        #nfl = tf.nn.dropout(nfl,dropout)
        buff = tf.matmul(nfl, tf.nn.l2_normalize(w, 0))
        constant = 40.0
        evallayer = tf.scalar_mul(constant, buff)
        if enforced:
            floatlb = tf.cast(lbholder, tf.float32)
            lbc = tf.ones([BSIZE, CLASS], dtype=tf.float32) - floatlb
            filteredmtx = tf.multiply(lbc, evallayer)
            #filteredmtx = tf.maximum(filteredmtx*1.2,filteredmtx*0.8)
            cosmtx = tf.multiply(floatlb, evallayer)
            cosmtx2 = (tf.minimum(cosmtx * 0.8, cosmtx * 1.2)) * floatlb
            lstlayer = cosmtx2 + filteredmtx
        else:
            lstlayer = evallayer
    return lstlayer, evallayer
Example #7
def enforcedClassifier(featurelayer,
                       CLASS,
                       BSIZE,
                       lbholder,
                       dropout=1,
                       enforced=False,
                       L2norm=False,
                       L2const=10.0):
    with tf.variable_scope('Enforced_Softmax1'):
        if enforced:
            print('Enforced softmax loss is enabled.')
    with tf.variable_scope('Enforced_Softmax'):
        inp_shape = featurelayer.get_shape().as_list()
        inputdim = inp_shape[1]
        featurelayer = tf.nn.dropout(featurelayer, dropout)
        w = L.weight([inputdim, CLASS])
        nfl = tf.nn.l2_normalize(featurelayer, 1)
        buff = tf.matmul(nfl, tf.nn.l2_normalize(w, 0))
        if L2norm:
            evallayer = tf.scalar_mul(L2const, buff)
        else:
            evallayer = tf.matmul(featurelayer, w)
        if enforced:
            floatlb = tf.cast(lbholder, tf.float32)
            lbc = tf.ones([BSIZE, CLASS], dtype=tf.float32) - floatlb
            filteredmtx = tf.multiply(lbc, buff)
            #filteredmtx = tf.maximum(filteredmtx*1.2,filteredmtx*0.8)
            cosmtx = tf.multiply(floatlb, buff)
            cosmtx2 = (tf.minimum(cosmtx * 0.9, cosmtx * 1.1)) * floatlb
            lstlayer = cosmtx2 + filteredmtx
            if not L2norm:
                nb = tf.norm(w, axis=0, keep_dims=True)
                nf = tf.norm(featurelayer, axis=1, keep_dims=True)
                lstlayer = nb * lstlayer
                lstlayer = nf * lstlayer
        else:
            lstlayer = evallayer
    return lstlayer, evallayer
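The `tf.minimum(cosmtx * 0.9, cosmtx * 1.1)` step used (with slightly different multipliers) in the last three variants shrinks the target-class cosine when it is positive and enlarges its magnitude when it is negative, so the loss only becomes small once the target cosine is pushed well above the others. A tiny numeric check (illustrative values only):

cos = tf.constant([[0.8], [-0.4]])            # target-class cosines for two samples
penalized = tf.minimum(cos * 0.9, cos * 1.1)  # -> [[0.72], [-0.44]]
with tf.Session() as sess:
    print(sess.run(penalized))  # positive shrinks, negative grows in magnitude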