Example #1
	def deploy(self,input_layer):
		with tf.variable_scope('RPN_',reuse=self.reuse):
			shared_feature = L.conv2D(input_layer,3,512,stride=self.anchor_stride,name='share_conv')
			shared_feature = L.relu(shared_feature,'share_relu')
			rpn_bf_logits = L.conv2D(shared_feature,1,2*self.anchors_per_loc,'bf')
			rpn_bf_logits = tf.reshape(rpn_bf_logits,[tf.shape(rpn_bf_logits)[0],-1,2])
			rpn_bf_prob = tf.nn.softmax(rpn_bf_logits)
			rpn_bbox = L.conv2D(shared_feature,1,4*self.anchors_per_loc,'bbox')
			rpn_bbox = tf.reshape(rpn_bbox,[tf.shape(rpn_bbox)[0],-1,4])
		self.reuse = True
		return rpn_bf_logits, rpn_bf_prob, rpn_bbox
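The L.conv2D calls above are the project's own layer wrappers. As a rough stand-alone sketch of the same RPN-head pattern with plain TensorFlow 1.x layers (rpn_head and its defaults are placeholder names, not part of the project):

import tensorflow as tf  # TensorFlow 1.x

def rpn_head(feature_map, anchors_per_loc, anchor_stride=1):
    # Shared 3x3 conv over the backbone feature map.
    shared = tf.layers.conv2d(feature_map, 512, 3, strides=anchor_stride,
                              padding='same', activation=tf.nn.relu,
                              name='share_conv')
    # 1x1 conv: background/foreground logits, one pair per anchor location.
    bf_logits = tf.layers.conv2d(shared, 2 * anchors_per_loc, 1, name='bf')
    bf_logits = tf.reshape(bf_logits, [tf.shape(bf_logits)[0], -1, 2])
    bf_prob = tf.nn.softmax(bf_logits)
    # 1x1 conv: four box-regression deltas per anchor.
    bbox = tf.layers.conv2d(shared, 4 * anchors_per_loc, 1, name='bbox')
    bbox = tf.reshape(bbox, [tf.shape(bbox)[0], -1, 4])
    return bf_logits, bf_prob, bbox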
Example #2
    def SelfAttention(self, att_num=None, is_fc=False, residual=False):
        assert is_fc or att_num, 'must state attention feature num for conv'

        def flatten_hw(layer):
            shape = layer.get_shape().as_list()
            layer = tf.reshape(layer, [-1, shape[1] * shape[2], shape[3]])
            return layer

        with tf.variable_scope('att_' + str(self.layernum)):
            # conv each of them
            current = self.result
            current_shape = current.get_shape().as_list()
            orig_num = current_shape[-1]
            if is_fc:
                f = L.Fcnn(current, orig_num, 'att_fc_f' + str(self.layernum))
                g = L.Fcnn(current, orig_num, 'att_fc_g' + str(self.layernum))
                h = L.Fcnn(current, orig_num, 'att_fc_h' + str(self.layernum))
                f = tf.expand_dims(f, axis=-1)
                g = tf.expand_dims(g, axis=-1)
                h = tf.expand_dims(h, axis=-1)
            else:
                f = L.conv2D(current, 1, att_num,
                             'att_conv_f_' + str(self.layernum))
                g = L.conv2D(current, 1, att_num,
                             'att_conv_g_' + str(self.layernum))
                h = L.conv2D(current, 1, orig_num,
                             'att_conv_h_' + str(self.layernum))

                # flatten them
                f = flatten_hw(f)
                g = flatten_hw(g)
                h = flatten_hw(h)

            # softmax(fg)
            fg = tf.matmul(f, g, transpose_b=True)
            fg = tf.nn.softmax(fg, -1)

            # out = scale(softmax(fg)h) + x
            scale = tf.get_variable('Variable',
                                    shape=[],
                                    initializer=tf.constant_initializer(0.0))
            out = scale * tf.matmul(fg, h)
            if is_fc:
                out = tf.reshape(out, [-1, orig_num])
            else:
                out = tf.reshape(out, [-1] + current_shape[1:3] + [orig_num])
            if residual:
                out = out + current
            self.layernum += 1
            self.inpsize = out.get_shape().as_list()
            self.result = out
        return self.result
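The convolutional branch above is the usual SAGAN-style self-attention: softmax(f g^T) applied to h, scaled by a zero-initialised variable so the block starts close to an identity mapping. A rough stand-alone sketch with plain TensorFlow 1.x ops (names such as self_attention_sketch and att_scale are placeholders, not from the project):

import tensorflow as tf  # TensorFlow 1.x

def self_attention_sketch(x, att_num):
    # x: [N, H, W, C] with statically known H, W, C.
    _, h, w, c = x.get_shape().as_list()
    f = tf.layers.conv2d(x, att_num, 1, name='att_f')  # "query" features
    g = tf.layers.conv2d(x, att_num, 1, name='att_g')  # "key" features
    v = tf.layers.conv2d(x, c, 1, name='att_h')        # "value" features

    def flatten(t):  # [N, H, W, C'] -> [N, H*W, C']
        return tf.reshape(t, [-1, h * w, t.get_shape().as_list()[-1]])

    # Attention map over all spatial positions: [N, HW, HW].
    att = tf.nn.softmax(tf.matmul(flatten(f), flatten(g), transpose_b=True), -1)
    out = tf.matmul(att, flatten(v))                    # [N, HW, C]
    out = tf.reshape(out, [-1, h, w, c])
    # Learned scale, initialised to zero.
    scale = tf.get_variable('att_scale', shape=[],
                            initializer=tf.constant_initializer(0.0))
    return scale * out + x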
Example #3
 def convLayer(self,
               size,
               outchn,
               stride=1,
               pad='SAME',
               activation=-1,
               batch_norm=False,
               layerin=None):
     with tf.variable_scope('conv_' + str(self.layernum)):
         if isinstance(size, list):
             kernel = size
         else:
             kernel = [size, size]
         if layerin is not None:
             self.result = layerin[0]
             self.inpsize = list(layerin[1])
         self.result = L.conv2D(self.result,
                                kernel,
                                outchn,
                                'conv_' + str(self.layernum),
                                stride=stride,
                                pad=pad)
         self.varlist = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
         if batch_norm:
             self.result = L.batch_norm(self.result,
                                        'batch_norm_' + str(self.layernum))
         self.layernum += 1
         if pad == 'VALID':
             self.inpsize[1] -= kernel[0] - stride
             self.inpsize[2] -= kernel[1] - stride
         self.inpsize[1] = self.inpsize[1] // stride
         self.inpsize[2] = self.inpsize[2] // stride
         self.inpsize[3] = outchn
         self.activate(activation)
     return [self.result, list(self.inpsize)]
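This builder keeps the running tensor in self.result and its shape in self.inpsize, so successive layers can be chained without passing tensors around. A minimal sketch of the same pattern using plain TensorFlow 1.x layers (TinyModel is illustrative, not the project's actual class):

import tensorflow as tf  # TensorFlow 1.x

class TinyModel(object):
    # Illustrative only: mimics the "keep self.result, chain layers" style.
    def __init__(self, inp):
        self.result = inp
        self.layernum = 0

    def conv(self, size, outchn, stride=1):
        with tf.variable_scope('conv_' + str(self.layernum)):
            self.result = tf.layers.conv2d(self.result, outchn, size,
                                           strides=stride, padding='same',
                                           activation=tf.nn.relu)
        self.layernum += 1
        return self.result

x = tf.placeholder(tf.float32, [None, 32, 32, 3])
model = TinyModel(x)
model.conv(3, 16)
model.conv(3, 32, stride=2)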
Example #4
def get_rpn_layers(c2,c3,c4,c5):
	P5 = L.conv2D(c5,1,256,'P5')
	P4 = L.upSampling(P5,2,'U5') + L.conv2D(c4,1,256,'P4')
	P3 = L.upSampling(P4,2,'U4') + L.conv2D(c3,1,256,'P3')
	P2 = L.upSampling(P3,2,'U3') + L.conv2D(c2,1,256,'P2')
	P2 = L.conv2D(P2,3,256,'P22')
	P3 = L.conv2D(P3,3,256,'P32')
	P4 = L.conv2D(P4,3,256,'P42')
	P5 = L.conv2D(P5,3,256,'P52')
	P6 = L.maxpooling(P5,1,2,'P6')
	return P2,P3,P4,P5,P6
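For readers without the project's L module, the same top-down FPN pathway can be sketched with plain TensorFlow 1.x ops. It assumes L.upSampling is a nearest-neighbour 2x resize, which is the usual FPN choice (fpn_top_down is a placeholder name):

import tensorflow as tf  # TensorFlow 1.x

def fpn_top_down(c2, c3, c4, c5, channels=256):
    def lateral(x, name):  # 1x1 conv to unify channel counts
        return tf.layers.conv2d(x, channels, 1, padding='same', name=name)

    def upsample2x(x):     # nearest-neighbour 2x upsampling
        h, w = tf.shape(x)[1], tf.shape(x)[2]
        return tf.image.resize_nearest_neighbor(x, tf.stack([2 * h, 2 * w]))

    p5 = lateral(c5, 'P5')
    p4 = upsample2x(p5) + lateral(c4, 'P4')
    p3 = upsample2x(p4) + lateral(c3, 'P3')
    p2 = upsample2x(p3) + lateral(c2, 'P2')

    def smooth(x, name):   # 3x3 conv to clean up the merged maps
        return tf.layers.conv2d(x, channels, 3, padding='same', name=name)

    p2, p3, p4, p5 = smooth(p2, 'P22'), smooth(p3, 'P32'), smooth(p4, 'P42'), smooth(p5, 'P52')
    p6 = tf.layers.max_pooling2d(p5, 1, 2, name='P6')  # extra stride-2 level
    return p2, p3, p4, p5, p6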
Example #5
	def convLayer(self,size,outchn,dilation_rate=1,stride=1,pad='SAME',activation=-1,batch_norm=False,layerin=None,usebias=True,kernel_data=None,bias_data=None,weight_norm=False):
		with tf.variable_scope('conv_'+str(self.layernum)):
			if isinstance(size,list):
				kernel = size
			else:
				kernel = [size,size]
			if layerin is not None:
				self.result = layerin
				self.inpsize = layerin.get_shape().as_list()
			self.result = L.conv2D(self.result,kernel,outchn,'conv_'+str(self.layernum),stride=stride,pad=pad,usebias=usebias,kernel_data=kernel_data,bias_data=bias_data,dilation_rate=dilation_rate,weight_norm=weight_norm)
			if batch_norm:
				self.result = L.batch_norm(self.result,'batch_norm_'+str(self.layernum),training=self.bntraining,epsilon=self.epsilon)
			self.layernum += 1
			self.inpsize = self.result.get_shape().as_list()
			self.activate(activation)
		return self.result
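Compared with Example #3, this variant also exposes dilation_rate and weight_norm. For what the dilation argument does in standard TensorFlow (a sketch using tf.layers, not the project's wrapper):

import tensorflow as tf  # TensorFlow 1.x

x = tf.placeholder(tf.float32, [None, 64, 64, 3])
# A 3x3 kernel with dilation_rate=2 samples inputs two pixels apart,
# covering a 5x5 receptive field with no extra parameters.
y = tf.layers.conv2d(x, 32, 3, dilation_rate=2, padding='same', name='dilated_conv')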
Example #6
 def convLayer(self,
               size,
               outchn,
               stride=1,
               pad='SAME',
               activation=-1,
               batch_norm=False,
               layerin=None,
               usebias=True,
               kernel_data=None,
               bias_data=None):
     with tf.variable_scope('conv_' + str(self.layernum)):
         if isinstance(size, list):
             kernel = size
         else:
             kernel = [size, size]
         if layerin is not None:
             self.result = layerin[0]
             self.inpsize = list(layerin[1])
         self.result = L.conv2D(self.result,
                                kernel,
                                outchn,
                                'conv_' + str(self.layernum),
                                stride=stride,
                                pad=pad,
                                usebias=usebias,
                                kernel_data=kernel_data,
                                bias_data=bias_data)
         self.varlist = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
         if batch_norm:
             self.result = L.batch_norm(self.result,
                                        'batch_norm_' + str(self.layernum),
                                        training=self.bntraining)
         self.layernum += 1
         self.inpsize = self.result.get_shape().as_list()
         self.activate(activation)
     return [self.result, list(self.inpsize)]
Example #7
    sp = tf_util.fullyConnected(y_vec,
                                crop_dims,
                                leaky_relu,
                                std='xavier',
                                scope='sp1')
    sp = tf_util.fullyConnected(sp,
                                crop_dims**2,
                                leaky_relu,
                                std='xavier',
                                scope='sp2')
    sp = tf.reshape(sp, (Nbatch, crop_dims, crop_dims, 1))

    y_sp = tf_util.conv2D(sp,
                          nfilters=Nfilters,
                          activation=leaky_relu,
                          init=init,
                          scope='sp3')
    y_sp_1 = tf_util.conv2D(y_sp,
                            nfilters=Nfilters,
                            activation=leaky_relu,
                            init=init,
                            scope='sp4')
    y_sp_2 = tf_util.conv2D(y_sp_1,
                            nfilters=Nfilters,
                            activation=leaky_relu,
                            init=init,
                            scope='sp5')

    yhat = tf_util.conv2D(y_sp_2,
                          nfilters=1,
Example #8
def conv_block(x,
               num_filters=32,
               filter_dims=[5, 5],
               fc_size=1024,
               scope='conv_block',
               batch_size=4):

    s = x.get_shape().as_list()

    with tf.variable_scope(scope):
        #downsample image with stride [3,3]
        a = conv2D(x,
                   dims=[7, 7],
                   filters=num_filters,
                   strides=[3, 3],
                   std='xavier',
                   padding='VALID',
                   activation=tf.nn.relu,
                   scope='conv1')

        #no downsampling with stride [1,1]
        a = conv2D(a,
                   filter_dims,
                   filters=num_filters,
                   strides=[1, 1],
                   std='xavier',
                   padding='SAME',
                   activation=tf.nn.relu,
                   scope='conv2')

        num_filters = 2 * num_filters
        #downsample image with stride [2,2]
        a = conv2D(a,
                   filter_dims,
                   filters=num_filters,
                   strides=[2, 2],
                   std='xavier',
                   padding='VALID',
                   activation=tf.nn.relu,
                   scope='conv3')

        #no downsampling with stride [1,1]
        a = conv2D(a,
                   filter_dims,
                   filters=num_filters,
                   strides=[1, 1],
                   std='xavier',
                   padding='SAME',
                   activation=tf.nn.relu,
                   scope='conv4')

        num_filters = 2 * num_filters
        #downsample image with stride [2,2]
        a = conv2D(a,
                   filter_dims,
                   filters=num_filters,
                   strides=[2, 2],
                   std='xavier',
                   padding='VALID',
                   activation=tf.nn.relu,
                   scope='conv5')

        #no downsampling with stride [1,1]
        a = conv2D(a,
                   filter_dims,
                   filters=num_filters,
                   strides=[1, 1],
                   std='xavier',
                   padding='SAME',
                   activation=tf.nn.relu,
                   scope='conv6')

        #downsample image with stride [2,2]
        num_filters = 32
        a = conv2D(a,
                   filter_dims,
                   filters=num_filters,
                   strides=[2, 2],
                   std='xavier',
                   padding='VALID',
                   activation=tf.nn.relu,
                   scope='conv7')

        #Convert to vector with fullyconnected layer
        a = tf.reshape(a, shape=[batch_size, -1])

        a = fullyConnected(a,
                           output_units=fc_size,
                           activation=tf.nn.relu,
                           std='xavier',
                           scope='fc')

        print("output vector of conv_block is: {}".format(a))
        return a
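As a rough check on the downsampling in this block, the spatial sizes can be traced by hand with standard TensorFlow convolution arithmetic; the 224-pixel input below is only an assumed example, and it presumes conv2D follows the usual SAME/VALID rules:

def conv_out_size(size, kernel, stride, padding):
    # TF arithmetic: SAME -> ceil(size / stride), VALID -> ceil((size - kernel + 1) / stride).
    if padding == 'SAME':
        return -(-size // stride)
    return -(-(size - kernel + 1) // stride)

size = 224  # assumed input height/width
for k, s, p in [(7, 3, 'VALID'), (5, 1, 'SAME'), (5, 2, 'VALID'),
                (5, 1, 'SAME'), (5, 2, 'VALID'), (5, 1, 'SAME'), (5, 2, 'VALID')]:
    size = conv_out_size(size, k, s, p)
print(size)  # spatial size entering the fully connected layer (6 for a 224 input)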