示例#1
0
 def lp_conv(self, input, k, s_h, s_w, bw, fl, rs, padding):
     """Low-precision 2-D convolution.

     Runs a standard convolution and then passes the result through the
     low-precision operator, which performs the right shift, bit
     truncation and saturation.

     Args:
         input: Input activation tensor.
         k: Convolution kernel tensor.
         s_h, s_w: Vertical / horizontal strides.
         bw: Bit width for the low-precision truncation.
         fl: Fraction length (unused here; kept for interface parity).
         rs: Right-shift amount applied by lp_op.lp.
         padding: Padding mode forwarded to tf.nn.conv2d.

     Returns:
         The convolution output after low-precision post-processing.
     """
     conv_out = tf.nn.conv2d(input, k, [1, s_h, s_w, 1], padding=padding)
     # lp_op.lp performs the right shift plus bit truncation/saturation.
     return lp_op.lp(conv_out, bw, rs)
示例#2
0
 def lp_deconv(self, input, k, output_shape, strides, bw, fl, rs, rate, padding):
     """Low-precision transposed (de-)convolution.

     Uses a plain transposed convolution when rate == 1 and an atrous
     transposed convolution otherwise, then applies the low-precision
     operator (right shift, bit truncation and saturation).

     Args:
         input: Input activation tensor.
         k: Deconvolution kernel tensor.
         output_shape: Desired output shape of the transposed conv.
         strides: Stride spec forwarded to tf.nn.conv2d_transpose.
         bw: Bit width for the low-precision truncation.
         fl: Fraction length (unused here; kept for interface parity).
         rs: Right-shift amount applied by lp_op.lp.
         rate: Dilation rate; values > 1 select the atrous variant.
         padding: Padding mode forwarded to TensorFlow.

     Returns:
         The deconvolution output after low-precision post-processing.
     """
     if rate == 1:
         deconv_out = tf.nn.conv2d_transpose(
             input, k, output_shape, strides=strides, padding=padding)
     else:
         deconv_out = tf.nn.atrous_conv2d_transpose(
             input, k, output_shape, rate=rate, padding=padding)
     # Right shift plus bit truncation/saturation happen inside lp_op.lp.
     return lp_op.lp(deconv_out, bw, rs)
示例#3
0
        def fc(self, input, num_out, name, bw=cfg.WORD_WIDTH, fl=10, rs=0, relu=True):
                """Fully connected layer with an optional hardware-emulation path.

                Args:
                        input: Input tensor, or a tuple whose first element is the tensor.
                                4-D inputs are flattened before the matmul.
                        num_out: Number of output units.
                        name: Variable-scope name. 'bbox_pred' gets a smaller
                                weight-init stddev (0.001 vs 0.01).
                        bw: Bit width for low-precision truncation on the hardware path.
                        fl: Fraction length; not used inside this method.
                        rs: Right-shift amount passed to lp_op.lp on the hardware path.
                        relu: When True, apply ReLU to the final output.

                Returns:
                        The layer output tensor; on the hardware path it is saturated
                        to cfg.WORD_WIDTH bits before the optional ReLU.
                """
                print("FC DEBUG: input: \n")
                print(input)
                print("FC DEBUG: done input\n")
                with tf.variable_scope(name) as scope:
                        # Some upstream layers hand over (tensor, ...) tuples; keep the tensor.
                        if isinstance(input, tuple):
                                input = input[0]

                        input_shape = input.get_shape()
                        if input_shape.ndims == 4:
                                # Flatten a 4-D feature map into [batch, dim].
                                dim = 1
                                print(" FC  input_shape: %s\n" %(str(input_shape)))
                                print( input_shape[1:])
                                for d in input_shape[1:].as_list():
                                        dim = dim * d
                                # Transpose NHWC -> NCHW before flattening — presumably so the
                                # flattened order matches the stored weight layout; TODO confirm.
                                feed_in = tf.reshape(tf.transpose(input,[0,3,1,2]), [-1, dim])
                        else:
                                # Already 2-D: use as-is, width taken from the last axis.
                                feed_in, dim = (input, int(input_shape[-1]))

                        # bbox_pred uses a tighter weight init; both branches share zero biases.
                        if name == 'bbox_pred':
                                init_weights = tf.truncated_normal_initializer(0.0, stddev=0.001)
                                init_biases = tf.constant_initializer(0.0)
                        else:
                                init_weights = tf.truncated_normal_initializer(0.0, stddev=0.01)
                                init_biases = tf.constant_initializer(0.0)

                        weights = self.make_var('weights', [dim, num_out], init_weights, self.trainable, \
                                                                        regularizer=self.l2_regularizer(0.0005))
                        biases = self.make_var('biases', [num_out], init_biases, self.trainable)
                        biases_dummy = self.make_var('biases_dummy', [num_out], init_biases, self.trainable)    # Dummy bias to take care of saturation
                        if cfg.ENABLE_TENSORBOARD:
                                self.variable_summaries(biases)
                                self.variable_summaries(weights)
                        
                        # Matmul uses the dummy bias so the real bias can be added AFTER the
                        # low-precision right shift below (hardware ordering).
                        fc = tf.nn.xw_plus_b(feed_in, weights, biases_dummy, name=scope.name)
                        if self.isHardware: 
                                # Right shift + bit truncation/saturation before the real bias add.
                                fc = lp_op.lp(fc, bw, rs)
                        fc_biased = tf.add(fc, biases)  
        
                        if self.isHardware:
                                # Clamp the biased result to the hardware word width.
                                fc_biased_saturate = self.saturate(fc_biased, cfg.WORD_WIDTH)
                                if relu: 
                                        return tf.nn.relu(fc_biased_saturate)
                                return fc_biased_saturate

                        # Software path: no saturation.
                        if relu:
                                return tf.nn.relu(fc_biased)
                        return fc_biased