Example #1
  def _setup_prediction(self):
    self.batch_size = self.inputs['data'].get_shape().as_list()[0]

    # LAYER 0: INPUT layer: per-example 2D tensor, three channels x window size (plus batch dimension)
    current_layer = self.inputs['data']
    d1, d2, d3 = current_layer.get_shape().as_list()
    print ("[\033[94mINFO\033[0m] LAYER input shape = "+str(d1)+"x"+str(d2)+"x"+str(d3))

    # LAYERS 1..num_conv_layers: 1D CONVOLUTIONAL layers, 32 kernels of size ksize each, stride 2, zero ('SAME') padding
    c = 32  # number of channels per conv layer
    ksize = self.config.ksize  # size of the convolution kernel
    depth = self.config.num_conv_layers
    for i in range(depth):
        current_layer = layers.conv1(current_layer, c, ksize, stride=2, scope='conv{}'.format(i+1), padding='SAME')
        d1, d2, d3 = current_layer.get_shape().as_list()
        print ("[\033[94mINFO\033[0m] LAYER "+'conv{}'.format(i+1)+" shape = "+str(d1)+"x"+str(d2)+"x"+str(d3))
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, current_layer)
        self.layers['conv{}'.format(i+1)] = current_layer
        if self.config.pooling:
          # Max Pooling (down-sampling) with strides of 2 and kernel size of 2
          #https://www.tensorflow.org/api_docs/python/tf/nn/pool
          current_layer = tf.nn.pool(current_layer, window_shape=[self.config.pooling_window], pooling_type='MAX', padding='SAME', strides=[self.config.pooling_stride])
          d1, d2, d3 = current_layer.get_shape().as_list()
          print ("[\033[94mINFO\033[0m] LAYER "+'max_pool{}'.format(i+1)+" shape = "+str(d1)+"x"+str(d2)+"x"+str(d3))
          tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, current_layer)
          self.layers['pool{}'.format(i+1)] = current_layer

    bs, width, _ = current_layer.get_shape().as_list()
    current_layer = tf.reshape(current_layer, [bs, width*c], name="reshape")

    # FULLY CONNECTED LAYERS
    for i in range(0, self.config.num_fc_layers-1):
      # FULLY CONNECTED hidden layer i+1
      current_layer = layers.fc(current_layer, self.config.fc_size, scope='fc{}'.format(i+1), activation_fn=None)
      d1, d2 = current_layer.get_shape().as_list()
      print ("[\033[94mINFO\033[0m] FC LAYER "+'{}'.format(i+1)+" shape = "+str(d1)+"x"+str(d2))
      self.layers['fc{}'.format(i+1)] = current_layer
      tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, current_layer)

    # LAYER: LAST FULLY CONNECTED (LOGITS)
    current_layer = layers.fc(current_layer, self.config.n_clusters, scope='logits', activation_fn=None)
    d1, d2 = current_layer.get_shape().as_list()
    print ("[\033[94mINFO\033[0m] FC LAYER LOGITS shape = "+str(d1)+"x"+str(d2))
    self.layers['logits'] = current_layer
    tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, current_layer)
    
    self.layers['class_prob'] = tf.nn.softmax(current_layer, name='class_prob')
    self.layers['class_prediction'] = tf.argmax(self.layers['class_prob'], 1, name='class_pred')

    tf.contrib.layers.apply_regularization(
        tf.contrib.layers.l2_regularizer(self.config.regularization),
        weights_list=tf.get_collection(tf.GraphKeys.WEIGHTS))
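Note: with 'SAME' padding, each stride-2 convolution (and each optional pooling step) divides the time axis by its stride, rounded up, so the reshape above flattens a [batch, width, 32] tensor into [batch, width*32]. A minimal sketch of that width arithmetic; the 1001-sample window below is only an illustrative assumption, not a value taken from the example:

def flattened_width(window_size, num_conv_layers, pooling=False, pooling_stride=2):
    """Time-axis width after the conv/pool stack above ('SAME' padding, stride 2)."""
    width = window_size
    for _ in range(num_conv_layers):
        width = -(-width // 2)                   # stride-2 conv: ceil(width / 2)
        if pooling:
            width = -(-width // pooling_stride)  # optional max pooling
    return width

# e.g. a hypothetical 1001-sample window, 8 conv layers, no pooling:
# 1001 -> 501 -> 251 -> 126 -> 63 -> 32 -> 16 -> 8 -> 4,
# so the reshape yields a [batch_size, 4 * 32] tensor.
print(flattened_width(1001, 8))  # 4
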
Example #2
  def _setup_prediction(self):
    self.batch_size = self.inputs['data'].get_shape().as_list()[0]

    current_layer = self.inputs['data']
    d1, d2, d3 = current_layer.get_shape().as_list()
    print ("[\033[94mINFO\033[0m] INPUT LAYER shape = "+str(d1)+"x"+str(d2)+"x"+str(d3))
    c = 32  # number of channels per conv layer
    ksize = 3  # size of the convolution kernel
    depth = 8
    for i in range(depth):
        current_layer = layers.conv1(current_layer, c, ksize, stride=2, scope='conv{}'.format(i+1), padding='SAME')
        d1, d2, d3 = current_layer.get_shape().as_list()
        print ("[\033[94mINFO\033[0m] LAYER "+'conv{}'.format(i+1)+" shape = "+str(d1)+"x"+str(d2)+"x"+str(d3))

        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, current_layer)
        self.layers['conv{}'.format(i+1)] = current_layer

    bs, width, _ = current_layer.get_shape().as_list()

    #print ("[\033[94mINFO\033[0m] LAST LAYER CONV shape = "+str(bs)+"x"+str(width)+"x"+str(_))

    current_layer = tf.reshape(current_layer, [bs, width*c], name="reshape")

    d1, d2 = current_layer.get_shape().as_list()
    print ("[\033[94mINFO\033[0m] LAST LAYER CONV RESHAPED shape = "+str(d1)+"x"+str(d2))

    current_layer = layers.fc(current_layer, self.config.n_clusters, scope='logits', activation_fn=None)
    
    d1, d2 = current_layer.get_shape().as_list()
    print ("[\033[94mINFO\033[0m] FC LAYER shape = "+str(d1)+"x"+str(d2))
    self.layers['logits'] = current_layer
    tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, current_layer)

    self.layers['class_prob'] = tf.nn.softmax(current_layer, name='class_prob')
    self.layers['class_prediction'] = tf.argmax(self.layers['class_prob'], 1, name='class_pred')

    tf.contrib.layers.apply_regularization(
        tf.contrib.layers.l2_regularizer(self.config.regularization),
        weights_list=tf.get_collection(tf.GraphKeys.WEIGHTS))
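The `layers` module used throughout these examples is project code and is not shown here. A rough stand-in for `layers.conv1` on the stock TF 1.x API, registering its kernel in tf.GraphKeys.WEIGHTS so that apply_regularization() above picks it up, might look like the sketch below (the names, initializers and defaults are assumptions, not the project's actual helper):

import tensorflow as tf

def conv1(inputs, num_filters, ksize, stride=1, scope=None, padding='SAME',
          activation_fn=tf.nn.relu):
    """Hypothetical stand-in for layers.conv1 (TF 1.x, NWC layout)."""
    in_channels = inputs.get_shape().as_list()[-1]
    with tf.variable_scope(scope, 'conv1'):
        kernel = tf.get_variable('weights', [ksize, in_channels, num_filters],
                                 initializer=tf.contrib.layers.xavier_initializer())
        bias = tf.get_variable('biases', [num_filters],
                               initializer=tf.zeros_initializer())
        tf.add_to_collection(tf.GraphKeys.WEIGHTS, kernel)  # picked up by the L2 regularizer
        out = tf.nn.conv1d(inputs, kernel, stride=stride, padding=padding) + bias
        if activation_fn is not None:
            out = activation_fn(out)
    return out
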
Example #3
    def _setup_prediction(self):
        self.batch_size = self.inputs['data'].get_shape().as_list()[0]

        current_layer = self.inputs['data']
        print(current_layer)
        c = 64  # number of channels per conv layer
        ksize = 3  # size of the convolution kernel
        depth = 6
        for i in range(depth):
            current_layer = layers.conv1(current_layer,
                                         c,
                                         ksize,
                                         stride=1,
                                         scope='conv{}'.format(i + 1),
                                         padding='SAME')
            current_layer = layers.max_pooling(current_layer, 2)
            tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, current_layer)
            self.layers['conv{}'.format(i + 1)] = current_layer

        bs, width, _ = current_layer.get_shape().as_list()
        current_layer = tf.reshape(current_layer, [bs, width * c],
                                   name="reshape")

        current_layer = layers.fc(current_layer,
                                  3,
                                  scope='det_logits',
                                  activation_fn=None)
        self.layers['logits'] = current_layer
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, current_layer)

        self.layers['class_prob'] = tf.nn.softmax(current_layer,
                                                  name='det_class_prob')
        self.layers['class_prediction'] = tf.argmax(self.layers['class_prob'],
                                                    1,
                                                    name='det_class_pred')

        tf.contrib.layers.apply_regularization(
            tf.contrib.layers.l2_regularizer(self.config.regularization),
            weights_list=tf.get_collection(tf.GraphKeys.WEIGHTS))
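Unlike Examples #1 and #2, this variant keeps the convolutions at stride 1 and downsamples with a separate `layers.max_pooling` call. That helper is also project code; a plausible stand-in, assuming the single argument is used as both window size and stride, would be:

import tensorflow as tf

def max_pooling(inputs, size, name='max_pool'):
    """Hypothetical stand-in for layers.max_pooling: 1D max pool, window == stride == size."""
    return tf.nn.pool(inputs, window_shape=[size], pooling_type='MAX',
                      padding='SAME', strides=[size], name=name)
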
Example #4
 def _setup_prediction(self):
   # def inputs
   self.batch_size = self.inputs['data'].get_shape().as_list()[0]
   current_layer = self.inputs['data']
   current_layer = tf.squeeze(current_layer, [1])
   
   # define hyperparameters
   c     = 32 # number of channels per conv layer
   ksize = 3  # size of the convolution kernel
   depth = 8
   lamb  = 1e-4 # l2 reg
   # def model structure
   for i in range(depth):
       current_layer = layers.conv1(current_layer, c, ksize, stride=1, 
                                   activation_fn=None, scope='conv{}'.format(i+1))
       current_layer = layers.batch_norm(current_layer, 
                                       'conv{}_batch_norm'.format(i+1), 
                                       self.is_training)
       current_layer = tf.nn.relu(current_layer)
       self.layers['conv0_{}'.format(i+1)] = current_layer
       current_layer = layers.max_pooling(current_layer, 2)
       tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, current_layer)
       self.layers['conv{}'.format(i+1)] = current_layer
   # softmax regression
   bs, width, _ = current_layer.get_shape().as_list()
   current_layer = tf.reshape(current_layer, [bs, width*c], name="reshape")
   
   current_layer = layers.fc(current_layer, 2, scope='det_logits', activation_fn=None)
   self.layers['logits'] = current_layer
   tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, current_layer)
   # output prediction
   self.layers['class_prob'] = tf.nn.softmax(current_layer, name='det_class_prob')
   self.layers['class_prediction'] = tf.argmax(self.layers['class_prob'], 1, name='det_class_pred')
   
   # add L2 regularization
   tf.contrib.layers.apply_regularization(
       tf.contrib.layers.l2_regularizer(lamb),
       weights_list=tf.get_collection(tf.GraphKeys.WEIGHTS))
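Example #4 builds each convolution with `activation_fn=None` and inserts batch normalization before the ReLU. The project's `layers.batch_norm` helper is not shown; a rough equivalent on the stock TF 1.x API, under the assumption that it simply wraps batch normalization in the given scope, could be:

import tensorflow as tf

def batch_norm(inputs, scope, is_training):
    """Hypothetical stand-in for layers.batch_norm (TF 1.x)."""
    with tf.variable_scope(scope):
        # Note: tf.layers.batch_normalization keeps its moving-average updates
        # in tf.GraphKeys.UPDATE_OPS; the training op must run them as well.
        return tf.layers.batch_normalization(inputs, training=is_training)
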
Example #5
    def _setup_prediction(self):
        
        self.batch_size = self.inputs['data'].get_shape().as_list()[0]
    
        current_layer = self.inputs['data']
        #n_channels = 32  # number of channels per conv layer
        ksize = 3  # size of the convolution kernel
        for i in range(self.n_conv_layers):
            current_layer = layers.conv1(current_layer, self.n_channels, ksize, stride=2, scope='conv{}'.format(i + 1), padding='SAME')
            tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, current_layer)
            self.layers['conv{}'.format(i + 1)] = current_layer
    
        bs, width, _ = current_layer.get_shape().as_list()
        n_fc_nodes = width * self.n_channels
        current_layer = tf.reshape(current_layer, [bs, n_fc_nodes], name="reshape")
        
        # 20180916 AJL - include stream_max in fc layers
        stream_max_tensor = tf.expand_dims(self.inputs['stream_max'], 1)
        current_layer = tf.concat([current_layer, stream_max_tensor], 1)
        n_fc_nodes += 1
    
        for i in range(self.n_fc_layers - 1):
            current_layer = layers.fc(current_layer, n_fc_nodes, scope='fc{}'.format(i + 1), activation_fn=tf.nn.relu)
        current_layer = layers.fc(current_layer,
                                  self.cfg.n_distances + self.cfg.n_magnitudes + self.cfg.n_depths + self.cfg.n_azimuths,
                                  scope='logits', activation_fn=None)

        istart = 0
        iend = self.cfg.n_distances
        self.layers['distance_logits'] = current_layer[ : , istart : iend]
        self.layers['distance_prob'] = tf.nn.softmax(self.layers['distance_logits'], name='distance_prob')
        self.layers['distance_prediction'] = tf.argmax(self.layers['distance_prob'], 1, name='distance_pred')
        istart = iend
        
        self.layers['magnitude_logits'] = tf.constant(-1)
        self.layers['magnitude_prob'] = tf.constant(-1)
        self.layers['magnitude_prediction'] = tf.constant(-1)
        if self.cfg.n_magnitudes > 0:
            iend += self.cfg.n_magnitudes
            self.layers['magnitude_logits'] = current_layer[ : , istart : iend]
            self.layers['magnitude_prob'] = tf.nn.softmax(self.layers['magnitude_logits'], name='magnitude_prob')
            self.layers['magnitude_prediction'] = tf.argmax(self.layers['magnitude_prob'], 1, name='magnitude_pred')
            istart = iend
            
        self.layers['depth_logits'] = tf.constant(-1)
        self.layers['depth_prob'] = tf.constant(-1)
        self.layers['depth_prediction'] = tf.constant(-1)
        if self.cfg.n_depths > 0:
            iend += self.cfg.n_depths
            self.layers['depth_logits'] = current_layer[ : , istart : iend]
            self.layers['depth_prob'] = tf.nn.softmax(self.layers['depth_logits'], name='depth_prob')
            self.layers['depth_prediction'] = tf.argmax(self.layers['depth_prob'], 1, name='depth_pred')
            istart = iend
   
        self.layers['azimuth_logits'] = tf.constant(-1)
        self.layers['azimuth_prob'] = tf.constant(-1)
        self.layers['azimuth_prediction'] = tf.constant(-1)
        if self.cfg.n_azimuths > 0:
            iend += self.cfg.n_azimuths
            self.layers['azimuth_logits'] = current_layer[ : , istart : iend]
            self.layers['azimuth_prob'] = tf.nn.softmax(self.layers['azimuth_logits'], name='azimuth_prob')
            self.layers['azimuth_prediction'] = tf.argmax(self.layers['azimuth_prob'], 1, name='azimuth_pred')
            istart = iend

    
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, current_layer)
    
        tf.contrib.layers.apply_regularization(
            tf.contrib.layers.l2_regularizer(self.cfg.regularization),
            weights_list=tf.get_collection(tf.GraphKeys.WEIGHTS))
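Example #5 slices one wide logits tensor into per-task heads (distance, magnitude, depth, azimuth) by advancing an istart/iend offset and skipping heads whose size is zero. The same bookkeeping can be expressed once as a small helper; a minimal sketch, reusing only names that appear above:

import tensorflow as tf

def split_heads(logits, head_sizes):
    """Slice a [batch, sum(sizes)] logits tensor into named softmax heads."""
    heads, start = {}, 0
    for name, size in head_sizes:
        if size > 0:
            head_logits = logits[:, start:start + size]
            heads[name + '_logits'] = head_logits
            heads[name + '_prob'] = tf.nn.softmax(head_logits, name=name + '_prob')
            heads[name + '_prediction'] = tf.argmax(heads[name + '_prob'], 1,
                                                    name=name + '_pred')
            start += size
    return heads

# e.g.
# heads = split_heads(current_layer,
#                     [('distance', self.cfg.n_distances),
#                      ('magnitude', self.cfg.n_magnitudes),
#                      ('depth', self.cfg.n_depths),
#                      ('azimuth', self.cfg.n_azimuths)])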