Example #1
    def phi(self, x, ext_wts=None, reuse=None):
        """Feature extraction function.

        Args:
          x: [N, H, W, C]. Input.
          ext_wts: Optional dictionary of external weights to use.
          reuse: Whether to reuse variables here.
        """
        config = self.config
        is_training = self.is_training
        dtype = self.dtype
        with tf.variable_scope("phi", reuse=reuse):
            h, wts = cnn(x,
                         config.filter_size,
                         strides=config.strides,
                         pool_fn=[tf.nn.max_pool] * len(config.pool_fn),
                         pool_size=config.pool_size,
                         pool_strides=config.pool_strides,
                         act_fn=[tf.nn.relu for aa in config.conv_act_fn],
                         add_bias=True,
                         init_std=config.conv_init_std,
                         init_method=config.conv_init_method,
                         wd=config.wd,
                         dtype=dtype,
                         batch_norm=True,
                         is_training=is_training,
                         ext_wts=ext_wts)
            if self._embedding_weights is None:
                self._embedding_weights = wts
            # Flatten [N, H, W, C] features into [N, H*W*C].
            h_shape = h.get_shape()
            h_size = 1
            for ss in h_shape[1:]:
                h_size *= int(ss)
            h = tf.reshape(h, [-1, h_size])
        return h
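The last few lines of phi compute the product of the non-batch dimensions and reshape the feature map into a [N, D] matrix. A minimal NumPy sketch of the same flattening pattern; flatten_features and the shapes are illustrative, not part of the original code:

import numpy as np

def flatten_features(h):
    """Flatten a [N, H, W, C] array into [N, H*W*C], mirroring the reshape in phi."""
    n = h.shape[0]
    h_size = int(np.prod(h.shape[1:]))  # product of all non-batch dimensions
    return h.reshape(n, h_size)

# A batch of 2 feature maps of shape 5x5x64 flattens to [2, 1600].
h = np.zeros((2, 5, 5, 64), dtype=np.float32)
assert flatten_features(h).shape == (2, 1600)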
Example #2
    def phi(x, reuse=None, scope="Model", is_training=True, ext_wts=None):
        """Feature extraction function.

        Args:
          x: [N, H, W, C]. Input.
          reuse: Whether to reuse variables here.
          scope: Variable scope.
          is_training: Whether the model is in training mode.
          ext_wts: A dictionary of external weights to use.
        """
        # `config` and `dtype` below are assumed to come from the enclosing scope.
        with tf.name_scope(scope):
            with tf.variable_scope("Model", reuse=reuse):
                h, wts = cnn(x,
                             config.filter_size,
                             strides=config.strides,
                             pool_fn=[tf.nn.max_pool] * len(config.pool_fn),
                             pool_size=config.pool_size,
                             pool_strides=config.pool_strides,
                             act_fn=[tf.nn.relu for aa in config.conv_act_fn],
                             add_bias=True,
                             init_std=config.conv_init_std,
                             init_method=config.conv_init_method,
                             wd=config.wd,
                             dtype=dtype,
                             batch_norm=True,
                             is_training=is_training,
                             ext_wts=ext_wts)
                # On Python 3 this requires `from functools import reduce`.
                h_size = reduce(lambda x, y: x * y,
                                [int(ss) for ss in h.get_shape()[1:]])
                h = tf.reshape(h, [-1, h_size])
                return h, wts
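Example #2 computes the same flattened size with functools.reduce instead of an explicit loop. A short standalone sketch with illustrative dimensions:

from functools import reduce

dims = [5, 5, 64]                          # illustrative non-batch dimensions
h_size = reduce(lambda x, y: x * y, dims)  # 5 * 5 * 64 = 1600
assert h_size == 1600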
Example #3
    def __call__(self,
                 x,
                 is_training=True,
                 ext_wts=None,
                 reuse=None,
                 **kwargs):
        """See Backbone class for documentation."""
        config = self.config
        dtype = self.dtype
        # This backbone requires add_last_relu to be disabled, so the final
        # layer gets no activation.
        assert not config.add_last_relu
        L = len(config.conv_act_fn)
        if config.add_last_relu:
            act_fn = [tf.nn.relu for aa in range(L)]
        else:
            act_fn = [tf.nn.relu for aa in range(L - 1)] + [None]
        with tf.variable_scope("phi", reuse=reuse):
            # Sanity check: this backbone assumes a fixed weight-decay setting.
            assert config.wd == 5e-4, '{}'.format(config.wd)
            h, wts = cnn(x,
                         config.filter_size,
                         strides=config.strides,
                         pool_fn=[tf.nn.max_pool] * len(config.pool_fn),
                         pool_size=config.pool_size,
                         pool_strides=config.pool_strides,
                         act_fn=act_fn,
                         add_bias=False,
                         init_std=config.conv_init_std,
                         init_method=config.conv_init_method,
                         wd=config.wd,
                         dtype=dtype,
                         batch_norm=True,
                         is_training=is_training,
                         ext_wts=ext_wts)
            if self.weights is None:
                self.weights = wts
            # Flatten [N, H, W, C] features into [N, H*W*C].
            h_shape = h.get_shape()
            h_size = 1
            for ss in h_shape[1:]:
                h_size *= int(ss)

            if ext_wts is not None:
                # External weights use PyTorch's NCHW layout; permute before flattening.
                h = tf.transpose(h, [0, 3, 1, 2])
            h = tf.reshape(h, [-1, h_size])
            # Sanity check on the expected flattened feature size.
            assert h_size == 3200
        return h
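When ext_wts holds weights trained in PyTorch, Example #3 permutes the feature map from TensorFlow's NHWC layout to NCHW before flattening, so the flattened vector is ordered the way the external weights expect. A NumPy sketch of that permutation; the sizes are illustrative:

import numpy as np

# [N, H, W, C] feature map with illustrative sizes.
h = np.arange(2 * 4 * 4 * 3, dtype=np.float32).reshape(2, 4, 4, 3)
# Permute to [N, C, H, W] so the flattened ordering matches NCHW (PyTorch) weights.
h_nchw = np.transpose(h, (0, 3, 1, 2))
assert h_nchw.shape == (2, 3, 4, 4)
flat = h_nchw.reshape(2, -1)  # channel-major flattening
assert flat.shape == (2, 48)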
Example #4
    def phi(self,
            x,
            ext_wts=None,
            reuse=tf.AUTO_REUSE,
            update_batch_stats=False):
        """Feature extraction function.

        Args:
          x: [N, H, W, C]. Input.
          ext_wts: Optional dictionary of external weights to use.
          reuse: Whether to reuse variables here.
          update_batch_stats: Whether to update batch normalization statistics.
        """
        config = self.config
        is_training = self.is_training
        dtype = self.dtype
        with tf.variable_scope("phi", reuse=reuse):
            h, wts = cnn(x,
                         config.filter_size,
                         strides=config.strides,
                         pool_fn=[tf.nn.max_pool] * len(config.pool_fn),
                         pool_size=config.pool_size,
                         pool_strides=config.pool_strides,
                         act_fn=[tf.nn.relu for aa in config.conv_act_fn],
                         add_bias=True,
                         init_std=config.conv_init_std,
                         init_method=config.conv_init_method,
                         wd=config.wd,
                         dtype=dtype,
                         batch_norm=True,
                         is_training=is_training,
                         ext_wts=ext_wts,
                         update_batch_stats=update_batch_stats)
            if self._embedding_weights is None:
                self._embedding_weights = wts
            h_shape = h.get_shape()
            h_size = 1
            for ss in h_shape[1:]:
                h_size *= int(ss)
            h = tf.reshape(h, [-1, h_size])
            self.adv_summaries.append(
                tf.summary.histogram('Encoded', h, family="phi_out"))
        return h
    def __init__(self,
                 config,
                 x,
                 y,
                 num_classes,
                 is_training=True,
                 dtype=tf.float32):
        """Constructor.

        Args:
          config: Model configuration object.
          x: [N, H, W, C]. Input images.
          y: [N]. Integer class labels.
          num_classes: Number of output classes.
          is_training: Whether the model is in training mode.
          dtype: Data type used for model variables.
        """
        h, _ = cnn(x,
                   config.filter_size,
                   strides=config.strides,
                   pool_fn=[tf.nn.max_pool] * len(config.pool_fn),
                   pool_size=config.pool_size,
                   pool_strides=config.pool_strides,
                   act_fn=[tf.nn.relu for aa in config.conv_act_fn],
                   add_bias=True,
                   init_std=config.conv_init_std,
                   init_method=config.conv_init_method,
                   wd=config.wd,
                   dtype=dtype,
                   batch_norm=True,
                   is_training=is_training,
                   ext_wts=None)
        h_shape = h.get_shape()
        h_size = 1
        for ss in h_shape[1:]:
            h_size *= int(ss)
        h = tf.reshape(h, [-1, h_size])
        w_class = weight_variable([h_size, num_classes],
                                  init_method='truncated_normal',
                                  dtype=tf.float32,
                                  init_param={'stddev': 0.01},
                                  name='w_class')
        b_class = weight_variable([num_classes],
                                  init_method='constant',
                                  init_param={'val': 0.0},
                                  name='b_class')
        self._feature = h
        logits = tf.matmul(h, w_class) + b_class
        # Average softmax cross-entropy plus weight-decay regularization.
        xent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
                                                              labels=y)
        xent = tf.reduce_mean(xent, name='xent')
        cost = xent
        cost += self._decay()
        self._cost = cost
        self._inputs = x
        self._labels = y
        global_step = tf.get_variable('global_step',
                                      shape=[],
                                      dtype=tf.int64,
                                      trainable=False)
        # Learning rate decay.
        learn_rate = tf.train.piecewise_constant(
            global_step,
            list(np.array(config.lr_decay_steps).astype(np.int64)),
            [config.learn_rate] + list(config.lr_list))
        self._learn_rate = learn_rate
        self._train_op = tf.train.AdamOptimizer(learn_rate).minimize(
            cost, global_step=global_step)

        correct = tf.equal(tf.argmax(logits, axis=1), y)
        self._acc = tf.reduce_mean(tf.cast(correct, dtype))
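The constructor schedules the learning rate with tf.train.piecewise_constant: config.lr_decay_steps supplies the step boundaries and the values are the initial rate followed by config.lr_list, so there is always one more value than boundary. A plain-Python sketch of how that pairing behaves; the numbers are made up for illustration:

def piecewise_constant(step, boundaries, values):
    """Mirror of tf.train.piecewise_constant: values[0] while step <= boundaries[0],
    values[i] between consecutive boundaries, values[-1] after the last boundary."""
    for boundary, value in zip(boundaries, values[:-1]):
        if step <= boundary:
            return value
    return values[-1]

boundaries = [4000, 6000]     # stands in for config.lr_decay_steps
values = [1e-3, 1e-4, 1e-5]   # stands in for [config.learn_rate] + list(config.lr_list)
assert piecewise_constant(1000, boundaries, values) == 1e-3
assert piecewise_constant(5000, boundaries, values) == 1e-4
assert piecewise_constant(9000, boundaries, values) == 1e-5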