Example #1
  def _maybe_calibrate_size(self, layers, out_filters, is_training):
    """Makes sure layers[0] and layers[1] have the same shapes."""

    hw = [self._get_HW(layer) for layer in layers]
    c = [self._get_C(layer) for layer in layers]

    with tf.variable_scope("calibrate"):
      x = layers[0]
      if hw[0] != hw[1]:
        assert hw[0] == 2 * hw[1]
        with tf.variable_scope("pool_x"):
          x = relu(x, self.leaky)
          x = self._factorized_reduction(x, out_filters, 2, is_training)
      elif c[0] != out_filters:
        with tf.variable_scope("pool_x"):
          w = create_weight("w", [1, 1, c[0], out_filters])
          x = relu(x, self.leaky)
          x = tf.nn.conv2d(x, w, [1, 1, 1, 1], "SAME",
                           data_format=self.data_format)
          x = batch_norm(x, is_training, data_format=self.data_format)

      y = layers[1]
      if c[1] != out_filters:
        with tf.variable_scope("pool_y"):
          w = create_weight("w", [1, 1, c[1], out_filters])
          y = relu(y, self.leaky)
          y = tf.nn.conv2d(y, w, [1, 1, 1, 1], "SAME",
                           data_format=self.data_format)
          y = batch_norm(y, is_training, data_format=self.data_format)
    return [x, y]
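
The two calibration branches are mutually exclusive for layers[0]: when the spatial sizes differ (by exactly a factor of two, which the assert enforces), _factorized_reduction halves H and W while setting the channel count; otherwise a 1x1 convolution projects the channels to out_filters. A minimal standalone sketch of the 1x1 projection, on assumed toy NHWC shapes (not part of the original class):

import tensorflow as tf

x = tf.zeros([8, 16, 16, 16])                          # [N, H, W, C]
out_filters = 24
w = tf.get_variable("w_calib", [1, 1, 16, out_filters])
x = tf.nn.conv2d(x, w, [1, 1, 1, 1], "SAME")           # -> [8, 16, 16, 24]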
Example #2
  def _enas_cell(self, x, curr_cell, prev_cell, op_id, out_filters):
    """Performs an enas operation specified by op_id."""

    num_possible_inputs = curr_cell + 1

    with tf.variable_scope("avg_pool"):
      avg_pool = tf.layers.average_pooling2d(
        x, [3, 3], [1, 1], "SAME", data_format=self.actual_data_format)
      avg_pool_c = self._get_C(avg_pool)
      if avg_pool_c != out_filters:
        with tf.variable_scope("conv"):
          w = create_weight(
            "w", [num_possible_inputs, avg_pool_c * out_filters])
          w = w[prev_cell]
          w = tf.reshape(w, [1, 1, avg_pool_c, out_filters])
          avg_pool = relu(avg_pool, self.leaky)
          avg_pool = tf.nn.conv2d(avg_pool, w, strides=[1, 1, 1, 1],
                                  padding="SAME", data_format=self.data_format)
          avg_pool = batch_norm(avg_pool, is_training=True,
                                data_format=self.data_format)

    with tf.variable_scope("max_pool"):
      max_pool = tf.layers.max_pooling2d(
        x, [3, 3], [1, 1], "SAME", data_format=self.actual_data_format)
      max_pool_c = self._get_C(max_pool)
      if max_pool_c != out_filters:
        with tf.variable_scope("conv"):
          w = create_weight(
            "w", [num_possible_inputs, max_pool_c * out_filters])
          w = w[prev_cell]
          w = tf.reshape(w, [1, 1, max_pool_c, out_filters])
          max_pool = relu(max_pool, self.leaky)
          max_pool = tf.nn.conv2d(max_pool, w, strides=[1, 1, 1, 1],
                                  padding="SAME", data_format=self.data_format)
          max_pool = batch_norm(max_pool, is_training=True,
                                data_format=self.data_format)

    x_c = self._get_C(x)
    if x_c != out_filters:
      with tf.variable_scope("x_conv"):
        w = create_weight("w", [num_possible_inputs, x_c * out_filters])
        w = w[prev_cell]
        w = tf.reshape(w, [1, 1, x_c, out_filters])
        x = relu(x, self.leaky)
        x = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="SAME",
                         data_format=self.data_format)
        x = batch_norm(x, is_training=True, data_format=self.data_format)

    out = [
      self._enas_conv(x, curr_cell, prev_cell, 3, out_filters),
      self._enas_conv(x, curr_cell, prev_cell, 5, out_filters),
      avg_pool,
      max_pool,
      x,
    ]

    out = tf.stack(out, axis=0)
    out = out[op_id, :, :, :, :]
    return out
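
The final indexing is the core ENAS trick: all five candidate operations (3x3 and 5x5 separable convolutions, average pooling, max pooling, identity) are built unconditionally, stacked along a new leading axis, and the slice selected by the dynamic op_id is returned. A minimal sketch of the pattern, with toy stand-in ops and assumed shapes:

import tensorflow as tf

x = tf.zeros([8, 16, 16, 24])
candidates = [x * 0.5, x + 1.0, tf.nn.relu(x)]  # stand-ins for the 5 ops
op_id = tf.constant(2)                          # sampled by the controller
out = tf.stack(candidates, axis=0)[op_id]       # shape [8, 16, 16, 24]

Note that every branch still executes on each step; the indexing only chooses which result flows onward, which is what lets one set of shared weights serve every sampled architecture.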
Example #3
  def _fixed_conv(self, x, f_size, out_filters, stride, is_training,
                  stack_convs=2):
    """Apply fixed convolution.

    Args:
      stack_convs: number of separable convs to apply.
    """

    for conv_id in range(stack_convs):
      inp_c = self._get_C(x)
      if conv_id == 0:
        strides = self._get_strides(stride)
      else:
        strides = [1, 1, 1, 1]

      with tf.variable_scope("sep_conv_{}".format(conv_id)):
        w_depthwise = create_weight("w_depth", [f_size, f_size, inp_c, 1])
        w_pointwise = create_weight("w_point", [1, 1, inp_c, out_filters])
        x = relu(x, self.leaky)
        x = tf.nn.separable_conv2d(
          x,
          depthwise_filter=w_depthwise,
          pointwise_filter=w_pointwise,
          strides=strides, padding="SAME", data_format=self.data_format)
        x = batch_norm(x, is_training, data_format=self.data_format)

    return x
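
Each iteration applies a separable convolution: a depthwise filter per input channel followed by a 1x1 pointwise mix. A self-contained sketch of the factorization and its parameter savings, on assumed NHWC toy shapes:

import tensorflow as tf

x = tf.zeros([8, 16, 16, 24])
w_depth = tf.get_variable("w_depth_demo", [3, 3, 24, 1])   # one 3x3 filter per channel
w_point = tf.get_variable("w_point_demo", [1, 1, 24, 36])  # 1x1 channel mixing
y = tf.nn.separable_conv2d(x, w_depth, w_point,
                           strides=[1, 1, 1, 1], padding="SAME")
# y: [8, 16, 16, 36]; parameters: 3*3*24 + 24*36 = 1080,
# versus 3*3*24*36 = 7776 for a dense 3x3 convolution.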
Example #4
  def _enas_conv(self, x, curr_cell, prev_cell, filter_size, out_filters,
                 stack_conv=2):
    """Performs an enas convolution specified by the relevant parameters."""

    with tf.variable_scope("conv_{0}x{0}".format(filter_size)):
      num_possible_inputs = curr_cell + 2
      for conv_id in range(stack_conv):
        with tf.variable_scope("stack_{0}".format(conv_id)):
          # create params and pick the correct path
          inp_c = self._get_C(x)
          w_depthwise = create_weight(
            "w_depth", [num_possible_inputs, filter_size * filter_size * inp_c])
          w_depthwise = w_depthwise[prev_cell, :]
          w_depthwise = tf.reshape(
            w_depthwise, [filter_size, filter_size, inp_c, 1])

          w_pointwise = create_weight(
            "w_point", [num_possible_inputs, inp_c * out_filters])
          w_pointwise = w_pointwise[prev_cell, :]
          w_pointwise = tf.reshape(w_pointwise, [1, 1, inp_c, out_filters])

          with tf.variable_scope("bn"):
            zero_init = tf.initializers.zeros(dtype=tf.float32)
            one_init = tf.initializers.ones(dtype=tf.float32)
            offset = create_weight(
              "offset", [num_possible_inputs, out_filters],
              initializer=zero_init)
            scale = create_weight(
              "scale", [num_possible_inputs, out_filters],
              initializer=one_init)
            offset = offset[prev_cell]
            scale = scale[prev_cell]

          # the computation: relu -> separable conv -> fused batch norm
          x = relu(x, self.leaky)
          x = tf.nn.separable_conv2d(
            x,
            depthwise_filter=w_depthwise,
            pointwise_filter=w_pointwise,
            strides=[1, 1, 1, 1], padding="SAME",
            data_format=self.data_format)
          x, _, _ = tf.nn.fused_batch_norm(
            x, scale, offset, epsilon=1e-5, data_format=self.data_format,
            is_training=True)
    return x
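
The weight-sharing pattern here is worth noting: for each parameter (depthwise filter, pointwise filter, batch-norm offset and scale) a bank with one row per possible input cell is created, and the row for the sampled prev_cell is sliced out and reshaped at graph-build time. A minimal sketch with assumed sizes:

import tensorflow as tf

num_possible_inputs, inp_c, out_filters = 4, 24, 36
bank = tf.get_variable(
    "w_point_bank", [num_possible_inputs, inp_c * out_filters])
prev_cell = tf.constant(1)   # input index sampled by the controller
w = tf.reshape(bank[prev_cell], [1, 1, inp_c, out_filters])

Because the bank, not the slice, is the variable, gradients from any sampled architecture update the same shared parameters.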
Example #5
  def _enas_layer(self, layer_id, prev_layers, arc, out_filters):
    """
    Args:
      layer_id: current layer
      prev_layers: cache of previous layers. for skip connections
      start_idx: where to start looking at. technically, we can infer this
        from layer_id, but why bother...
    """

    assert len(prev_layers) == 2, "need exactly 2 inputs"
    layers = [prev_layers[0], prev_layers[1]]
    layers = self._maybe_calibrate_size(layers, out_filters, is_training=True)
    used = []
    for cell_id in range(self.num_cells):
      prev_layers = tf.stack(layers, axis=0)
      with tf.variable_scope("cell_{0}".format(cell_id)):
        with tf.variable_scope("x"):
          x_id = arc[4 * cell_id]
          x_op = arc[4 * cell_id + 1]
          x = prev_layers[x_id, :, :, :, :]
          x = self._enas_cell(x, cell_id, x_id, x_op, out_filters)
          x_used = tf.one_hot(x_id, depth=self.num_cells + 2, dtype=tf.int32)

        with tf.variable_scope("y"):
          y_id = arc[4 * cell_id + 2]
          y_op = arc[4 * cell_id + 3]
          y = prev_layers[y_id, :, :, :, :]
          y = self._enas_cell(y, cell_id, y_id, y_op, out_filters)
          y_used = tf.one_hot(y_id, depth=self.num_cells + 2, dtype=tf.int32)

        out = x + y
        used.extend([x_used, y_used])
        layers.append(out)

    used = tf.add_n(used)
    indices = tf.where(tf.equal(used, 0))
    indices = tf.to_int32(indices)
    indices = tf.reshape(indices, [-1])
    num_outs = tf.size(indices)
    out = tf.stack(layers, axis=0)
    out = tf.gather(out, indices, axis=0)

    inp = prev_layers[0]
    if self.data_format == "NHWC":
      N = tf.shape(inp)[0]
      H = tf.shape(inp)[1]
      W = tf.shape(inp)[2]
      C = tf.shape(inp)[3]
      out = tf.transpose(out, [1, 2, 3, 0, 4])
      out = tf.reshape(out, [N, H, W, num_outs * out_filters])
    elif self.data_format == "NCHW":
      N = tf.shape(inp)[0]
      C = tf.shape(inp)[1]
      H = tf.shape(inp)[2]
      W = tf.shape(inp)[3]
      out = tf.transpose(out, [1, 0, 2, 3, 4])
      out = tf.reshape(out, [N, num_outs * out_filters, H, W])
    else:
      raise ValueError("Unknown data_format '{0}'".format(self.data_format))

    with tf.variable_scope("final_conv"):
      w = create_weight("w", [self.num_cells + 2, out_filters * out_filters])
      w = tf.gather(w, indices, axis=0)
      w = tf.reshape(w, [1, 1, num_outs * out_filters, out_filters])
      out = relu(out, self.leaky)
      out = tf.nn.conv2d(out, w, strides=[1, 1, 1, 1], padding="SAME",
                         data_format=self.data_format)
      out = batch_norm(out, is_training=True, data_format=self.data_format)

    out = tf.reshape(out, tf.shape(prev_layers[0]))

    return out
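
The bookkeeping with used determines which cell outputs are "loose ends": every sampled input index is one-hot encoded, the encodings are summed, and the indices that were never consumed are gathered and concatenated into the layer output. A small self-contained sketch of that selection (toy indices, an assumed four slots):

import tensorflow as tf

num_slots = 4
chosen = [tf.constant(0), tf.constant(0), tf.constant(2)]
used = tf.add_n([tf.one_hot(i, depth=num_slots, dtype=tf.int32)
                 for i in chosen])
unused = tf.reshape(tf.to_int32(tf.where(tf.equal(used, 0))), [-1])
# unused evaluates to [1, 3]: only those slots feed the concatenation.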
Example #6
  def _fixed_layer(self, layer_id, prev_layers, arc, out_filters, stride,
                   is_training, normal_or_reduction_cell="normal"):
    """
    Args:
      prev_layers: cache of previous layers. for skip connections
      is_training: for batch_norm
    """

    assert len(prev_layers) == 2
    layers = [prev_layers[0], prev_layers[1]]
    layers = self._maybe_calibrate_size(layers, out_filters,
                                        is_training=is_training)

    with tf.variable_scope("layer_base"):
      x = layers[1]
      inp_c = self._get_C(x)
      w = create_weight("w", [1, 1, inp_c, out_filters])
      x = relu(x, self.leaky)
      x = tf.nn.conv2d(x, w, [1, 1, 1, 1], "SAME",
                       data_format=self.data_format)
      x = batch_norm(x, is_training, data_format=self.data_format)
      layers[1] = x

    used = np.zeros([self.num_cells + 2], dtype=np.int32)
    f_sizes = [3, 5]
    for cell_id in range(self.num_cells):
      with tf.variable_scope("cell_{}".format(cell_id)):
        x_id = arc[4 * cell_id]
        used[x_id] += 1
        x_op = arc[4 * cell_id + 1]
        x = layers[x_id]
        x_stride = stride if x_id in [0, 1] else 1
        with tf.variable_scope("x_conv"):
          if x_op in [0, 1]:
            f_size = f_sizes[x_op]
            x = self._fixed_conv(x, f_size, out_filters, x_stride, is_training)
          elif x_op in [2, 3]:
            inp_c = self._get_C(x)
            if x_op == 2:
              x = tf.layers.average_pooling2d(
                x, [3, 3], [x_stride, x_stride], "SAME",
                data_format=self.actual_data_format)
            else:
              x = tf.layers.max_pooling2d(
                x, [3, 3], [x_stride, x_stride], "SAME",
                data_format=self.actual_data_format)
            if inp_c != out_filters:
              w = create_weight("w", [1, 1, inp_c, out_filters])
              x = relu(x, self.leaky)
              x = tf.nn.conv2d(x, w, [1, 1, 1, 1], "SAME",
                               data_format=self.data_format)
              x = batch_norm(x, is_training, data_format=self.data_format)
          else:
            inp_c = self._get_C(x)
            if x_stride > 1:
              assert x_stride == 2
              x = self._factorized_reduction(x, out_filters, 2, is_training)
            if inp_c != out_filters:
              w = create_weight("w", [1, 1, inp_c, out_filters])
              x = relu(x, self.leaky)
              x = tf.nn.conv2d(x, w, [1, 1, 1, 1], "SAME",
                               data_format=self.data_format)
              x = batch_norm(x, is_training, data_format=self.data_format)
          if (x_op in [0, 1, 2, 3] and
              self.drop_path_keep_prob is not None and
              is_training):
            x = self._apply_drop_path(x, layer_id)

        y_id = arc[4 * cell_id + 2]
        used[y_id] += 1
        y_op = arc[4 * cell_id + 3]
        y = layers[y_id]
        y_stride = stride if y_id in [0, 1] else 1
        with tf.variable_scope("y_conv"):
          if y_op in [0, 1]:
            f_size = f_sizes[y_op]
            y = self._fixed_conv(y, f_size, out_filters, y_stride, is_training)
          elif y_op in [2, 3]:
            inp_c = self._get_C(y)
            if y_op == 2:
              y = tf.layers.average_pooling2d(
                y, [3, 3], [y_stride, y_stride], "SAME",
                data_format=self.actual_data_format)
            else:
              y = tf.layers.max_pooling2d(
                y, [3, 3], [y_stride, y_stride], "SAME",
                data_format=self.actual_data_format)
            if inp_c != out_filters:
              w = create_weight("w", [1, 1, inp_c, out_filters])
              y = relu(y, self.leaky)
              y = tf.nn.conv2d(y, w, [1, 1, 1, 1], "SAME",
                               data_format=self.data_format)
              y = batch_norm(y, is_training, data_format=self.data_format)
          else:
            inp_c = self._get_C(y)
            if y_stride > 1:
              assert y_stride == 2
              y = self._factorized_reduction(y, out_filters, 2, is_training)
            if inp_c != out_filters:
              w = create_weight("w", [1, 1, inp_c, out_filters])
              y = relu(y, self.leaky)
              y = tf.nn.conv2d(y, w, [1, 1, 1, 1], "SAME",
                               data_format=self.data_format)
              y = batch_norm(y, is_training, data_format=self.data_format)

          if (y_op in [0, 1, 2, 3] and
              self.drop_path_keep_prob is not None and
              is_training):
            y = self._apply_drop_path(y, layer_id)

        out = x + y
        layers.append(out)
    out = self._fixed_combine(layers, used, out_filters, is_training,
                              normal_or_reduction_cell)

    return out
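
_apply_drop_path is not shown in this listing; a common drop-path implementation (an assumption about the helper, not necessarily the author's exact code) zeroes an entire path per example with probability 1 - keep_prob and rescales the survivors so the expected value is unchanged:

import tensorflow as tf

def drop_path(x, keep_prob):
  # NHWC assumed: one Bernoulli draw per example, broadcast over H, W, C.
  batch = tf.shape(x)[0]
  mask = tf.floor(keep_prob + tf.random_uniform([batch, 1, 1, 1],
                                                dtype=x.dtype))
  return x / keep_prob * mask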