Example 1
 def _guide(cls, input_tensor, params, is_training):
   n_guide_feats = params['guide_complexity']
   guidemap = conv(input_tensor, n_guide_feats, 1, 
                   batch_norm=True, is_training=is_training,
                   scope='conv1')
   guidemap = conv(guidemap, 1, 1, activation_fn=tf.nn.sigmoid, scope='conv2')
   guidemap = tf.squeeze(guidemap, axis=[3])
   return guidemap
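The conv call here is a repo helper, not stock TensorFlow. As a rough sketch of what this guide map computes, assuming conv wraps a 2-D convolution with an optional batch norm and a ReLU default activation (an assumption about the helper, not something shown in the snippet), the same two pointwise layers in plain TF 1.x ops would look like:

import tensorflow as tf  # TF 1.x API

def guide_sketch(input_tensor, n_guide_feats, is_training):
    # Hypothetical stand-in for the repo's conv() helper: 1x1 conv + BN + ReLU.
    x = tf.layers.conv2d(input_tensor, n_guide_feats, 1, name='conv1')
    x = tf.layers.batch_normalization(x, training=is_training)
    x = tf.nn.relu(x)
    # Second 1x1 conv collapses to a single channel, squashed into [0, 1].
    x = tf.layers.conv2d(x, 1, 1, activation=tf.nn.sigmoid, name='conv2')
    # Drop the trailing channel dim: [bs, h, w, 1] -> [bs, h, w].
    return tf.squeeze(x, axis=[3])

Because both convs use kernel size 1, the guide map is a purely per-pixel function of the input, which is what lets it run at full resolution cheaply.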
Example 2
  def _coefficients(cls, input_tensor, params, is_training):
    bs = input_tensor.get_shape().as_list()[0]
    gd = params['luma_bins']
    cm = params['channel_multiplier']
    spatial_bin = params['spatial_bin']

    # -----------------------------------------------------------------------
    with tf.variable_scope('splat'):
      n_ds_layers = int(np.log2(params['net_input_size']/spatial_bin))

      current_layer = input_tensor
      for i in range(n_ds_layers):
        if i > 0:  # don't normalize first layer
          use_bn = params['batch_norm']
        else:
          use_bn = False
        current_layer = conv(current_layer, cm*(2**i)*gd, 3, stride=2,
                             batch_norm=use_bn, is_training=is_training,
                             scope='conv{}'.format(i+1))

      splat_features = current_layer
    # -----------------------------------------------------------------------

    # -----------------------------------------------------------------------
    with tf.variable_scope('global'):
      n_global_layers = int(np.log2(spatial_bin/4))  # 4x4 at the coarsest lvl

      current_layer = splat_features
      for i in range(2):
        current_layer = conv(current_layer, 8*cm*gd, 3, stride=2,
            batch_norm=params['batch_norm'], is_training=is_training,
            scope="conv{}".format(i+1))
      _, lh, lw, lc = current_layer.get_shape().as_list()
      current_layer = tf.reshape(current_layer, [bs, lh*lw*lc])

      current_layer = fc(current_layer, 32*cm*gd, 
                         batch_norm=params['batch_norm'], is_training=is_training,
                         scope="fc1")
      current_layer = fc(current_layer, 16*cm*gd, 
                         batch_norm=params['batch_norm'], is_training=is_training,
                         scope="fc2")
      # don't normalize before fusion
      current_layer = fc(current_layer, 8*cm*gd, activation_fn=None, scope="fc3")
      global_features = current_layer
    # -----------------------------------------------------------------------

    # -----------------------------------------------------------------------
    with tf.variable_scope('local'):
      current_layer = splat_features
      current_layer = conv(current_layer, 8*cm*gd, 3, 
                           batch_norm=params['batch_norm'], 
                           is_training=is_training,
                           scope='conv1')
      # don't normalize before fusion
      current_layer = conv(current_layer, 8*cm*gd, 3, activation_fn=None,
                           use_bias=False, scope='conv2')
      grid_features = current_layer
    # -----------------------------------------------------------------------

    # -----------------------------------------------------------------------
    with tf.name_scope('fusion'):
      fusion_grid = grid_features
      fusion_global = tf.reshape(global_features, [bs, 1, 1, 8*cm*gd])
      fusion = tf.nn.relu(fusion_grid+fusion_global)
    # -----------------------------------------------------------------------

    # -----------------------------------------------------------------------
    with tf.variable_scope('prediction'):
      current_layer = fusion
      current_layer = conv(current_layer, gd*cls.n_out()*cls.n_in(), 1,
                                  activation_fn=None, scope='conv1')

      with tf.name_scope('unroll_grid'):
        current_layer = tf.stack(
            tf.split(current_layer, cls.n_out()*cls.n_in(), axis=3), axis=4)
        current_layer = tf.stack(
            tf.split(current_layer, cls.n_in(), axis=4), axis=5)
      tf.add_to_collection('packed_coefficients', current_layer)
    # -----------------------------------------------------------------------

    return current_layer
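For reference, with the hdrnet paper's default parameters (net_input_size = 256, spatial_bin = 16, gd = 8, cm = 1; these defaults are assumed here, not stated in the snippet), the splat path runs log2(256/16) = 4 stride-2 convs. A quick NumPy check of the resulting feature-map shapes:

import numpy as np

# Assumed hdrnet defaults; not stated in the snippet itself.
net_input_size, spatial_bin, gd, cm = 256, 16, 8, 1

n_ds_layers = int(np.log2(net_input_size / spatial_bin))  # 4
size, shapes = net_input_size, []
for i in range(n_ds_layers):
    size //= 2  # each stride-2 conv halves the spatial dimensions
    shapes.append((size, size, cm * (2 ** i) * gd))
print(shapes)  # [(128, 128, 8), (64, 64, 16), (32, 32, 32), (16, 16, 64)]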
Example 3
  def _coefficients(cls, input_tensor, params, is_training):
    bs = input_tensor.get_shape().as_list()[0]
    gd = params['luma_bins']
    cm = params['channel_multiplier']
    spatial_bin = params['spatial_bin']

    # -----------------------------------------------------------------------
    with tf.variable_scope('splat'):
      n_ds_layers = int(np.log2(params['net_input_size']/spatial_bin))

      current_layer = input_tensor
      for i in range(n_ds_layers):
        if i > 0:  # don't normalize first layer
          use_bn = params['batch_norm']
        else:
          use_bn = False
        current_layer = conv(current_layer, cm*(2**i)*gd, 3, stride=2,
                             batch_norm=use_bn, is_training=is_training,
                             scope='conv{}'.format(i+1))

      splat_features = current_layer
    # -----------------------------------------------------------------------

    # -----------------------------------------------------------------------
    with tf.variable_scope('global'):
      n_global_layers = int(np.log2(spatial_bin/4))  # 4x4 at the coarsest lvl

      current_layer = splat_features
      for i in range(2):
        current_layer = conv(current_layer, 8*cm*gd, 3, stride=2,
            batch_norm=params['batch_norm'], is_training=is_training,
            scope="conv{}".format(i+1))
      _, lh, lw, lc = current_layer.get_shape().as_list()
      current_layer = tf.reshape(current_layer, [bs, lh*lw*lc])

      current_layer = fc(current_layer, 32*cm*gd, 
                         batch_norm=params['batch_norm'], is_training=is_training,
                         scope="fc1")
      current_layer = fc(current_layer, 16*cm*gd, 
                         batch_norm=params['batch_norm'], is_training=is_training,
                         scope="fc2")
      # don't normalize before fusion
      current_layer = fc(current_layer, 8*cm*gd, activation_fn=None, scope="fc3")
      global_features = current_layer
    # -----------------------------------------------------------------------

    # -----------------------------------------------------------------------
    with tf.variable_scope('local'):
      current_layer = splat_features
      current_layer = conv(current_layer, 8*cm*gd, 3, 
                           batch_norm=params['batch_norm'], 
                           is_training=is_training,
                           scope='conv1')
      # don't normalize before fusion
      current_layer = conv(current_layer, 8*cm*gd, 3, activation_fn=None,
                           use_bias=False, scope='conv2')
      grid_features = current_layer
    # -----------------------------------------------------------------------
    # take the sum of the local and global features
    # -----------------------------------------------------------------------
    with tf.name_scope('fusion'):
      fusion_grid = grid_features
      fusion_global = tf.reshape(global_features, [bs, 1, 1, 8*cm*gd])
      fusion = tf.nn.relu(fusion_grid+fusion_global)
    # -----------------------------------------------------------------------
    # linear prediction: gd*n_out*n_in = 96 output channels
    # -----------------------------------------------------------------------
    with tf.variable_scope('prediction'):
      current_layer = fusion
      current_layer = conv(current_layer, gd*cls.n_out()*cls.n_in(), 1,
                                  activation_fn=None, scope='conv1')

      with tf.name_scope('unroll_grid'):
        current_layer = tf.stack(
            tf.split(current_layer, cls.n_out()*cls.n_in(), axis=3), axis=4)
        current_layer = tf.stack(
            tf.split(current_layer, cls.n_in(), axis=4), axis=5)
      tf.add_to_collection('packed_coefficients', current_layer)
    # -----------------------------------------------------------------------

    return current_layer
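The fusion step leans on broadcasting: reshaping the global vector to [bs, 1, 1, 64] lets the add repeat it over every spatial cell of the 16x16 grid. A minimal NumPy sketch of the same arithmetic (shapes assume the defaults above):

import numpy as np

bs = 1
grid_features = np.random.randn(bs, 16, 16, 64).astype(np.float32)
global_features = np.random.randn(bs, 64).astype(np.float32)

# [bs, 64] -> [bs, 1, 1, 64] so the sum broadcasts over the 16x16 grid.
fusion_global = global_features.reshape(bs, 1, 1, 64)
fusion = np.maximum(grid_features + fusion_global, 0.0)  # pointwise add + ReLU
print(fusion.shape)  # (1, 16, 16, 64)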
Example 4
    def _coefficients(cls, input_tensor, params, is_training):
        bs = input_tensor.get_shape().as_list()[0]  # batch size
        gd = params['luma_bins']  # 8: grid depth along the guidance (luma) channel
        cm = params['channel_multiplier']  # 1: multiplier for the number of intermediate channels
        spatial_bin = params['spatial_bin']  # 16: grid resolution in height & width

        # -----------------------------------------------------------------------
        with tf.variable_scope('splat'):
            n_ds_layers = int(np.log2(params['net_input_size'] / spatial_bin))

            current_layer = input_tensor
            for i in range(n_ds_layers):
                if i > 0:  # don't normalize first layer
                    use_bn = params['batch_norm']
                else:
                    use_bn = False
                current_layer = conv(current_layer,
                                     cm * (2**i) * gd,
                                     3,
                                     stride=2,
                                     batch_norm=use_bn,
                                     is_training=is_training,
                                     scope='conv{}'.format(i + 1))

            splat_features = current_layer
        # -----------------------------------------------------------------------

        # -----------------------------------------------------------------------
        with tf.variable_scope('global'):
            n_global_layers = int(np.log2(spatial_bin /
                                          4))  # 4x4 at the coarsest lvl

            current_layer = splat_features
            for i in range(2):  # shouldn't be n_global_layers???
                current_layer = conv(current_layer,
                                     8 * cm * gd,
                                     3,
                                     stride=2,
                                     batch_norm=params['batch_norm'],
                                     is_training=is_training,
                                     scope="conv{}".format(i + 1))
            _, lh, lw, lc = current_layer.get_shape().as_list()
            current_layer = tf.reshape(current_layer, [bs, lh * lw * lc])

            current_layer = fc(current_layer,
                               32 * cm * gd,
                               batch_norm=params['batch_norm'],
                               is_training=is_training,
                               scope="fc1")
            current_layer = fc(current_layer,
                               16 * cm * gd,
                               batch_norm=params['batch_norm'],
                               is_training=is_training,
                               scope="fc2")
            # don't normalize before fusion
            current_layer = fc(current_layer,
                               8 * cm * gd,
                               activation_fn=None,
                               scope="fc3")
            global_features = current_layer
        # -----------------------------------------------------------------------

        # -----------------------------------------------------------------------
        with tf.variable_scope('local'):
            current_layer = splat_features
            current_layer = conv(current_layer,
                                 8 * cm * gd,
                                 3,
                                 batch_norm=params['batch_norm'],
                                 is_training=is_training,
                                 scope='conv1')
            # don't normalize before fusion
            current_layer = conv(current_layer,
                                 8 * cm * gd,
                                 3,
                                 activation_fn=None,
                                 use_bias=False,
                                 scope='conv2')
            grid_features = current_layer
        # -----------------------------------------------------------------------

        # -----------------------------------------------------------------------
        with tf.name_scope('fusion'):
            fusion_grid = grid_features
            fusion_global = tf.reshape(global_features,
                                       [bs, 1, 1, 8 * cm * gd])
            fusion = tf.nn.relu(fusion_grid + fusion_global)
        # -----------------------------------------------------------------------

        # -----------------------------------------------------------------------
        with tf.variable_scope('prediction'):
            current_layer = fusion
            current_layer = conv(current_layer,
                                 gd * cls.n_out() * cls.n_in(),
                                 1,
                                 activation_fn=None,
                                 scope='conv1')

            with tf.name_scope('unroll_grid'):
                # splits the bs x 16 x 16 x 96 tensor into 12 tensors of bs x 16 x 16 x 8 and then stacks them, so the result is bs x 16 x 16 x 8 x 12
                current_layer = tf.stack(tf.split(current_layer,
                                                  cls.n_out() * cls.n_in(),
                                                  axis=3),
                                         axis=4)
                # splits the bs x 16 x 16 x 8 x 12 tensor into 4 tensors of bs x 16 x 16 x 8 x 3 and then stacks them into bs x 16 x 16 x 8 x 3 x 4
                current_layer = tf.stack(tf.split(current_layer,
                                                  cls.n_in(),
                                                  axis=4),
                                         axis=5)
            tf.add_to_collection('packed_coefficients', current_layer)
        # -----------------------------------------------------------------------

        return current_layer
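On the annotator's question above (shouldn't the loop run for n_global_layers?): with the default spatial_bin = 16, n_global_layers = log2(16/4) = 2, so the hard-coded range(2) happens to coincide with it; for any other spatial_bin the two would diverge. A quick check:

import numpy as np

for spatial_bin in (8, 16, 32):
    n_global_layers = int(np.log2(spatial_bin / 4))
    print(spatial_bin, n_global_layers)  # 8 -> 1, 16 -> 2 (= range(2)), 32 -> 3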
Example 5
  def _coefficients(cls, input_tensor, params, is_training): # low-res coefficient prediction
    bs = input_tensor.get_shape().as_list()[0]
    gd = params['luma_bins']  # Number of BGU bins for the luminance.
    # Bilateral grid parameters
    cm = params['channel_multiplier'] # Factor to control net throughput (number of intermediate channels).
    spatial_bin = params['spatial_bin'] # Size of the spatial BGU bins (pixels).

    # -----------------------------------------------------------------------
    # low-level features Si
    with tf.variable_scope('splat'):
      n_ds_layers = int(np.log2(params['net_input_size']/spatial_bin))

      current_layer = input_tensor
      for i in range(n_ds_layers): # four conv layers
        if i > 0:  # don't normalize first layer
          use_bn = params['batch_norm']
        else:
          use_bn = False
        current_layer = conv(current_layer, cm*(2**i)*gd, 3, stride=2, # channel counts imply cm*gd = 8
                             batch_norm=use_bn, is_training=is_training,
                             scope='conv{}'.format(i+1))

      splat_features = current_layer
    # -----------------------------------------------------------------------

    # -----------------------------------------------------------------------
    # 3.1.3 global features Gi: two conv layers followed by three fully connected layers yield the global features
    with tf.variable_scope('global'):
      n_global_layers = int(np.log2(spatial_bin/4))  # 4x4 at the coarsest lvl

      current_layer = splat_features

      for i in range(2): # two conv layers
        current_layer = conv(current_layer, 8*cm*gd, 3, stride=2,
            batch_norm=params['batch_norm'], is_training=is_training,
            scope="conv{}".format(i+1))

      _, lh, lw, lc = current_layer.get_shape().as_list()
      current_layer = tf.reshape(current_layer, [bs, lh*lw*lc])

      # three fully connected layers
      current_layer = fc(current_layer, 32*cm*gd, 
                         batch_norm=params['batch_norm'], is_training=is_training,
                         scope="fc1")
      current_layer = fc(current_layer, 16*cm*gd, 
                         batch_norm=params['batch_norm'], is_training=is_training,
                         scope="fc2")
      # don't normalize before fusion
      current_layer = fc(current_layer, 8*cm*gd, activation_fn=None, scope="fc3")

      global_features = current_layer # (1, 64)
    # -----------------------------------------------------------------------

    # -----------------------------------------------------------------------
    # 3.1.2 local features Li: two conv layers yield the local features
    with tf.variable_scope('local'):
      current_layer = splat_features

      # two conv layers
      current_layer = conv(current_layer, 8*cm*gd, 3, 
                           batch_norm=params['batch_norm'], 
                           is_training=is_training,
                           scope='conv1')
      # don't normalize before fusion
      current_layer = conv(current_layer, 8*cm*gd, 3, activation_fn=None,
                           use_bias=False, scope='conv2')

      grid_features = current_layer
    # -----------------------------------------------------------------------

    # -----------------------------------------------------------------------
    # 3.1.4 fuse the local features with the global features
    # “fuse the contributions of the local and global paths with a pointwise affine mixing followed by a ReLU activation”
    with tf.name_scope('fusion'):
      fusion_grid = grid_features # (1, 16, 16, 64)
      fusion_global = tf.reshape(global_features, [bs, 1, 1, 8*cm*gd])  # (1, 1, 1, 64)
      fusion = tf.nn.relu(fusion_grid+fusion_global) # (1, 16, 16, 64) Eq. (2); this yields the fusion F
    # fusion is a 16*16*64 array of features
    # -----------------------------------------------------------------------

    # -----------------------------------------------------------------------
    # 3.1.4 linear prediction: from the fusion we make the final 1x1 linear prediction, producing a 16x16 map with 96 channels
    with tf.variable_scope('prediction'):
      current_layer = fusion # (1, 16, 16, 64)
      current_layer = conv(current_layer, gd*cls.n_out()*cls.n_in(), 1,
                                  activation_fn=None, scope='conv1') # Eq. (3); this yields the feature map A

      # 3.2 Image features as a bilateral grid
      with tf.name_scope('unroll_grid'): # Eq. (4)
        current_layer = tf.stack(
            tf.split(current_layer, cls.n_out()*cls.n_in(), axis=3), axis=4) # (1,16,16,8,12)

        current_layer = tf.stack(
            tf.split(current_layer, cls.n_in(), axis=4), axis=5) # (1,16,16,8,3,4)

      tf.add_to_collection('packed_coefficients', current_layer)
    # -----------------------------------------------------------------------

    return current_layer
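The unroll_grid step is pure repacking, no arithmetic. A NumPy sketch (with n_out = 3, n_in = 4, gd = 8, the values implied by the shape comments) reproduces the (1, 16, 16, 96) -> (1, 16, 16, 8, 3, 4) transformation:

import numpy as np

bs, n_out, n_in, gd = 1, 3, 4, 8  # values implied by the shape comments
x = np.random.randn(bs, 16, 16, gd * n_out * n_in)  # (1, 16, 16, 96)

# 12 slices of (1, 16, 16, 8), stacked on a new axis -> (1, 16, 16, 8, 12)
x = np.stack(np.split(x, n_out * n_in, axis=3), axis=4)
# 4 slices of (1, 16, 16, 8, 3), stacked again -> (1, 16, 16, 8, 3, 4)
x = np.stack(np.split(x, n_in, axis=4), axis=5)
print(x.shape)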
Example 6
    def _coefficients(cls, input_tensor, params, is_training):
        bs = input_tensor.get_shape().as_list()[0]
        gd = params['luma_bins']
        cm = params['channel_multiplier']
        spatial_bin = params['spatial_bin']

        # -----------------------------------------------------------------------
        with tf.variable_scope('splat'):
            # number of downsampling layers; n_ds_layers is 4 by default
            n_ds_layers = int(np.log2(params['net_input_size'] / spatial_bin))

            current_layer = input_tensor
            for i in range(n_ds_layers):
                if i > 0:  # don't normalize first layer
                    use_bn = params['batch_norm']
                else:
                    use_bn = False
                # four conv layers, kernel size always 3; stride=2 halves the feature map each time:
                # [-1,256,256,3]->[-1,128,128,8]
                # [-1,128,128,8]->[-1,64,64,16]
                # [-1,64,64,16]->[-1,32,32,32]
                # [-1,32,32,32]->[-1,16,16,64]
                current_layer = conv(current_layer,
                                     cm * (2**i) * gd,
                                     3,
                                     stride=2,
                                     batch_norm=use_bn,
                                     is_training=is_training,
                                     scope='conv{}'.format(i + 1))

            # [-1,16,16,64]
            splat_features = current_layer
        # -----------------------------------------------------------------------

        # -----------------------------------------------------------------------
        with tf.variable_scope('global'):
            # two more conv layers yield the global feature maps
            n_global_layers = int(np.log2(spatial_bin /
                                          4))  # 4x4 at the coarsest lvl

            current_layer = splat_features
            for i in range(2):
                # [-1,16,16,64]->[-1,8,8,64]
                # [-1,8,8,64]->[-1,4,4,64]
                current_layer = conv(current_layer,
                                     8 * cm * gd,
                                     3,
                                     stride=2,
                                     batch_norm=params['batch_norm'],
                                     is_training=is_training,
                                     scope="conv{}".format(i + 1))
                for j in range(0):  # note: range(0) never iterates; this inner conv is dead code as written
                    current_layer = conv(current_layer,
                                         8 * cm * gd,
                                         3,
                                         stride=1,
                                         batch_norm=params['batch_norm'],
                                         is_training=is_training,
                                         scope="conv{}_{}".format(
                                             i + 1, j + 1))
            _, lh, lw, lc = current_layer.get_shape().as_list()
            # flatten the global feature maps: [-1, 4*4*64]
            current_layer = tf.reshape(current_layer, [bs, lh * lw * lc])

            # fully connected: [-1, 256]
            current_layer = fc(current_layer,
                               32 * cm * gd,
                               batch_norm=params['batch_norm'],
                               is_training=is_training,
                               scope="fc1")
            # [-1, 128]
            current_layer = fc(current_layer,
                               16 * cm * gd,
                               batch_norm=params['batch_norm'],
                               is_training=is_training,
                               scope="fc2")
            # don't normalize before fusion
            current_layer = fc(current_layer,
                               8 * cm * gd,
                               activation_fn=None,
                               scope="fc3")
            # [-1, 64]
            global_features = current_layer
        # -----------------------------------------------------------------------

        # -----------------------------------------------------------------------
        with tf.variable_scope('local'):
            # The strided convs have already downsampled the input to a 16x16 map,
            # so some spatial information survives, just at a coarse resolution,
            # e.g. which regions are brighter or darker. Without this local path
            # all spatial information would be lost; the paper shows a comparison.
            current_layer = splat_features
            current_layer = conv(current_layer,
                                 8 * cm * gd,
                                 3,
                                 batch_norm=params['batch_norm'],
                                 is_training=is_training,
                                 scope='conv1')
            # don't normalize before fusion
            for i in range(1):
                current_layer = conv(current_layer,
                                     8 * cm * gd,
                                     3,
                                     activation_fn=None,
                                     use_bias=False,
                                     scope='conv%d' % (i + 2))
            # [-1, 16, 16, 64]
            grid_features = current_layer
        # -----------------------------------------------------------------------

        # -----------------------------------------------------------------------
        with tf.name_scope('fusion'):
            fusion_grid = grid_features
            fusion_global = tf.reshape(global_features,
                                       [bs, 1, 1, 8 * cm * gd])
            # fuse by adding the [-1,16,16,64] local feature maps and the [-1,64] global coefficients (broadcast)
            fusion = tf.nn.relu(fusion_grid + fusion_global)
        # -----------------------------------------------------------------------

        # -----------------------------------------------------------------------
        # Map the 64 16x16 feature maps into a bilateral grid of depth 8 (16x16x8). Each cell holds 12 (3x4) coefficients w_c, and predict_color = matmul([r,g,b,1], w_c).
        with tf.variable_scope('prediction'):
            # [-1,16,16,64]->[-1,16,16,96]
            current_layer = fusion
            current_layer = conv(current_layer,
                                 gd * cls.n_out() * cls.n_in(),
                                 1,
                                 activation_fn=None,
                                 scope='conv1')

            with tf.name_scope('unroll_grid'):
                # split yields a list of tensors [[-1,16,16,8] * 12]
                # stack then yields [-1,16,16,8,12]
                current_layer = tf.stack(tf.split(current_layer,
                                                  cls.n_out() * cls.n_in(),
                                                  axis=3),
                                         axis=4)
                # regroup the 12 channels into 3x4, giving [-1,16,16,8,3,4]: the coefficient grid
                current_layer = tf.stack(tf.split(current_layer,
                                                  cls.n_in(),
                                                  axis=4),
                                         axis=5)
            tf.add_to_collection('packed_coefficients', current_layer)
        # -----------------------------------------------------------------------

        return current_layer
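Per the comment above, each of the 16x16x8 grid cells ends up holding a 3x4 affine matrix, and applying it to a pixel is one matrix-vector product in homogeneous coordinates. A sketch of that per-pixel step, using the (n_out, n_in) = (3, 4) layout of the unpacked grid (the full pipeline first slices the grid with the guide map; only the affine application is shown here):

import numpy as np

w_c = np.random.randn(3, 4)        # one cell's coefficients: n_out x n_in
rgb = np.array([0.2, 0.5, 0.8])    # an input pixel
predict_color = w_c @ np.append(rgb, 1.0)  # 3x4 matrix times [r, g, b, 1]
print(predict_color)               # the affinely transformed output color (3,)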