Example #1
def resnet_conv(input,
                kernel_size,
                is_training,
                stride=1,
                num_outputs=None,
                normalizer_fn=None,
                activation_fn=None,
                scope=None):
    with variable_scope.variable_scope(scope, 'resnet_conv', [input]) as sc:
        last_dim = utils.last_dimension(input.get_shape())
        num_outputs = num_outputs or last_dim

        output = layers.convolution(inputs=input,
                                    num_outputs=last_dim,
                                    kernel_size=[1, 1, 1])

        output = layers.convolution(inputs=output,
                                    num_outputs=last_dim,
                                    kernel_size=[1, 1, kernel_size])

        output = layers.convolution(inputs=output,
                                    num_outputs=last_dim,
                                    kernel_size=[1, kernel_size, 1])

        output = layers.convolution(
            inputs=output,
            num_outputs=num_outputs,
            kernel_size=[kernel_size, 1, 1],
            activation_fn=activation_fn,
            normalizer_fn=normalizer_fn,
            normalizer_params={'is_training': is_training},
            stride=stride)

        return output
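
The helper decomposes one dense kernel_size^3 3-D convolution into a channel-mixing 1x1x1 convolution followed by three 1-D convolutions along each spatial axis, applying normalization, the activation, and the stride only on the last step. A minimal usage sketch, assuming TF 1.x with `layers = tf.contrib.layers`, `utils` from `tensorflow.contrib.layers.python.layers`, and `variable_scope` from `tensorflow.python.ops`; the input shape is illustrative:

import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.ops import variable_scope

# NDHWC volume: batch of 2, a 16x16x16 grid, 8 channels (illustrative only).
volume = tf.placeholder(tf.float32, [2, 16, 16, 16, 8])
out = resnet_conv(volume,
                  kernel_size=3,
                  is_training=True,
                  num_outputs=16,
                  normalizer_fn=layers.batch_norm,
                  activation_fn=tf.nn.relu)  # -> [2, 16, 16, 16, 16]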
Example #2
def brightness_predict_net(inp_img, inp_wb, inp_iso, inp_t):

    h = tf.shape(inp_img)[1]
    w = tf.shape(inp_img)[2]
    inp_img_normed = normalize_var(inp_img, 0, 0.00018782060752181462)

    layer_inp_wb = tf.ones([1, h, w, 4]) * normalize_var(
        inp_wb, 1.5084804, 0.6111772 * 0.6111772)
    layer_inp_iso = tf.ones([1, h, w, 1]) * normalize_var(
        inp_iso, 471.72858, 264.23517 * 264.23517)
    layer_inp_t = tf.ones([1, h, w, 1]) * normalize_var(
        inp_t, 0.08489772, 0.1368367 * 0.1368367)


    inp_concated = tf.concat([inp_img_normed,
                              layer_inp_wb,
                              layer_inp_iso,
                              layer_inp_t], 3)

    inp_concated.set_shape(shape=(pa.bs, None, None, 4 + 4 + 2))

    with tf.variable_scope('brightness_predict_net'):

        if pa.ps != 512:
            input_BatchRS = tf.image.resize_nearest_neighbor(
                inp_concated, (512, 512))
        else:
            input_BatchRS = inp_concated

        c1 = convolution(input_BatchRS, 16, [3, 3], stride=4,
                         activation_fn=lrelu, trainable=pa.train_brightness_net)  # 128x128x16
        c2 = convolution(c1, 16, [3, 3], stride=4,
                         activation_fn=lrelu, trainable=pa.train_brightness_net)  # 32x32x16
        c3 = convolution(c2, 32, [3, 3], stride=2,
                         activation_fn=lrelu, trainable=pa.train_brightness_net)  # 16x16x32
        c4 = convolution(c3, 32, [3, 3], stride=2,
                         activation_fn=lrelu, trainable=pa.train_brightness_net)  # 8x8x32
        c5 = convolution(c4, 4, [3, 3], stride=2,
                         activation_fn=lrelu, trainable=pa.train_brightness_net)  # 4x4x4
        c5_vec = tf.reshape(c5, shape=[tf.shape(c5)[0], -1])
        c6_vec = layer_fc(c5_vec,
                          4 * 4 * 4,
                          32,
                          relu_free=False,
                          name='dense1',
                          trainable=pa.train_brightness_net)
        gt_time = layer_fc(c6_vec,
                           32,
                           1,
                           relu_free=True,
                           name='dense2',
                           trainable=pa.train_brightness_net)

        return gt_time
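
The snippet depends on unshown project helpers (`normalize_var`, `layer_fc`, `lrelu`, and the `pa` config module); Example #9 reuses the same ones. At every call site the third argument to `normalize_var` is a squared constant, so it plausibly standardizes a tensor by a precomputed mean and variance. A minimal sketch under that assumption (signature inferred, not confirmed by the source):

# Inferred sketch: standardize x given a precomputed mean and variance.
def normalize_var(x, mean, variance, eps=1e-12):
    return (x - mean) / tf.sqrt(variance + eps)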
Example #3
def build_network(input_hold):
    net = input_hold
    _layers = {'input': net}

    # Block 1 : Separable CNN
    net = layers.separable_conv2d(
        inputs=net,
        num_outputs=32,
        kernel_size=4,
        stride=2,
        padding='VALID',
        depth_multiplier=4,
    )
    _layers['sepCNN1'] = net

    # Block 2 : Attention (with residual connection)
    net, att_layers = non_local_nn_2d(net,
                                      16,
                                      pool=False,
                                      name='non_local',
                                      return_layers=True)
    _layers['attention'] = att_layers['attention']
    _layers['NLNN'] = net

    # Block 3 : Convolution
    net = layers.convolution(inputs=net,
                             num_outputs=64,
                             kernel_size=3,
                             stride=2,
                             padding='VALID')
    _layers['CNN1'] = net
    net = layers.convolution(inputs=net,
                             num_outputs=64,
                             kernel_size=2,
                             stride=2,
                             padding='VALID')
    _layers['CNN2'] = net

    # Block 4 : Feature Vector
    net = layers.flatten(net)
    _layers['flat'] = net
    net = layers.fully_connected(
        net,
        128,
        activation_fn=None,
    )
    _layers['dense1'] = net

    return net, _layers
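
`non_local_nn_2d` is an external helper, presumably a 2-D non-local (self-attention) block wrapped in a residual connection, as the comment suggests. A minimal usage sketch, assuming TF 1.x with `layers = tf.contrib.layers`, that `non_local_nn_2d` is importable from the surrounding project, and an illustrative 84x84x3 input:

import tensorflow as tf
from tensorflow.contrib import layers

state = tf.placeholder(tf.float32, [None, 84, 84, 3])
features, endpoints = build_network(state)  # features: [None, 128]
attention_map = endpoints['attention']      # kept for visualization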
Example #4
def conv_model(feature, target, mode):
    """2-layer convolution model."""
    # Convert the target to a one-hot tensor of shape (batch_size, 10), with
    # an on-value of 1 for each one-hot vector of length 10.
    target = tf.one_hot(tf.cast(target, tf.int32), 10, 1, 0)

    # Reshape feature to a 4-D tensor whose 2nd and 3rd dimensions are image
    # width and height, and whose final dimension is the number of color channels.
    feature = tf.reshape(feature, [-1, 28, 28, 1])

    # First conv layer will compute 32 features for each 5x5 patch.
    with tf.variable_scope('conv_layer1'):
        h_conv1 = layers.convolution(feature,
                                     32,
                                     kernel_size=[5, 5],
                                     activation_fn=tf.nn.relu)
        h_pool1 = max_pool_2x2(h_conv1)

    # Second conv layer will compute 64 features for each 5x5 patch.
    with tf.variable_scope('conv_layer2'):
        h_conv2 = layers.convolution(h_pool1,
                                     64,
                                     kernel_size=[5, 5],
                                     activation_fn=tf.nn.relu)
        h_pool2 = max_pool_2x2(h_conv2)
        # reshape tensor into a batch of vectors
        h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])

    # Densely connected layer with 1024 neurons.
    h_fc1 = layers.dropout(layers.fully_connected(h_pool2_flat,
                                                  1024,
                                                  activation_fn=tf.nn.relu),
                           keep_prob=0.5,
                           is_training=mode == tf.contrib.learn.ModeKeys.TRAIN)

    # Compute logits (1 per class) and compute loss.
    logits = layers.fully_connected(h_fc1, 10, activation_fn=None)
    loss = tf.contrib.losses.softmax_cross_entropy(logits, target)

    # Create a tensor for training op.
    train_op = layers.optimize_loss(loss,
                                    tf.contrib.framework.get_global_step(),
                                    optimizer='SGD',
                                    learning_rate=0.001)

    return tf.argmax(logits, 1), loss, train_op
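
`max_pool_2x2` is not shown; in the classic tf.contrib.learn MNIST example this snippet follows, it is a 2x2 max pool with stride 2, which is what reduces 28x28 to 7x7 over the two blocks. A sketch matching that convention:

def max_pool_2x2(tensor_in):
    # Halve both spatial dimensions: 28x28 -> 14x14 -> 7x7 across two blocks.
    return tf.nn.max_pool(tensor_in,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME')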
Example #5
def down_layer(inputs, depth, model_config):
  """A single encoder layer."""
  normalizer, normalizer_params = get_normalizer_and_mode(model_config)
  return contrib_layers.convolution(
      inputs=inputs,
      num_outputs=depth,
      kernel_size=3,
      stride=2,
      padding='SAME',
      normalizer_fn=normalizer,
      normalizer_params=normalizer_params,
      activation_fn=tf.nn.leaky_relu)
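
Examples #5, #6, and #7 all call an unshown `get_normalizer_and_mode(model_config)`. A plausible contract is that it maps a config flag to a `tf.contrib.layers` normalizer plus its params dict, or to no normalizer at all; the sketch below, including the `use_batch_norm` and `is_training` keys, is an assumption:

# Hypothetical stand-in for get_normalizer_and_mode; the key names are assumed.
def get_normalizer_and_mode(model_config):
    if model_config.get('use_batch_norm', False):
        return contrib_layers.batch_norm, {
            'is_training': model_config.get('is_training', True)}
    return None, None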
Example #6
def conv_layer(inputs, depth, model_config):
  """A single 3x3 convolutional layer with stride 1."""
  normalizer, normalizer_params = get_normalizer_and_mode(model_config)
  ibs, ih, iw, ic = inputs.get_shape().as_list()
  outputs = contrib_layers.convolution(
      inputs=inputs,
      num_outputs=depth,
      kernel_size=3,
      stride=1,
      padding='SAME',
      normalizer_fn=normalizer,
      normalizer_params=normalizer_params,
      activation_fn=tf.nn.leaky_relu)
  obs, oh, ow, oc = outputs.get_shape().as_list()
  log.info(
      'Applying conv+relu layer. Input: [%i, %i, %i, %i]. Output: [%i, %i, %i, %i].'
      % (ibs, ih, iw, ic, obs, oh, ow, oc))
  return outputs
Example #7
def up_layer(inputs, spatial_dims, depth, model_config):
  """A single decoder layer."""
  normalizer, normalizer_params = get_normalizer_and_mode(model_config)
  input_rank = inputs.get_shape().ndims
  if input_rank != 4:
    if input_rank != 5:
      raise ValueError('Unexpected input dimensionality: %i' % input_rank)
    raise ValueError('3D Upsampling has not been implemented.')
  layer = contrib_layers.convolution(
      inputs=inputs,
      num_outputs=depth,
      kernel_size=5,
      padding='SAME',
      normalizer_fn=normalizer,
      normalizer_params=normalizer_params,
      activation_fn=tf.nn.leaky_relu)
  return tf.image.resize_images(
      images=layer, size=spatial_dims, align_corners=True)
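
A minimal encoder/decoder pairing sketch, reusing `down_layer` from Example #5 and the hypothetical `model_config` contract sketched there (shapes are illustrative):

model_config = {'use_batch_norm': True, 'is_training': True}  # assumed contract
x = tf.placeholder(tf.float32, [4, 64, 64, 3])
encoded = down_layer(x, depth=32, model_config=model_config)  # 64x64 -> 32x32
decoded = up_layer(encoded, spatial_dims=[64, 64], depth=16,
                   model_config=model_config)                 # back to 64x64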
Example #8
def res_net(
        features,
        res_blocks_size,
        num_classes,
        res_func,
        multi_k=1,
        is_training=True,
        keep_prob=1.0,
        is_add_multiplier=False,
        is_double_size=False,
        scope=None):
    with tf.variable_scope(scope, 'drl_mp', [features]):
        # 128 / 1
        net = features

        with tf.variable_scope('init'):
            first_output = np.array(res_blocks_size).flatten()[0]
            if first_output < 16:
                raise ValueError(
                    'The first output size should be at least 16: {0}'.format(first_output))

            net = layers.batch_norm(
                inputs=net,
                activation_fn=tf.nn.relu,
                is_training=is_training)

            # 64 / 4
            net = layers.convolution(
                inputs=net,
                num_outputs=4,
                kernel_size=[1, 1, 1],
                stride=2)

            if not is_double_size:
                # 32 / 8
                net = ops.resnet_reduction(
                    input=net,
                    kernel_size=3,
                    is_training=is_training)

            # 16 / 16
            net = ops.resnet_reduction(
                input=net,
                kernel_size=3,
                is_training=is_training)

        kpc = _KeepProbCalc(
            min_keep_prob=keep_prob,
            total_block_num=sum(len(x) for x in res_blocks_size))

        for block_num, block in enumerate(res_blocks_size):
            with tf.variable_scope('res_{0}'.format(block_num + 1)):
                for b_num, b in enumerate(block):
                    is_half_size = (block_num != 0 and b_num == 0)
                    residual_scope = 'res_{0}_{1}'.format(block_num + 1, b_num + 1)
                    net = multi_residual(
                        inputs=net,
                        num_outputs=b,
                        multi_k=multi_k,
                        res_func=res_func,
                        scope=residual_scope,
                        keep_prob=kpc.get_next_decr_prob(),
                        is_training=is_training,
                        is_half_size=is_half_size,
                        is_add_multiplier=is_add_multiplier)

        with tf.variable_scope('unit_last'):
            net = layers.batch_norm(
                inputs=net,
                activation_fn=tf.nn.relu,
                is_training=is_training)
            net = tf.reduce_mean(net, [1, 2, 3])
            net = layers.fully_connected(
                inputs=net,
                num_outputs=num_classes,
                activation_fn=None,
                weights_initializer=tf.uniform_unit_scaling_initializer(factor=1.0),
                biases_initializer=tf.constant_initializer())

        return net
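
`_KeepProbCalc` and `multi_residual` are project helpers the excerpt omits. The usage (a minimum keep probability, a total block count, and one decreasing probability per residual block) suggests a stochastic-depth style schedule decaying linearly from 1.0 down to `keep_prob` across the blocks; the class below is a sketch under that assumption, not the original implementation:

class _KeepProbCalc(object):
    def __init__(self, min_keep_prob, total_block_num):
        self._min_keep_prob = min_keep_prob
        self._total = total_block_num
        self._idx = 0

    def get_next_decr_prob(self):
        # Linear decay: the first block keeps ~1.0, the last keeps min_keep_prob.
        self._idx += 1
        frac = float(self._idx) / self._total
        return 1.0 - frac * (1.0 - self._min_keep_prob)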
Example #9
def quality_pri_net(inp_img, inp_wb, inp_iso, inp_t, gt_t):

    h = tf.shape(inp_img)[1]
    w = tf.shape(inp_img)[2]
    inp_img_normed = normalize_var(inp_img, 0, 0.00018782060752181462)
    #inp_img_normed = tf.Print(inp_img_normed,['inp_img_normed:',tf.shape(inp_img)])

    layer_inp_wb = tf.ones([1, h, w, 4]) * normalize_var(
        inp_wb, 1.5084804, 0.6111772 * 0.6111772)
    layer_inp_iso = tf.ones([1, h, w, 1]) * normalize_var(
        inp_iso, 471.72858, 264.23517 * 264.23517)
    layer_inp_t = tf.ones([1, h, w, 1]) * normalize_var(
        inp_t, 0.08489772, 0.1368367 * 0.1368367)
    layer_gt_t = tf.ones([1, h, w, 1]) * normalize_var(gt_t, 0.42407602,
                                                       0.37255558 * 0.37255558)

    inp_concated = tf.concat([inp_img_normed,
                              layer_inp_wb,
                              layer_inp_iso,
                              layer_inp_t,
                              layer_gt_t], 3)

    inp_concated.set_shape(shape=(pa.bs, None, None, 4 + 4 + 3))

    with tf.variable_scope('prefix_net'):
        conv0 = convolution(inp_concated,
                            16, [3, 3],
                            activation_fn=lrelu,
                            trainable=pa.train_prefix,
                            scope='g_conv0_1')
        conv0 = convolution(conv0,
                            16, [3, 3],
                            activation_fn=lrelu,
                            trainable=pa.train_prefix,
                            scope='g_conv0_2')
        conv0 = convolution(conv0,
                            32, [3, 3],
                            activation_fn=lrelu,
                            trainable=pa.train_prefix,
                            scope='g_conv0_3')
        conv0 = convolution(conv0,
                            32, [3, 3],
                            activation_fn=lrelu,
                            trainable=pa.train_prefix,
                            scope='g_conv0_4')
        convp1 = convolution(conv0,
                             32, [3, 3],
                             activation_fn=lrelu,
                             trainable=pa.train_prefix,
                             scope='g_conv0_5')
    with tf.variable_scope('quality_pri_net'):

        conv1 = convolution(convp1,
                            32, [3, 3],
                            activation_fn=lrelu,
                            trainable=pa.train_u_net,
                            scope='g_conv1_4')
        conv1 = convolution(conv1,
                            32, [3, 3],
                            activation_fn=lrelu,
                            trainable=pa.train_u_net,
                            scope='g_conv1_5')
        pool1 = pool2d(conv1, [2, 2], padding='SAME')

        conv2 = convolution(pool1,
                            64, [3, 3],
                            rate=1,
                            activation_fn=lrelu,
                            trainable=pa.train_u_net,
                            scope='g_conv2_1')
        conv2 = convolution(conv2,
                            64, [3, 3],
                            rate=1,
                            activation_fn=lrelu,
                            trainable=pa.train_u_net,
                            scope='g_conv2_2')
        pool2 = pool2d(conv2, [2, 2], padding='SAME')

        conv3 = convolution(pool2,
                            128, [3, 3],
                            rate=1,
                            activation_fn=lrelu,
                            trainable=pa.train_u_net,
                            scope='g_conv3_1')
        conv3 = convolution(conv3,
                            128, [3, 3],
                            rate=1,
                            activation_fn=lrelu,
                            trainable=pa.train_u_net,
                            scope='g_conv3_2')
        pool3 = pool2d(conv3, [2, 2], padding='SAME')

        conv4 = convolution(pool3,
                            256, [3, 3],
                            rate=1,
                            activation_fn=lrelu,
                            trainable=pa.train_u_net,
                            scope='g_conv4_1')
        conv4 = convolution(conv4,
                            256, [3, 3],
                            rate=1,
                            activation_fn=lrelu,
                            trainable=pa.train_u_net,
                            scope='g_conv4_2')
        pool4 = pool2d(conv4, [2, 2], padding='SAME')

        conv5 = convolution(pool4,
                            512, [3, 3],
                            rate=1,
                            activation_fn=lrelu,
                            trainable=pa.train_u_net,
                            scope='g_conv5_1')
        conv5 = convolution(conv5,
                            512, [3, 3],
                            rate=1,
                            activation_fn=lrelu,
                            trainable=pa.train_u_net,
                            scope='g_conv5_2')

        up6 = upsample_and_concat(conv5, conv4, 256, 512)
        conv6 = convolution(up6,
                            256, [3, 3],
                            rate=1,
                            activation_fn=lrelu,
                            trainable=pa.train_u_net,
                            scope='g_conv6_1')
        conv6 = convolution(conv6,
                            256, [3, 3],
                            rate=1,
                            activation_fn=lrelu,
                            trainable=pa.train_u_net,
                            scope='g_conv6_2')

        up7 = upsample_and_concat(conv6, conv3, 128, 256)
        conv7 = convolution(up7,
                            128, [3, 3],
                            rate=1,
                            activation_fn=lrelu,
                            trainable=pa.train_u_net,
                            scope='g_conv7_1')
        conv7 = convolution(conv7,
                            128, [3, 3],
                            rate=1,
                            activation_fn=lrelu,
                            trainable=pa.train_u_net,
                            scope='g_conv7_2')

        up8 = upsample_and_concat(conv7, conv2, 64, 128)
        conv8 = convolution(up8,
                            64, [3, 3],
                            rate=1,
                            activation_fn=lrelu,
                            trainable=pa.train_u_net,
                            scope='g_conv8_1')
        conv8 = convolution(conv8,
                            64, [3, 3],
                            rate=1,
                            activation_fn=lrelu,
                            trainable=pa.train_u_net,
                            scope='g_conv8_2')

        up9 = upsample_and_concat(conv8, conv1, 32, 64)
        conv9 = convolution(up9,
                            32, [3, 3],
                            rate=1,
                            activation_fn=lrelu,
                            trainable=pa.train_u_net,
                            scope='g_conv9_1')
        conv9 = convolution(conv9,
                            32, [3, 3],
                            rate=1,
                            activation_fn=lrelu,
                            trainable=pa.train_u_net,
                            scope='g_conv9_2')
        conv10 = convolution(conv9,
                             12, [1, 1],
                             rate=1,
                             activation_fn=None,
                             trainable=pa.train_u_net,
                             scope='g_conv10')
        out = tf.depth_to_space(conv10, 2)

        return out, convp1
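
The decoder closely mirrors the Learning-to-See-in-the-Dark U-Net, so `upsample_and_concat` is presumably that codebase's transposed-convolution-plus-skip helper; the call sites fix the signature (e.g. `upsample_and_concat(conv5, conv4, 256, 512)` takes 512-channel features to 256 channels at the skip's resolution). A sketch along those lines:

# Assumed helper: 2x transposed conv, then concat with the encoder skip.
def upsample_and_concat(x1, x2, output_channels, in_channels):
    pool_size = 2
    deconv_filter = tf.Variable(
        tf.truncated_normal([pool_size, pool_size, output_channels, in_channels],
                            stddev=0.02))
    deconv = tf.nn.conv2d_transpose(x1, deconv_filter, tf.shape(x2),
                                    strides=[1, pool_size, pool_size, 1])
    return tf.concat([deconv, x2], 3)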