def identity_block(input_tensor, kernel_size, filters, stage, block):
    """Build a ResNet identity block (the shortcut has no conv layer).

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of
            middle conv layer at main path
        filters: list of integers, the filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names

    # Returns
        Output tensor for the block.
    """
    print_warn(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> identity block")
    nb_filter1, nb_filter2, nb_filter3 = filters
    channel_axis = 3  # channels-last layout assumed for batch norm

    conv_prefix = 'res' + str(stage) + block + '_branch'
    bn_prefix = 'bn' + str(stage) + block + '_branch'

    print_info(input_tensor)
    # 1x1 bottleneck reduction
    out = layers.Conv2D(nb_filter1, (1, 1),
                        kernel_initializer='he_normal',
                        name=conv_prefix + '2a')(input_tensor)
    print_info(out)
    out = layers.BatchNormalization(axis=channel_axis,
                                    name=bn_prefix + '2a')(out)
    print_info(out)
    out = layers.Activation('relu')(out)
    print_info(out)

    # middle conv at the requested kernel size, padded to keep spatial dims
    out = layers.Conv2D(nb_filter2, kernel_size,
                        padding='same',
                        kernel_initializer='he_normal',
                        name=conv_prefix + '2b')(out)
    print_info(out)
    out = layers.BatchNormalization(axis=channel_axis,
                                    name=bn_prefix + '2b')(out)
    print_info(out)
    out = layers.Activation('relu')(out)
    print_info(out)

    # 1x1 expansion back to the residual width, then add the shortcut
    out = layers.Conv2D(nb_filter3, (1, 1),
                        kernel_initializer='he_normal',
                        name=conv_prefix + '2c')(out)
    print_info(out)
    out = layers.BatchNormalization(axis=channel_axis,
                                    name=bn_prefix + '2c')(out)
    print_info(out)
    out = layers.add([out, input_tensor])
    print_info(out)
    out = layers.Activation('relu')(out)
    print_info(out)

    return out
def conv_block(input_tensor,
               kernel_size,
               filters,
               stage,
               block,
               strides=(2, 2)):
    """Build a ResNet block whose shortcut path has a conv layer.

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of
            middle conv layer at main path
        filters: list of integers, the filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        strides: Strides for the first conv layer in the block.

    # Returns
        Output tensor for the block.

    Note that from stage 3,
    the first conv layer at main path is with strides=(2, 2)
    And the shortcut should have strides=(2, 2) as well
    """
    print_warn(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> conv block")

    nb_filter1, nb_filter2, nb_filter3 = filters

    channel_axis = 3  # channels-last layout assumed for batch norm
    conv_prefix = 'res' + str(stage) + block + '_branch'
    bn_prefix = 'bn' + str(stage) + block + '_branch'

    print_info(input_tensor)
    # 1x1 bottleneck reduction; this conv also applies the downsampling stride
    out = layers.Conv2D(nb_filter1, (1, 1),
                        strides=strides,
                        kernel_initializer='he_normal',
                        name=conv_prefix + '2a')(input_tensor)
    print_info(out)
    out = layers.BatchNormalization(axis=channel_axis,
                                    name=bn_prefix + '2a')(out)
    print_info(out)
    out = layers.Activation('relu')(out)
    print_info(out)

    # middle conv at the requested kernel size, padded to keep spatial dims
    out = layers.Conv2D(nb_filter2, kernel_size,
                        padding='same',
                        kernel_initializer='he_normal',
                        name=conv_prefix + '2b')(out)
    print_info(out)
    out = layers.BatchNormalization(axis=channel_axis,
                                    name=bn_prefix + '2b')(out)
    print_info(out)
    out = layers.Activation('relu')(out)
    print_info(out)

    # 1x1 expansion back to the residual width
    out = layers.Conv2D(nb_filter3, (1, 1),
                        kernel_initializer='he_normal',
                        name=conv_prefix + '2c')(out)
    print_info(out)
    out = layers.BatchNormalization(axis=channel_axis,
                                    name=bn_prefix + '2c')(out)
    print_info(out)

    # projection shortcut: 1x1 conv matches both channel count and stride
    residual = layers.Conv2D(nb_filter3, (1, 1),
                             strides=strides,
                             kernel_initializer='he_normal',
                             name=conv_prefix + '1')(input_tensor)
    print_info(residual)
    residual = layers.BatchNormalization(axis=channel_axis,
                                         name=bn_prefix + '1')(residual)
    print_info(residual)

    out = layers.add([out, residual])
    print_info(out)
    out = layers.Activation('relu')(out)
    print_info(out)

    return out
def model(images, text_scale=512, weight_decay=1e-5, is_training=True):
    """Build the EAST-style detector: a ResNet-50 backbone plus a U-shaped
    feature-merging branch, ported from the slim implementation to Keras.

    # Arguments
        images: input image tensor (channels-last assumed, since bn_axis=3).
        text_scale: NOTE(review): unused in this body; the slim reference
            (commented below) multiplied geo_map by it — confirm whether the
            missing scaling is intentional.
        weight_decay: NOTE(review): unused in this body.
        is_training: NOTE(review): unused in this body.

    # Returns
        F_score: 1-channel sigmoid score map from the merged features.
        F_geometry: 5-channel tensor, concat of 4-channel geo_map and
            1-channel angle_map along the last axis.
    """
    # Normalize input by per-channel mean subtraction (helper defined elsewhere).
    images = mean_image_subtraction(images)

    bn_axis = 3

    # Collects the backbone feature maps (pool2..pool5) for the merge branch.
    end_points = dict()

    print_warn(">>>>>>>>>>>>>>> Model Definition Started: ")
    print_warn(images)
    # Reference topology for the ResNet-50 stem below:
    # http://ethereon.github.io/netscope/#/gist/db945b393d40bfa26006
    x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(images)
    print_warn(x)
    # Stem: 7x7/2 conv -> BN -> ReLU -> 3x3/2 max pool (explicit padding,
    # 'valid' conv, matching the Keras ResNet-50 reference implementation).
    x = layers.Conv2D(64, (7, 7),
                      strides=(2, 2),
                      padding='valid',
                      kernel_initializer='he_normal',
                      name='conv1')(x)
    print_warn(x)
    x = layers.BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    print_warn(x)
    x = layers.Activation('relu')(x)
    print_warn(x)
    x = layers.ZeroPadding2D(padding=(1, 1), name='pool1_pad')(x)
    print_warn(x)
    x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
    print_warn(x)

    print_warn(">>>>>>>>>>>>>>> Resnet Definition Started: ")
    # Stage 2: strides=(1,1) on the first conv_block because the stem's max
    # pool already downsampled — stage 2 keeps spatial resolution.
    print_warn(">>>>> pool2")
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    print_warn(x)
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    print_warn(x)
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
    print_warn(x)

    end_points["pool2"] = x

    # Stage 3: conv_block's default strides=(2,2) halves resolution.
    print_warn(">>>>> pool3")
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    print_warn(x)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    print_warn(x)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    print_warn(x)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
    print_warn(x)
    end_points["pool3"] = x

    # Stage 4: six blocks, again downsampled by the leading conv_block.
    print_warn(">>>>> pool4")

    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    print_warn(x)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    print_warn(x)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    print_warn(x)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    print_warn(x)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    print_warn(x)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')
    # NOTE(review): print_info here breaks the print_warn pattern used for
    # every other block in this function — presumably unintentional.
    print_info(x)
    end_points["pool4"] = x

    # Stage 5: final backbone stage.
    print_warn(">>>>> pool5")
    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    print_warn(x)
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    print_warn(x)
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
    print_warn(x)

    end_points["pool5"] = x

    # Feature pyramid, coarsest first: f[0]=pool5 ... f[3]=pool2.
    f = [
        end_points['pool5'], end_points['pool4'], end_points['pool3'],
        end_points['pool2']
    ]

    for i in range(4):
        logging.info('Shape of f_{} : {}'.format(i, f[i].shape))

    # g[i]: output fed to the next merge step; h[i]: merged feature at level i.
    g = [None, None, None, None]
    h = [None, None, None, None]
    # Channel widths per merge level (level 0 passes pool5 through unchanged).
    num_outputs = [None, 128, 64, 32]

    # U-shaped merge: at each level, concat the upsampled coarser feature with
    # the backbone feature, then 1x1 conv (channel reduce) + 3x3 conv (fuse).
    for i in range(4):
        if i == 0:
            h[i] = f[i]
        else:
            c1_1 = layers.Conv2D(filters=num_outputs[i],
                                 kernel_size=1)(tf.concat([g[i - 1], f[i]],
                                                          axis=-1))
            # slim.conv2d(tf.concat([g[i-1], f[i]], axis=-1), num_outputs[i], 1)
            h[i] = layers.Conv2D(filters=num_outputs[i],
                                 kernel_size=3,
                                 padding="same")(c1_1)  #TODO kernel size to 3
            # slim.conv2d(c1_1, num_outputs[i], 3)
        if i <= 2:
            # Intermediate levels: upsample (unpool helper defined elsewhere)
            # before merging with the next, finer backbone feature.
            g[i] = unpool(h[i])
        else:
            # Last level: one more 3x3 conv instead of upsampling.
            g[i] = layers.Conv2D(filters=num_outputs[i],
                                 kernel_size=3,
                                 padding="same")(h[i])  #TODO kernel size to 3
            # slim.conv2d(h[i], num_outputs[i], 3)
        logging.info('Shape of h_{} : {}, g_{} : {}'.format(
            i, h[i].shape, i, g[i].shape))

    # here we use a slightly different way for regression part,
    # we first use a sigmoid to limit the regression range, and also
    # this is do with the angle map
    F_score = layers.Conv2D(filters=1, kernel_size=1,
                            activation=tf.nn.sigmoid)(g[3])
    # slim.conv2d(g[3], 1, 1, activation_fn=tf.nn.sigmoid, normalizer_fn=None)
    # 4 channel of axis aligned bbox and 1 channel rotation angle
    geo_map = layers.Conv2D(filters=4, kernel_size=1,
                            activation=tf.nn.sigmoid)(g[3])
    # NOTE(review): the slim reference below scales by FLAGS.text_scale; this
    # Keras port leaves geo_map in [0, 1] and ignores the text_scale arg —
    # confirm the scaling happens downstream.
    # slim.conv2d(g[3], 4, 1, activation_fn=tf.nn.sigmoid, normalizer_fn=None) * FLAGS.text_scale
    angle_map = layers.Conv2D(filters=1,
                              kernel_size=1,
                              activation=tf.nn.sigmoid)(g[3])
    # NOTE(review): likewise, the slim reference mapped the sigmoid output to
    # [-pi/4, pi/4]; here angle_map stays in [0, 1] — confirm downstream shift.
    # (slim.conv2d(g[3], 1, 1, activation_fn=tf.nn.sigmoid, normalizer_fn=None) - 0.5) * np.pi/2 # angle is between [-45, 45]
    F_geometry = tf.concat([geo_map, angle_map], axis=-1)

    return F_score, F_geometry