Example #1
def build_model(data_tensor,
                reuse,
                training,
                output_shape,
                data_format='NHWC'):
    """Create the hgru from Learning long-range..."""
    if isinstance(output_shape, list):
        output_shape = output_shape[-1]
    elif isinstance(output_shape, dict):
        output_shape = output_shape['output']
    # norm_moments_training = training  # Force instance norm
    # normalization_type = 'no_param_batch_norm_original'
    normalization_type = 'no_param_instance_norm'
    # output_normalization_type = 'batch_norm_original_renorm'
    output_normalization_type = 'instance_norm'
    data_tensor, long_data_format = tf_fun.interpret_data_format(
        data_tensor=data_tensor, data_format=data_format)
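    # interpret_data_format yields the long-form data_format string
    # ('channels_last'/'channels_first') expected by tf.layers below.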

    # Prepare gammanet structure
    (compression, ff_kernels, ff_repeats, features, fgru_kernels,
     additional_readouts) = v2_big_working()
    gammanet_constructor = tf_fun.get_gammanet_constructor(
        compression=compression,
        ff_kernels=ff_kernels,
        ff_repeats=ff_repeats,
        features=features,
        fgru_kernels=fgru_kernels)
    aux = get_aux()

    # Build model
    with tf.variable_scope('vgg', reuse=reuse):
        vgg = vgg16.Vgg16(
            vgg16_npy_path=
            '/media/data_cifs/clicktionary/pretrained_weights/vgg16.npy',
            reuse=reuse,
            aux=aux,
            train=training,
            timesteps=8,
            fgru_normalization_type=normalization_type,
            ff_normalization_type=normalization_type)
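        # Calling the wrapper runs the recurrent gammanet; fGRU activities are
        # exposed afterwards as attributes (e.g. vgg.fgru_0).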
        vgg(rgb=data_tensor, constructor=gammanet_constructor)
        # activity = vgg.fgru_0

    with tf.variable_scope('fgru', reuse=reuse):
        # Get side weights
        h2_rem = [vgg.fgru_0]
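        # Normalize the retained fGRU activity and upsample it to the input resolution.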
        for idx, h in enumerate(h2_rem):
            res = normalization.apply_normalization(
                activity=h,
                name='output_norm1_%s' % idx,
                normalization_type=output_normalization_type,
                data_format=data_format,
                training=training,
                trainable=training,
                reuse=reuse)
            res = aux['image_resize'](res,
                                      data_tensor.get_shape().as_list()[1:3],
                                      align_corners=True)

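        # Project with a 3x3 ReLU conv, then a linear 1x1 conv to output_shape channels.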
        activity = tf.layers.conv2d(res,
                                    filters=res.get_shape().as_list()[-1],
                                    kernel_size=(3, 3),
                                    padding='same',
                                    data_format=long_data_format,
                                    name='out',
                                    activation=tf.nn.relu,
                                    trainable=training,
                                    use_bias=True,
                                    reuse=reuse)
        activity = tf.layers.conv2d(
            activity,
            filters=output_shape,
            kernel_size=(1, 1),
            padding='same',
            data_format=long_data_format,
            name='out2',
            # activation=tf.nn.relu,
            activation=None,
            trainable=training,
            use_bias=True,
            reuse=reuse)

    if long_data_format == 'channels_first':
        activity = tf.transpose(activity, (0, 2, 3, 1))
    extra_activities = {}
    if activity.dtype != tf.float32:
        activity = tf.cast(activity, tf.float32)
    # return [activity, h_deep], extra_activities
    return activity, extra_activities
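
A minimal usage sketch for this build_model, assuming TensorFlow 1.x graph mode and that the module's own dependencies (tf_fun, normalization, vgg16, v2_big_working, get_aux) are importable; the placeholder name, batch shape, and output_shape value are illustrative only:

import tensorflow as tf

# Hypothetical NHWC input batch.
images = tf.placeholder(tf.float32, [4, 224, 224, 3], name='images')
logits, extras = build_model(
    data_tensor=images,
    reuse=False,
    training=True,
    output_shape=2,  # number of channels in the dense readout (illustrative)
    data_format='NHWC')
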
Example #2
def build_model(data_tensor,
                reuse,
                training,
                output_shape,
                data_format='NHWC'):
    """Create the hgru from Learning long-range..."""
    if isinstance(output_shape, list):
        output_shape = output_shape[-1]
    elif isinstance(output_shape, dict):
        output_shape = output_shape['output']
    # norm_moments_training = training  # Force instance norm
    # normalization_type = 'no_param_batch_norm_original'
    normalization_type = 'no_param_instance_norm'
    # output_normalization_type = 'batch_norm_original_renorm'
    output_normalization_type = 'instance_norm'
    data_tensor, long_data_format = tf_fun.interpret_data_format(
        data_tensor=data_tensor, data_format=data_format)

    # Prepare gammanet structure
    (compression, ff_kernels, ff_repeats, features, fgru_kernels,
     additional_readouts) = v2_big_working()
    gammanet_constructor = tf_fun.get_gammanet_constructor(
        compression=compression,
        ff_kernels=ff_kernels,
        ff_repeats=ff_repeats,
        features=features,
        fgru_kernels=fgru_kernels)
    aux = get_aux()

    # Build model
    with tf.variable_scope('vgg', reuse=reuse):
        vgg = vgg16.Vgg16(
            vgg16_npy_path=
            '/media/data_cifs/clicktionary/pretrained_weights/vgg16.npy',
            reuse=reuse,
            aux=aux,
            train=training,
            timesteps=8,
            fgru_normalization_type=normalization_type,
            ff_normalization_type=normalization_type)
        vgg(rgb=data_tensor, constructor=gammanet_constructor)
        # activity = vgg.fgru_0

    with tf.variable_scope('fgru', reuse=reuse):
        # Get side weights
        hs_0, hs_1 = [], []
        h2_rem = [vgg.fgru_0, vgg.fgru_1, vgg.fgru_2, vgg.conv5_1, vgg.fgru_3]
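        # Collect intermediate fGRU/conv activities; each one gets its own pair of readouts below.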
        for idx, h in enumerate(h2_rem):
            res = normalization.apply_normalization(
                activity=h,
                name='output_norm1_%s' % idx,
                normalization_type=output_normalization_type,
                data_format=data_format,
                training=training,
                trainable=training,
                reuse=reuse)
            res = aux['image_resize'](res,
                                      data_tensor.get_shape().as_list()[1:3],
                                      align_corners=True)
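            # Two parallel 1x1 readouts per scale, one for each fusion direction used below.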
            cnv_0 = tf.layers.conv2d(
                inputs=res,
                filters=output_shape,
                kernel_size=(1, 1),
                padding='same',
                data_format=long_data_format,
                name='readout_aux_00_%s' % idx,
                # activation=tf.nn.relu,
                activation=None,
                trainable=training,
                use_bias=True,
                reuse=reuse)
            cnv_1 = tf.layers.conv2d(
                inputs=res,
                filters=output_shape,
                kernel_size=(1, 1),
                padding='same',
                data_format=long_data_format,
                name='readout_aux_10_%s' % idx,
                # activation=tf.nn.relu,
                activation=None,
                trainable=training,
                use_bias=True,
                reuse=reuse)

            hs_0 += [cnv_0]
            hs_1 += [cnv_1]

        s1, s2, s3, s4, s5 = hs_0
        s11, s21, s31, s41, s51 = hs_1
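        # Detached copies: the fusion sums below add cross-scale context
        # without back-propagating gradients through it.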
        o1 = tf.stop_gradient(tf.identity(s1))
        o2 = tf.stop_gradient(tf.identity(s2))
        o3 = tf.stop_gradient(tf.identity(s3))
        o4 = tf.stop_gradient(tf.identity(s4))
        o21 = tf.stop_gradient(tf.identity(s21))
        o31 = tf.stop_gradient(tf.identity(s31))
        o41 = tf.stop_gradient(tf.identity(s41))
        o51 = tf.stop_gradient(tf.identity(s51))

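        # Fuse scales in both directions: p*_1 adds detached shallower readouts,
        # p*_2 adds detached deeper readouts.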
        p1_1 = s1
        p2_1 = s2 + o1
        p3_1 = s3 + o2 + o1
        p4_1 = s4 + o3 + o2 + o1
        p5_1 = s5 + o4 + o3 + o2 + o1
        p1_2 = s11 + o21 + o31 + o41 + o51
        p2_2 = s21 + o31 + o41 + o51
        p3_2 = s31 + o41 + o51
        p4_2 = s41 + o51
        p5_2 = s51

        hs = [p1_1, p2_1, p3_1, p4_1, p5_1, p1_2, p2_2, p3_2, p4_2, p5_2]

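        # Concatenate the fused maps and collapse them with a bias-free linear 1x1 readout.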
        activity = tf.layers.conv2d(
            tf.concat(hs, -1),
            filters=output_shape,
            kernel_size=(1, 1),
            padding='same',
            data_format=long_data_format,
            name='out',
            # activation=tf.nn.relu,
            activation=None,
            trainable=training,
            use_bias=False,
            reuse=reuse)

    if long_data_format == 'channels_first':
        activity = tf.transpose(activity, (0, 2, 3, 1))
    extra_activities = {idx: v for idx, v in enumerate(hs_0)}
    if activity.dtype != tf.float32:
        activity = tf.cast(activity, tf.float32)
    # return [activity, h_deep], extra_activities
    return activity, extra_activities