Code example #1
def build_model(data_tensor, reuse, training, output_shape):
    """Create the hgru from Learning long-range..."""
    if isinstance(output_shape, list):
        output_shape = output_shape[0]
    with tf.variable_scope('cnn', reuse=reuse):
        # Add input
        in_emb = conv.skinny_input_layer(
            X=data_tensor,
            reuse=reuse,
            training=training,
            features=24,
            conv_activation=tf.nn.elu,
            conv_kernel_size=7,
            pool=False,
            name='l0')
        layer_hgru = feedback_hgru.hGRU(
            layer_name='hgru_1',
            x_shape=in_emb.get_shape().as_list(),
            timesteps=8,
            h_ext=[[9, 9], [5, 5], [1, 1]],
            strides=[1, 1, 1, 1],
            pool_strides=[4, 4],
            padding='SAME',
            aux={
                'symmetric_weights': True,
                'dilations': [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
                'batch_norm': True,
                'while_loop': False,
                'nonnegative': True,
                'residual': True,
                'final_bn': False,
                'renorm': False,
                'lesion_alpha': True,
                'lesion_omega': True,
                'pooling_kernel': [4, 4],
                'intermediate_ff': [24, 24],  # + filters,
                'intermediate_ks': [[5, 5], [5, 5]]},
            train=training)
        h2 = layer_hgru.build(in_emb)
        nh2 = normalization.batch(
            bottom=h2,
            name='hgru_bn',
            fused=True,
            renorm=True,
            training=training)
        activity = conv.conv_layer(
            bottom=nh2,
            name='pre_readout_conv',
            num_filters=output_shape['output'],
            kernel_size=1,
            trainable=training,
            use_bias=True)

    extra_activities = {
        # 'activity': h2
    }
    return activity, extra_activities
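
Each snippet in this collection exposes the same build_model entry point. The following is a minimal usage sketch for code example #1 (assumptions: TensorFlow 1.x graph mode, the project-local modules conv, normalization, and feedback_hgru are importable, and the placeholder shape and two-channel output are illustrative only, not taken from the source).

import tensorflow as tf

# Hypothetical NHWC input batch; the shape is illustrative.
images = tf.placeholder(tf.float32, [1, 150, 150, 1], name='images')
logits, extra_activities = build_model(
    data_tensor=images,
    reuse=False,                 # first construction of the 'cnn' scope
    training=True,
    output_shape={'output': 2})  # readout uses output_shape['output'] channels

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # preds = sess.run(logits, feed_dict={images: batch})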
Code example #2
def build_model(data_tensor, reuse, training, output_shape):
    """Create the hgru from Learning long-range..."""
    if isinstance(output_shape, list):
        output_shape = output_shape[0]
    with tf.variable_scope('cnn', reuse=reuse):
        # Add input
        in_emb = conv.skinny_input_layer(
            X=data_tensor,
            reuse=reuse,
            training=training,
            features=24,
            conv_activation=tf.nn.elu,
            conv_kernel_size=7,
            pool=False,
            name='l0')
        layer_hgru = hgru.hGRU(
            'fgru',
            x_shape=in_emb.get_shape().as_list(),
            timesteps=8,
            h_ext=[{'h1': [9, 9]}, {'h2': [5, 5]}, {'fb1': [1, 1]}],
            strides=[1, 1, 1, 1],
            hgru_ids=[{'h1': 24}, {'h2': 24}, {'fb1': 24}],
            hgru_idx=[{'h1': 0}, {'h2': 1}, {'fb1': 2}],
            padding='SAME',
            aux={
                'readout': 'fb',
                'intermediate_ff': [24, 24],
                'intermediate_ks': [[5, 5], [5, 5]],
                'intermediate_repeats': [2, 2],
                'while_loop': False,
                'skip': False,
                'symmetric_weights': True,
                'use_homunculus': False,
                'include_pooling': True
            },
            pool_strides=[4, 4],
            pooling_kernel=[4, 4],
            train=training)
        h2 = layer_hgru.build(in_emb)
        nh2 = normalization.batch(
            bottom=h2,
            name='hgru_bn',
            fused=True,
            renorm=True,
            training=training)
        activity = conv.conv_layer(
            bottom=nh2,
            name='pre_readout_conv',
            num_filters=output_shape['output'],
            kernel_size=1,
            trainable=training,
            use_bias=True)

    extra_activities = {
        # 'activity': h2
    }
    return activity, extra_activities
Code example #3
def build_model(data_tensor, reuse, training, output_shape):
    """Create the hgru from Learning long-range..."""
    if isinstance(output_shape, list):
        output_shape = output_shape[0]
    with tf.variable_scope('cnn', reuse=reuse):
        # Add input
        in_emb = conv.skinny_input_layer(
            X=data_tensor,
            reuse=reuse,
            training=training,
            features=20,
            conv_activation=tf.nn.relu,
            conv_kernel_size=7,
            pool_kernel_size=[1, 2, 2, 1],
            pool_kernel_strides=[1, 2, 2, 1],
            pool=True,
            name='l0')
        layer_hgru = hgru.hGRU(
            'fgru',
            x_shape=in_emb.get_shape().as_list(),
            timesteps=8,
            h_ext=[{'h1': [15, 15]}, {'h2': [1, 1]}, {'fb1': [1, 1]}],
            strides=[1, 1, 1, 1],
            hgru_ids=[{'h1': 20}, {'h2': 128}, {'fb1': 20}],
            hgru_idx=[{'h1': 0}, {'h2': 1}, {'fb1': 2}],
            padding='SAME',
            aux={
                'readout': 'fb',
                'intermediate_ff': [32, 128],
                'intermediate_ks': [[3, 3], [3, 3]],
                'intermediate_repeats': [3, 3],
                'while_loop': False,
                'skip': False,
                'symmetric_weights': True,
                'include_pooling': True
            },
            pool_strides=[2, 2],
            pooling_kernel=[2, 2],
            train=training)
        h2 = layer_hgru.build(in_emb)
        h2 = normalization.batch(
            bottom=h2,
            renorm=False,
            name='hgru_bn',
            training=training)
        activity = conv.readout_layer(
            activity=h2,
            reuse=reuse,
            training=training,
            output_shape=output_shape)
    extra_activities = {
        'activity': h2
    }
    return activity, extra_activities
Code example #4
def build_model(data_tensor, reuse, training, output_shape):
    """Create the hgru from Learning long-range..."""
    if isinstance(output_shape, list):
        output_shape = output_shape[0]
    with tf.variable_scope('cnn', reuse=reuse):
        # Add input
        in_emb = conv.skinny_input_layer(X=data_tensor,
                                         reuse=reuse,
                                         training=training,
                                         features=24,
                                         conv_activation=tf.nn.elu,
                                         conv_kernel_size=7,
                                         pool=False,
                                         name='l0')
        layer_hgru = hgru.hGRU('hgru_1',
                               x_shape=in_emb.get_shape().as_list(),
                               timesteps=8,
                               h_ext=9,
                               strides=[1, 1, 1, 1],
                               padding='SAME',
                               aux={
                                   'reuse': False,
                                   'constrain': False,
                                   'nonnegative': True
                               },
                               train=training)
        h2 = layer_hgru.build(in_emb)
        h2 = normalization.batch(bottom=h2,
                                 renorm=True,
                                 name='hgru_bn',
                                 training=training)
        activity = conv.conv_layer(bottom=h2,
                                   name='pre_readout_conv',
                                   num_filters=output_shape['output'],
                                   kernel_size=1,
                                   trainable=training,
                                   use_bias=True)
    extra_activities = {}
    return activity, extra_activities
Code example #5
def build_model(data_tensor,
                reuse,
                training,
                output_shape,
                data_format='NCHW'):
    """Create the hgru from Learning long-range..."""
    if isinstance(output_shape, list):
        output_shape = output_shape[-1]
    elif isinstance(output_shape, dict):
        output_shape = output_shape['output']
    if data_format == 'NCHW':
        data_tensor = tf.transpose(data_tensor, (0, 3, 1, 2))
    with tf.variable_scope('cnn', reuse=reuse):
        normalization_type = 'ada_batch_norm'
        # # Concatenate standard deviation
        # _, var = tf.nn.moments(data_tensor, axes=[3])
        # std = tf.expand_dims(tf.sqrt(var), axis=-1)
        # data_tensor = tf.concat([data_tensor, std], axis=-1)

        # Add input
        in_emb = conv.skinny_input_layer(X=data_tensor,
                                         reuse=reuse,
                                         training=training,
                                         features=16,
                                         conv_activation=tf.nn.relu,
                                         conv_kernel_size=7,
                                         pool=False,
                                         data_format=data_format,
                                         name='l0')

        # Run fGRU
        hgru_kernels = OrderedDict()
        hgru_kernels['h1'] = [9, 9]  # height/width
        hgru_kernels['h2'] = [5, 5]
        hgru_kernels['fb1'] = [1, 1]
        hgru_features = OrderedDict()
        hgru_features['h1'] = [16, 16]  # Fan-in/fan-out, I and E (match fb1)
        hgru_features['h2'] = [32, 32]
        hgru_features['fb1'] = [16, 16]  # (match h1)
        # hgru_features['fb1'] = [24, 12]  # (match h1 unless squeeze_fb)
        intermediate_ff = [24, 28, 32]  # Last feature must match h2
        intermediate_ks = [[5, 5], [5, 5], [5, 5]]
        intermediate_repeats = [1, 1, 1]  # Repeat each interm this many times
        gammanet_layer = gammanet.GN(
            layer_name='fgru',
            x=in_emb,
            data_format=data_format,
            reuse=reuse,
            timesteps=8,
            strides=[1, 1, 1, 1],
            hgru_features=hgru_features,
            hgru_kernels=hgru_kernels,
            intermediate_ff=intermediate_ff,
            intermediate_ks=intermediate_ks,
            intermediate_repeats=intermediate_repeats,
            padding='SAME',
            aux={
                'readout': 'fb',
                'squeeze_fb': False,  # Compress Inh-hat with a 1x1 conv
                'attention': 'gala',  # 'gala' or 'se'
                'attention_layers': 2,
                'saliency_filter': 5,
                'use_homunculus': True,
                'upsample_convs': True,
                'td_cell_state': False,
                'td_gate': False,  # Add top-down activity to the in-gate
                'normalization_type': normalization_type,
                'excite_se': False,  # Add S/E in the excitation stage
                'residual': True,  # intermediate resid connections
                'while_loop': True,
                'skip': True,
                'time_skips': False,
                'force_horizontal': False,
                'symmetric_weights': True,
                'timestep_output': False,
                'bilinear_init': True,
                'include_pooling': True
            },
            pool_strides=[2, 2],
            pooling_kernel=[2, 2],
            up_kernel=[4, 4],
            train=training)
        h2 = gammanet_layer(in_emb)
        if normalization_type == 'batch_norm':
            h2 = normalization.batch_contrib(bottom=h2,
                                             renorm=False,
                                             name='hgru_bn',
                                             dtype=h2.dtype,
                                             data_format=data_format,
                                             training=training)
        elif normalization_type == 'instance_norm':
            h2 = normalization.instance(bottom=h2,
                                        data_format=data_format,
                                        training=training)
        elif normalization_type == 'ada_batch_norm':
            h2 = normalization.batch_contrib(bottom=h2,
                                             renorm=False,
                                             name='hgru_bn',
                                             dtype=h2.dtype,
                                             data_format=data_format,
                                             training=training)
        else:
            raise NotImplementedError(normalization_type)
    with tf.variable_scope('cv_readout', reuse=reuse):
        if data_format == 'NCHW':
            data_format = 'channels_first'
        elif data_format == 'NHWC':
            data_format = 'channels_last'
        activity = tf.layers.conv2d(inputs=h2,
                                    filters=output_shape,
                                    kernel_size=(1, 1),
                                    padding='same',
                                    data_format=data_format,
                                    name='pre_readout_conv',
                                    use_bias=True,
                                    reuse=reuse)
    if data_format == 'channels_first':
        activity = tf.transpose(activity, (0, 2, 3, 1))
    extra_activities = {'activity': h2}
    if activity.dtype != tf.float32:
        activity = tf.cast(activity, tf.float32)
    return activity, extra_activities
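
Code example #5 is the only variant that takes a data_format argument. The function expects NHWC input either way: with data_format='NCHW' it transposes the tensor to channels-first internally and transposes the readout back to NHWC before returning. A short call sketch under the same assumptions as the earlier sketch (shapes illustrative):

images = tf.placeholder(tf.float32, [1, 320, 320, 3], name='images')  # NHWC in
logits, extras = build_model(
    data_tensor=images,
    reuse=False,
    training=True,
    output_shape=2,          # an int, a list, or a dict with an 'output' key
    data_format='NCHW')      # internal layout only; logits come back as NHWC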
Code example #6
def build_model(data_tensor, reuse, training, output_shape):
    """Create the hgru from Learning long-range..."""
    if isinstance(output_shape, list):
        output_shape = output_shape[0]
    elif isinstance(output_shape, dict):
        output_shape = output_shape['output']
    with tf.variable_scope('cnn', reuse=reuse):
        normalization_type = 'ada_batch_norm'  # 'instance_norm'
        # # Concatenate standard deviation
        # _, var = tf.nn.moments(data_tensor, axes=[3])
        # std = tf.expand_dims(tf.sqrt(var), axis=-1)
        # data_tensor = tf.concat([data_tensor, std], axis=-1)

        # Add input
        in_emb = conv.skinny_input_layer(X=data_tensor,
                                         reuse=reuse,
                                         training=training,
                                         features=16,
                                         conv_activation=tf.nn.relu,
                                         conv_kernel_size=7,
                                         pool=False,
                                         name='l0')

        # Run fGRU
        hgru_kernels = OrderedDict()
        hgru_kernels['h1'] = [15, 15]  # height/width
        hgru_kernels['h2'] = [5, 5]
        hgru_kernels['fb1'] = [1, 1]
        hgru_features = OrderedDict()
        hgru_features['h1'] = [16, 16]  # Fan-in/fan-out, I and E (match fb1)
        hgru_features['h2'] = [48, 48]
        hgru_features['fb1'] = [16, 16]  # (match h1)
        # hgru_features['fb1'] = [24, 12]  # (match h1 unless squeeze_fb)
        intermediate_ff = [24, 24, 48]  # Last feature must match h2
        intermediate_ks = [[5, 5], [5, 5], [5, 5]]
        intermediate_repeats = [1, 1, 1]  # Repeat each interm this many times
        layer_hgru = hgru.hGRU(
            'fgru',
            x_shape=in_emb.get_shape().as_list(),
            timesteps=8,
            strides=[1, 1, 1, 1],
            hgru_features=hgru_features,
            hgru_kernels=hgru_kernels,
            intermediate_ff=intermediate_ff,
            intermediate_ks=intermediate_ks,
            intermediate_repeats=intermediate_repeats,
            padding='SAME',
            aux={
                'readout': 'fb',
                'squeeze_fb': False,  # Compress Inh-hat with a 1x1 conv
                'td_gate': True,  # Add top-down activity to the in-gate
                'attention': 'gala',  # 'gala',
                'attention_layers': 2,
                'upsample_convs': True,
                'td_cell_state': True,
                'normalization_type': normalization_type,
                'excite_se': False,  # Add S/E in the excitation stage
                'residual': True,  # intermediate resid connections
                'while_loop': False,
                'skip': True,
                'time_skips': False,
                'force_horizontal': False,
                'symmetric_weights': True,
                'timestep_output': False,
                'bilinear_init': True,
                'include_pooling': True
            },
            pool_strides=[2, 2],
            pooling_kernel=[4, 4],
            up_kernel=[4, 4],
            train=training)
        h2 = layer_hgru.build(in_emb)
        if normalization_type == 'batch_norm':
            h2 = normalization.batch(bottom=h2,
                                     renorm=False,
                                     name='hgru_bn',
                                     training=training)
        elif normalization_type == 'instance_norm':
            h2 = normalization.instance(bottom=h2, training=training)
        elif normalization_type == 'ada_batch_norm':
            h2 = normalization.batch(bottom=h2,
                                     renorm=False,
                                     name='hgru_bn',
                                     training=True)
        else:
            raise NotImplementedError(normalization_type)
        fc = tf.layers.conv2d(inputs=h2,
                              filters=output_shape,
                              kernel_size=1,
                              name='fc')
        # activity = tf.reduce_mean(fc, axis=[1, 2])
        activity = tf.reduce_max(fc, axis=[1, 2])
    extra_activities = {'activity': h2}
    return activity, extra_activities