Example 1
def build_model(data_tensor, reuse, training):
    """Create the hgru from Learning long-range..."""
    with tf.variable_scope('cnn', reuse=reuse):
        with tf.variable_scope('input', reuse=reuse):
            conv_aux = {
                'pretrained': os.path.join('weights',
                                           'gabors_for_contours_7.npy'),
                'pretrained_key': 's1',
                'nonlinearity': 'square'
            }
            x = conv.conv_layer(bottom=data_tensor,
                                name='gabor_input',
                                stride=[1, 1, 1, 1],
                                padding='SAME',
                                trainable=training,
                                use_bias=True,
                                aux=conv_aux)
            layer_hgru = hgru.hGRU('hgru_1',
                                   x_shape=x.get_shape().as_list(),
                                   timesteps=8,
                                   h_ext=15,
                                   strides=[1, 1, 1, 1],
                                   padding='SAME',
                                   aux={
                                       'lesion_alpha': True,
                                       'lesion_omega': True
                                   },
                                   train=training)
            h2 = layer_hgru.build(x)
            h2 = normalization.batch(
                bottom=h2,
                reuse=reuse,
                # renorm=True,
                name='hgru_bn',
                training=training)

        with tf.variable_scope('readout_1', reuse=reuse):
            activity = conv.conv_layer(bottom=h2,
                                       name='pre_readout_conv',
                                       num_filters=2,
                                       kernel_size=1,
                                       trainable=training,
                                       use_bias=False)
            pool_aux = {'pool_type': 'max'}
            activity = pooling.global_pool(bottom=activity,
                                           name='pre_readout_pool',
                                           aux=pool_aux)
            activity = normalization.batch(
                bottom=activity,
                reuse=reuse,
                # renorm=True,
                name='readout_1_bn',
                training=training)

        with tf.variable_scope('readout_2', reuse=reuse):
            activity = tf.layers.flatten(activity, name='flat_readout')
            activity = tf.layers.dense(inputs=activity, units=2)
    return activity, h2
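
For context, a minimal sketch of how a two-tower TF1 graph built with this function is typically driven: the first call creates the variables, the second reuses them for evaluation. The input shape, batch size, and availability of the pretrained gabor-weights file are assumptions, not taken from the source.

import numpy as np
import tensorflow as tf

# Hypothetical input: grayscale contour images (shape assumed).
images = tf.placeholder(tf.float32, [32, 150, 150, 1], name='images')

# The first call creates the variables; the second reuses them for eval.
train_logits, train_h2 = build_model(images, reuse=False, training=True)
val_logits, val_h2 = build_model(images, reuse=True, training=False)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.random.rand(32, 150, 150, 1).astype(np.float32)
    logits = sess.run(val_logits, feed_dict={images: batch})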
Example 2
    def just_ff(self, x, l0_h1, l0_h2):
        # HGRU
        l0_h1, l0_h2 = self.hgru0.run(x, l0_h1, l0_h2)
        ff = tf.contrib.layers.batch_norm(
            inputs=l0_h2,
            scale=True,
            center=True,
            fused=True,
            renorm=False,
            reuse=False,
            scope=None,
            param_initializers=self.bn_param_initializer,
            updates_collections=None,
            is_training=self.train)
        ff = tf.nn.relu(ff)
        ff_list = []
        ff_list.append(ff)

        # FEEDFORWARD
        for idx, (conv_fsiz, conv_k, conv_str, pool_fsiz,
                  pool_str) in enumerate(
                      zip(self.ff_conv_fsiz, self.ff_conv_k,
                          self.ff_conv_strides, self.ff_pool_fsiz,
                          self.ff_pool_strides)):
            with tf.variable_scope(self.var_scope + '/ff_%s' % idx,
                                   reuse=tf.AUTO_REUSE):
                weights = tf.get_variable("weights")
            # POOL
            ff = max_pool(bottom=ff,
                          k=[1] + pool_fsiz + [1],
                          s=[1] + pool_str + [1],
                          name='ff_pool_hgru')
            # CONV
            ff = tf.nn.conv2d(input=ff,
                              filter=weights,
                              strides=conv_str,
                              padding='SAME')
            ff = tf.contrib.layers.batch_norm(
                inputs=ff,
                scale=True,
                center=True,
                fused=True,
                renorm=False,
                reuse=False,
                scope=None,
                param_initializers=self.bn_param_initializer,
                updates_collections=None,
                is_training=self.train)
            ff = tf.nn.relu(ff)
            ff_list.append(ff)

        # GLOBAL POOL (the feedforward-only pass skips the tiling step)
        if self.use_global_pool:
            ff = global_pool(bottom=ff, name='global_pool', aux={})
        return ff
Example 3
def readout_layer(
        activity,
        reuse,
        training,
        output_shape,
        dtype=tf.float32,
        var_scope='readout_1',
        pool_type='max',
        renorm=False,
        use_bn=False,
        features=2,
        return_fc=False):
    """Readout layer for recurrent experiments in Kim et al., 2019."""
    with tf.variable_scope(var_scope, reuse=reuse):
        prepool_activity = tf.layers.conv2d(
            inputs=activity,
            filters=features,
            kernel_size=1,
            name='pre_readout_conv',
            strides=(1, 1),
            padding='same',
            activation=None,
            trainable=training,
            use_bias=False)
        pool_aux = {'pool_type': pool_type}
        if pool_type == 'select':
            # Gather the center column of activity (not yet supported).
            raise NotImplementedError
            act_shape = prepool_activity.get_shape().as_list()
            h = act_shape[1] // 2
            w = act_shape[2] // 2
            activity = tf.squeeze(
                prepool_activity[:, h:h + 1, w:w + 1, :], [1, 2])
        else:
            activity = pooling.global_pool(
                bottom=prepool_activity,
                name='pre_readout_pool',
                aux=pool_aux)
        if use_bn:
            activity = normalization.batch_contrib(
                bottom=activity,
                renorm=renorm,
                dtype=dtype,
                name='readout_1_bn',
                training=training)
    with tf.variable_scope('readout_2', reuse=reuse):
        out_activity = tf.layers.flatten(
            activity,
            name='flat_readout')
        out_activity = tf.layers.dense(
            inputs=out_activity,
            units=output_shape)
    if return_fc:
        return out_activity, prepool_activity
    else:
        return out_activity
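
A minimal usage sketch for readout_layer, assuming a 4D feature map such as the h2 output of an hGRU block; the shapes below are placeholders:

# Hypothetical recurrent feature map (batch, height, width, channels).
features = tf.placeholder(tf.float32, [16, 75, 75, 24])

# Two-class readout with the default global max pool.
logits = readout_layer(
    activity=features,
    reuse=False,
    training=True,
    output_shape=2)

# Also retrieve the pre-pool 1x1-conv map, e.g. for visualization.
logits, prepool = readout_layer(
    activity=features,
    reuse=tf.AUTO_REUSE,
    training=False,
    output_shape=2,
    return_fc=True)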
Example 4
def build_model(data_tensor, reuse, training):
    """Create the gru from Learning long-range..."""
    with tf.variable_scope('cnn', reuse=reuse):
        with tf.variable_scope('input', reuse=reuse):
            conv_aux = {
                'pretrained': os.path.join('weights',
                                           'gabors_for_contours_7.npy'),
                'pretrained_key': 's1',
                'nonlinearity': 'square'
            }
            x = conv.conv_layer(bottom=data_tensor,
                                name='gabor_input',
                                stride=[1, 1, 1, 1],
                                padding='SAME',
                                trainable=training,
                                use_bias=True,
                                aux=conv_aux)
            activity = conv.conv_layer(bottom=x,
                                       name='c1',
                                       num_filters=9,
                                       kernel_size=20,
                                       trainable=training,
                                       use_bias=False)
            activity = normalization.batch(bottom=activity,
                                           name='c1_bn',
                                           training=training)
            activity = tf.nn.relu(activity)

        with tf.variable_scope('readout_1', reuse=reuse):
            activity = conv.conv_layer(bottom=activity,
                                       name='pre_readout_conv',
                                       num_filters=2,
                                       kernel_size=1,
                                       trainable=training,
                                       use_bias=False)
            pool_aux = {'pool_type': 'max'}
            activity = pooling.global_pool(bottom=activity,
                                           name='pre_readout_pool',
                                           aux=pool_aux)
            activity = normalization.batch(bottom=activity,
                                           name='readout_1_bn',
                                           training=training)

        with tf.variable_scope('readout_2', reuse=reuse):
            activity = tf.layers.flatten(activity, name='flat_readout')
            activity = tf.layers.dense(inputs=activity, units=2)
    extra_activities = {'activity': activity}
    return activity, extra_activities
Example 5
def build_model(data_tensor, reuse, training):
    """Create the hgru from Learning long-range..."""
    with tf.variable_scope('cnn', reuse=reuse):
        with tf.variable_scope('input', reuse=reuse):
            x = tf.layers.conv2d(inputs=data_tensor,
                                 filters=24,
                                 kernel_size=3,
                                 padding='same',
                                 trainable=training,
                                 name='c1_1',
                                 use_bias=True)
            x = tf.layers.conv2d(inputs=x,
                                 filters=24,
                                 kernel_size=3,
                                 padding='same',
                                 trainable=training,
                                 name='c1_2',
                                 use_bias=True)
            x = tf.layers.conv2d(inputs=x,
                                 filters=24,
                                 kernel_size=3,
                                 padding='same',
                                 trainable=training,
                                 name='c1_3',
                                 use_bias=True)

        with tf.variable_scope('hGRU', reuse=reuse):
            layer_hgru = hgru.hGRU('hgru',
                                   x_shape=x.get_shape().as_list(),
                                   timesteps=6,
                                   h_ext=7,
                                   strides=[1, 1, 1, 1],
                                   padding='SAME',
                                   aux={
                                       'readout': 'fb',
                                       'pooling_kernel': [1, 4, 4, 1],
                                       'intermediate_ff': [24, 24, 24],
                                       'intermediate_ks': [[3, 3], [3, 3],
                                                           [3, 3]],
                                   },
                                   pool_strides=[1, 4, 4, 1],
                                   train=training)
            x = layer_hgru.build(x)
            x = normalization.batch(bottom=x,
                                    name='hgru_bn',
                                    fused=True,
                                    training=training)
            fb = tf.identity(x)

        with tf.variable_scope('readout_1', reuse=reuse):
            x = conv.conv_layer(bottom=x,
                                name='pre_readout_conv',
                                num_filters=2,
                                kernel_size=1,
                                trainable=training,
                                use_bias=True)
            pool_aux = {'pool_type': 'max'}
            x = pooling.global_pool(bottom=x,
                                    name='pre_readout_pool',
                                    aux=pool_aux)
            x = normalization.batch(bottom=x,
                                    name='readout_1_bn',
                                    training=training)

        with tf.variable_scope('readout_2', reuse=reuse):
            x = tf.layers.flatten(x, name='flat_readout')
            x = tf.layers.dense(inputs=x, units=2)
    extra_activities = {'activity': fb}
    return x, extra_activities
Example 6
def build_model(data_tensor, reuse, training):
    """Create the hgru from Learning long-range..."""
    down_pool_kernel = [1, 2, 2, 1]
    down_pool_strides = [1, 2, 2, 1]
    down_pool_padding = 'SAME'
    with tf.variable_scope('cnn', reuse=reuse):
        # Unclear if we should include l0 in the down/upsample cascade
        with tf.variable_scope('g1', reuse=reuse):
            # Downsample
            act11 = conv_block(x=data_tensor,
                               name='l1_1',
                               filters=64,
                               training=training,
                               reuse=reuse,
                               pool=False)
            act12 = conv_block(x=act11,
                               name='l1_2',
                               filters=64,
                               training=training,
                               reuse=reuse,
                               pool=False)
            poolact12, poolact12inds = tf.nn.max_pool_with_argmax(
                input=act12,
                ksize=down_pool_kernel,
                strides=down_pool_strides,
                padding=down_pool_padding,
                name='l1_2_pool')

        with tf.variable_scope('g2', reuse=reuse):
            # Downsample
            act21 = conv_block(x=poolact12,
                               name='l2_1',
                               filters=128,
                               training=training,
                               reuse=reuse,
                               pool=False)
            act22 = conv_block(x=act21,
                               filters=128,
                               name='l2_2',
                               training=training,
                               reuse=reuse,
                               pool=False)
            poolact22, poolact22inds = tf.nn.max_pool_with_argmax(
                input=act22,
                ksize=down_pool_kernel,
                strides=down_pool_strides,
                padding=down_pool_padding,
                name='l2_2_pool')

        with tf.variable_scope('g3', reuse=reuse):
            # Downsample
            act31 = conv_block(x=poolact22,
                               name='l3_1',
                               filters=256,
                               training=training,
                               reuse=reuse,
                               pool=False)
            act32 = conv_block(x=act31,
                               filters=256,
                               name='l3_2',
                               training=training,
                               reuse=reuse,
                               pool=False)
            act33 = conv_block(x=act32,
                               filters=256,
                               name='l3_3',
                               training=training,
                               reuse=reuse,
                               pool=False)
            poolact33, poolact33inds = tf.nn.max_pool_with_argmax(
                input=act33,
                ksize=down_pool_kernel,
                strides=down_pool_strides,
                padding=down_pool_padding,
                name='l3_3_pool')

        with tf.variable_scope('g4', reuse=reuse):
            # Downsample
            act41 = conv_block(x=poolact33,
                               name='l4_1',
                               filters=512,
                               training=training,
                               reuse=reuse,
                               pool=False)
            act42 = conv_block(x=act41,
                               filters=512,
                               name='l4_2',
                               training=training,
                               reuse=reuse,
                               pool=False)
            act43 = conv_block(x=act42,
                               filters=512,
                               name='l4_3',
                               training=training,
                               reuse=reuse,
                               pool=False)
            poolact43, poolact43inds = tf.nn.max_pool_with_argmax(
                input=act43,
                ksize=down_pool_kernel,
                strides=down_pool_strides,
                padding=down_pool_padding,
                name='l4_3_pool')

        with tf.variable_scope('g5', reuse=reuse):
            # Downsample
            act51 = conv_block(x=poolact43,
                               name='l5_1',
                               filters=512,
                               training=training,
                               reuse=reuse,
                               pool=False)
            act52 = conv_block(x=act51,
                               filters=512,
                               name='l5_2',
                               training=training,
                               reuse=reuse,
                               pool=False)
            act53 = conv_block(x=act52,
                               filters=512,
                               name='l5_3',
                               training=training,
                               reuse=reuse,
                               pool=False)
            poolact53, poolact53inds = tf.nn.max_pool_with_argmax(
                input=act53,
                ksize=down_pool_kernel,
                strides=down_pool_strides,
                padding=down_pool_padding,
                name='l5_3_pool')

        with tf.variable_scope('g5_up', reuse=reuse):
            upact5 = pooling.unpool_with_argmax_layer(bottom=poolact53,
                                                      ind=poolact53inds,
                                                      filter_size=[3, 3],
                                                      name='l5_unpool')
            uact53 = conv_block(x=upact5,
                                name='ul5_3',
                                filters=512,
                                training=training,
                                reuse=reuse,
                                pool=False)
            uact52 = conv_block(x=uact53,
                                filters=512,
                                name='ul5_2',
                                training=training,
                                reuse=reuse,
                                pool=False)
            uact51 = conv_block(x=uact52,
                                filters=512,
                                name='ul5_1',
                                training=training,
                                reuse=reuse,
                                pool=False)

        with tf.variable_scope('g4_up', reuse=reuse):
            upact4 = pooling.unpool_with_argmax_layer(bottom=uact51,
                                                      ind=poolact43inds,
                                                      filter_size=[3, 3],
                                                      name='l4_unpool')
            uact43 = conv_block(x=upact4,
                                name='ul4_3',
                                filters=512,
                                training=training,
                                reuse=reuse,
                                pool=False)
            uact42 = conv_block(x=uact43,
                                filters=512,
                                name='ul4_2',
                                training=training,
                                reuse=reuse,
                                pool=False)
            uact41 = conv_block(x=uact42,
                                filters=256,
                                name='ul4_1',
                                training=training,
                                reuse=reuse,
                                pool=False)

        with tf.variable_scope('g3_up', reuse=reuse):
            upact3 = pooling.unpool_with_argmax_layer(bottom=uact41,
                                                      ind=poolact33inds,
                                                      filter_size=[3, 3],
                                                      name='l3_unpool')
            uact33 = conv_block(x=upact3,
                                name='ul3_3',
                                filters=256,
                                training=training,
                                reuse=reuse,
                                pool=False)
            uact32 = conv_block(x=uact33,
                                filters=256,
                                name='ul3_2',
                                training=training,
                                reuse=reuse,
                                pool=False)
            uact31 = conv_block(x=uact32,
                                filters=128,
                                name='ul3_1',
                                training=training,
                                reuse=reuse,
                                pool=False)

        with tf.variable_scope('g2_up', reuse=reuse):
            upact2 = pooling.unpool_with_argmax_layer(bottom=uact31,
                                                      ind=poolact22inds,
                                                      filter_size=[3, 3],
                                                      name='l2_unpool')
            uact22 = conv_block(x=upact2,
                                name='ul2_2',
                                filters=128,
                                training=training,
                                reuse=reuse,
                                pool=False)
            uact21 = conv_block(x=uact22,
                                name='ul2_1',
                                filters=64,
                                training=training,
                                reuse=reuse,
                                pool=False)

        with tf.variable_scope('g1_up', reuse=reuse):
            upact1 = pooling.unpool_with_argmax_layer(bottom=uact21,
                                                      ind=poolact12inds,
                                                      filter_size=[3, 3],
                                                      name='l1_unpool')
            uact12 = conv_block(x=upact1,
                                name='ul1_2',
                                filters=64,
                                training=training,
                                reuse=reuse,
                                pool=False)
            uact11 = conv_block(x=uact12,
                                name='ul1_1',
                                filters=64,
                                training=training,
                                reuse=reuse,
                                pool=False)

        with tf.variable_scope('readout_1', reuse=reuse):
            activity = conv.conv_layer(bottom=uact11,
                                       name='pre_readout_conv',
                                       num_filters=2,
                                       kernel_size=1,
                                       trainable=training,
                                       use_bias=False)
            pool_aux = {'pool_type': 'max'}
            activity = pooling.global_pool(bottom=activity,
                                           name='pre_readout_pool',
                                           aux=pool_aux)
            activity = normalization.batch(bottom=activity,
                                           renorm=True,
                                           name='readout_1_bn',
                                           training=training)

        with tf.variable_scope('readout_2', reuse=reuse):
            activity = tf.layers.flatten(activity, name='flat_readout')
            activity = tf.layers.dense(inputs=activity, units=2)
    extra_activities = {'activity': activity}

    return activity, extra_activities
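
conv_block is called throughout these examples but never shown. A plausible minimal implementation, assuming the conv -> batch norm -> ReLU pattern the surrounding code follows; the signature and defaults here are assumptions:

def conv_block(x, name, filters, training, reuse, pool=True,
               kernel_size=3, pool_size=2):
    """Assumed conv -> batch norm -> ReLU block with an optional max pool."""
    act = tf.layers.conv2d(inputs=x,
                           filters=filters,
                           kernel_size=kernel_size,
                           padding='same',
                           name='%s_conv' % name,
                           trainable=training,
                           use_bias=True)
    act = tf.layers.batch_normalization(act,
                                        name='%s_bn' % name,
                                        training=training,
                                        reuse=reuse)
    act = tf.nn.relu(act)
    if pool:
        act = tf.layers.max_pooling2d(act,
                                      pool_size=pool_size,
                                      strides=pool_size,
                                      name='%s_pool' % name)
    return act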
Example 7
    def full(self, i0, x, l0_h1, l0_h2, td0_h1):
        # HGRU
        l0_h1, l0_h2 = self.hgru0.run(x, l0_h1, l0_h2)
        ff = tf.contrib.layers.batch_norm(
            inputs=l0_h2,
            scale=True,
            center=True,
            fused=True,
            renorm=False,
            reuse=False,
            scope=None,
            param_initializers=self.bn_param_initializer,
            updates_collections=None,
            is_training=self.train)
        ff = tf.nn.relu(ff)
        ff_list = []
        ff_list.append(ff)

        # FEEDFORWARD
        for idx, (conv_fsiz, conv_k, conv_str, pool_fsiz,
                  pool_str) in enumerate(
                      zip(self.ff_conv_fsiz, self.ff_conv_k,
                          self.ff_conv_strides, self.ff_pool_fsiz,
                          self.ff_pool_strides)):
            with tf.variable_scope(self.var_scope + '/ff_%s' % idx,
                                   reuse=tf.AUTO_REUSE):
                weights = tf.get_variable("weights")
            # POOL
            ff = max_pool(bottom=ff,
                          k=[1] + pool_fsiz + [1],
                          s=[1] + pool_str + [1],
                          name='ff_pool_hgru')
            # CONV
            ff = tf.nn.conv2d(input=ff,
                              filter=weights,
                              strides=conv_str,
                              padding='SAME')
            ff = tf.contrib.layers.batch_norm(
                inputs=ff,
                scale=True,
                center=True,
                fused=True,
                renorm=False,
                reuse=False,
                scope=None,
                param_initializers=self.bn_param_initializer,
                updates_collections=None,
                is_training=self.train)
            ff = tf.nn.relu(ff)
            ff_list.append(ff)

        # GLOBAL POOL and then TILE
        if self.use_global_pool:
            top_map_shape = ff_list[-1].get_shape().as_list()
            ff = global_pool(bottom=ff, name='global_pool', aux={})
            ff = tf.tile(tf.expand_dims(tf.expand_dims(ff, 1), 1),
                         [1] + top_map_shape[1:3] + [1])

        # TOPDOWN
        fb = ff
        if not self.share_ff_td_kernels:
            scp = 'fb_'
        else:
            scp = 'ff_'
        for idx in range(len(ff_list))[::-1]:
            if idx != 0:
                with tf.variable_scope(self.var_scope + '/' + scp + '%s' %
                                       (idx - 1),
                                       reuse=True):
                    weights = tf.get_variable("weights")
                fb = self.resize_x_to_y(x=fb,
                                        y=ff_list[idx - 1],
                                        kernel=weights,
                                        mode='transpose',
                                        strides=self.ff_pool_strides[idx - 1])
                fb = tf.contrib.layers.batch_norm(
                    inputs=fb,
                    scale=True,
                    center=True,
                    fused=True,
                    renorm=False,
                    reuse=False,
                    scope=None,
                    param_initializers=self.bn_param_initializer,
                    updates_collections=None,
                    is_training=self.train)
                fb = tf.nn.relu(fb)
            else:
                with tf.variable_scope(self.var_scope + '/fb_0', reuse=True):
                    weights = tf.get_variable("weights")
                fb = self.resize_x_to_y(x=fb,
                                        y=x,
                                        kernel=weights,
                                        mode='transpose',
                                        strides=[1, 1])
                fb = tf.contrib.layers.batch_norm(
                    inputs=fb,
                    scale=True,
                    center=True,
                    fused=True,
                    renorm=False,
                    reuse=False,
                    scope=None,
                    param_initializers=self.bn_param_initializer,
                    updates_collections=None,
                    is_training=self.train)
                fb = tf.nn.relu(fb)

        # HGRU_TD
        td0_h1, l0_h2 = self.hgru_td0.run(fb, td0_h1, l0_h2)

        # Iterate loop
        i0 += 1
        return i0, x, l0_h1, l0_h2, td0_h1
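
full threads a loop counter i0 through its arguments and returns every input, which is the shape tf.while_loop expects of a body function. A hedged sketch of that wiring, where model, timesteps, and the initial hidden states are assumptions:

# Hypothetical rollout: iterate the full() step for `timesteps` steps.
i0 = tf.constant(0)

def condition(i0, x, l0_h1, l0_h2, td0_h1):
    return i0 < timesteps

loop_vars = [i0, x, l0_h1, l0_h2, td0_h1]
i0, x, l0_h1, l0_h2, td0_h1 = tf.while_loop(
    cond=condition,
    body=model.full,
    loop_vars=loop_vars,
    back_prop=True,
    swap_memory=False)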
Example 8
def build_model(data_tensor, reuse, training, output_shape):
    """Create the hgru from Learning long-range..."""
    use_aux = False
    if isinstance(output_shape, list):
        output_shape = output_shape[0]
    elif isinstance(output_shape, dict):
        nhot_shape = output_shape['aux']
        output_shape = output_shape['output']
        use_aux = True
    with tf.variable_scope('cnn', reuse=reuse):
        # Unclear if we should include l0 in the down/upsample cascade
        with tf.variable_scope('g1', reuse=reuse):
            # Downsample
            act11 = conv_block(
                x=data_tensor,
                name='l1_1',
                filters=64,
                training=training,
                reuse=reuse,
                pool=False)
            act12 = conv_block(
                x=act11,
                name='l1_2',
                filters=64,
                training=training,
                reuse=reuse,
                pool=False)
            poolact12 = pooling.max_pool(
                bottom=act12,
                name='l1_2_pool')

        with tf.variable_scope('g2', reuse=reuse):
            # Downsample
            act21 = conv_block(
                x=poolact12,
                name='l2_1',
                filters=128,
                training=training,
                reuse=reuse,
                pool=False)
            act22 = conv_block(
                x=act21,
                filters=128,
                name='l2_2',
                training=training,
                reuse=reuse,
                pool=False)
            poolact22 = pooling.max_pool(
                bottom=act22,
                name='l2_2_pool')

        with tf.variable_scope('g3', reuse=reuse):
            # Downsample
            act31 = conv_block(
                x=poolact22,
                name='l3_1',
                filters=256,
                training=training,
                reuse=reuse,
                pool=False)
            act32 = conv_block(
                x=act31,
                filters=256,
                name='l3_2',
                training=training,
                reuse=reuse,
                pool=False)
            act33 = conv_block(
                x=act32,
                filters=256,
                name='l3_3',
                training=training,
                reuse=reuse,
                pool=False)
            poolact33 = pooling.max_pool(
                bottom=act33,
                name='l3_3_pool')

        with tf.variable_scope('g4', reuse=reuse):
            # Downsample
            act41 = conv_block(
                x=poolact33,
                name='l4_1',
                filters=512,
                training=training,
                reuse=reuse,
                pool=False)
            act42 = conv_block(
                x=act41,
                filters=512,
                name='l4_2',
                training=training,
                reuse=reuse,
                pool=False)
            act43 = conv_block(
                x=act42,
                filters=512,
                name='l4_3',
                training=training,
                reuse=reuse,
                pool=False)
            poolact43 = pooling.max_pool(
                bottom=act43,
                name='l4_3_pool')

        with tf.variable_scope('g5', reuse=reuse):
            # Downsample
            act51 = conv_block(
                x=poolact43,
                name='l5_1',
                filters=512,
                training=training,
                reuse=reuse,
                pool=False)
            act52 = conv_block(
                x=act51,
                filters=512,
                name='l5_2',
                training=training,
                reuse=reuse,
                pool=False)
            act53 = conv_block(
                x=act52,
                filters=512,
                name='l5_3',
                training=training,
                reuse=reuse,
                pool=False)
            poolact53 = pooling.max_pool(
                bottom=act53,
                name='l5_3_pool')

        with tf.variable_scope('g5_skip', reuse=reuse):
            upact5 = up_block(
                inputs=poolact53,
                skip=act53,
                up_filters=512,
                name='ul5',
                training=training,
                reuse=reuse)

        with tf.variable_scope('g4_skip', reuse=reuse):
            upact4 = up_block(
                inputs=upact5,
                skip=act43,
                up_filters=512,
                name='ul4',
                training=training,
                reuse=reuse)

        with tf.variable_scope('g3_skip', reuse=reuse):
            upact3 = up_block(
                inputs=upact4,
                skip=act33,
                up_filters=256,
                name='ul3',
                training=training,
                reuse=reuse)

        with tf.variable_scope('g2_skip', reuse=reuse):
            upact2 = up_block(
                inputs=upact3,
                skip=act22,
                up_filters=128,
                name='ul2',
                training=training,
                reuse=reuse)

        with tf.variable_scope('g1_skip', reuse=reuse):
            upact1 = up_block(
                inputs=upact2,
                skip=act12,
                up_filters=64,
                name='ul1',
                training=training,
                reuse=reuse)

        with tf.variable_scope('readout_1', reuse=reuse):
            activity = conv.conv_layer(
                bottom=upact1,
                name='pre_readout_conv',
                num_filters=2,
                kernel_size=1,
                trainable=training,
                use_bias=False)
            pool_aux = {'pool_type': 'max'}
            activity = pooling.global_pool(
                bottom=activity,
                name='pre_readout_pool',
                aux=pool_aux)
            activity = normalization.batch(
                bottom=activity,
                renorm=True,
                name='readout_1_bn',
                training=training)

        with tf.variable_scope('readout_2', reuse=reuse):
            pre_activity = tf.layers.flatten(
                activity,
                name='flat_readout')
            activity = tf.layers.dense(
                inputs=pre_activity,
                units=output_shape)
        if use_aux:
            nhot = tf.layers.dense(inputs=pre_activity, units=nhot_shape)
        else:
            nhot = tf.constant(0.)
    extra_activities = {
        'activity': activity,
        'nhot': nhot
    }
    return activity, extra_activities
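
This variant accepts output_shape as a dict to enable the auxiliary n-hot readout. A hedged call sketch, with the placeholder and the aux width assumed:

images = tf.placeholder(tf.float32, [32, 150, 150, 1])
logits, extras = build_model(
    images,
    reuse=False,
    training=True,
    output_shape={'output': 2, 'aux': 26})
# extras['nhot'] holds the auxiliary logits (or the constant 0. if unused).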
Example 9
def build_model(data_tensor, reuse, training, output_shape):
    """Create the hgru from Learning long-range..."""
    if isinstance(output_shape, list):
        output_shape = output_shape[0]
    data_format = 'channels_last'
    conv_kernel = [
        [3, 3],
        [3, 3],
        [3, 3],
    ]
    up_kernel = [2, 2]
    filters = [28, 36, 48, 64, 80]
    with tf.variable_scope('cnn', reuse=reuse):
        # Unclear if we should include l0 in the down/upsample cascade
        with tf.variable_scope('in_embedding', reuse=reuse):
            in_emb = tf.layers.conv2d(inputs=data_tensor,
                                      filters=filters[0],
                                      kernel_size=5,
                                      name='l0',
                                      strides=(1, 1),
                                      padding='same',
                                      activation=tf.nn.elu,
                                      data_format=data_format,
                                      trainable=training,
                                      use_bias=True)

        # Downsample
        l1 = conv.down_block(layer_name='l1',
                             bottom=in_emb,
                             kernel_size=conv_kernel,
                             num_filters=filters[1],
                             training=training,
                             reuse=reuse)
        l2 = conv.down_block(layer_name='l2',
                             bottom=l1,
                             kernel_size=conv_kernel,
                             num_filters=filters[2],
                             training=training,
                             reuse=reuse)
        l3 = conv.down_block(layer_name='l3',
                             bottom=l2,
                             kernel_size=conv_kernel,
                             num_filters=filters[3],
                             training=training,
                             reuse=reuse)
        l4 = conv.down_block(layer_name='l4',
                             bottom=l3,
                             kernel_size=conv_kernel,
                             num_filters=filters[4],
                             training=training,
                             reuse=reuse)

        # Upsample
        ul3 = conv.up_block(layer_name='ul3',
                            bottom=l4,
                            skip_activity=l3,
                            kernel_size=up_kernel,
                            num_filters=filters[3],
                            training=training,
                            reuse=reuse)
        ul3 = conv.down_block(layer_name='ul3_d',
                              bottom=ul3,
                              kernel_size=conv_kernel,
                              num_filters=filters[3],
                              training=training,
                              reuse=reuse,
                              include_pool=False)
        ul2 = conv.up_block(layer_name='ul2',
                            bottom=ul3,
                            skip_activity=l2,
                            kernel_size=up_kernel,
                            num_filters=filters[2],
                            training=training,
                            reuse=reuse)
        ul2 = conv.down_block(layer_name='ul2_d',
                              bottom=ul2,
                              kernel_size=conv_kernel,
                              num_filters=filters[2],
                              training=training,
                              reuse=reuse,
                              include_pool=False)
        ul1 = conv.up_block(layer_name='ul1',
                            bottom=ul2,
                            skip_activity=l1,
                            kernel_size=up_kernel,
                            num_filters=filters[1],
                            training=training,
                            reuse=reuse)
        ul1 = conv.down_block(layer_name='ul1_d',
                              bottom=ul1,
                              kernel_size=conv_kernel,
                              num_filters=filters[1],
                              training=training,
                              reuse=reuse,
                              include_pool=False)
        ul0 = conv.up_block(layer_name='ul0',
                            bottom=ul1,
                            skip_activity=in_emb,
                            kernel_size=up_kernel,
                            num_filters=filters[0],
                            training=training,
                            reuse=reuse)

        with tf.variable_scope('readout_1', reuse=reuse):
            activity = conv.conv_layer(bottom=ul0,
                                       name='pre_readout_conv',
                                       num_filters=2,
                                       kernel_size=1,
                                       trainable=training,
                                       use_bias=False)
            pool_aux = {'pool_type': 'max'}
            activity = pooling.global_pool(bottom=activity,
                                           name='pre_readout_pool',
                                           aux=pool_aux)
            activity = normalization.batch(bottom=activity,
                                           renorm=True,
                                           name='readout_1_bn',
                                           training=training)

        with tf.variable_scope('readout_2', reuse=reuse):
            activity = tf.layers.flatten(activity, name='flat_readout')
            activity = tf.layers.dense(inputs=activity, units=output_shape)
    extra_activities = {'l4': l4}
    return activity, extra_activities
Example 10
def build_model(data_tensor, reuse, training):
    """Create the hgru from Learning long-range..."""
    with tf.variable_scope('cnn', reuse=reuse):
        # Unclear if we should include l0 in the down/upsample cascade
        with tf.variable_scope('g1', reuse=reuse):
            # Downsample
            act11 = conv_block(x=data_tensor,
                               name='l1_1',
                               filters=64,
                               training=training,
                               reuse=reuse,
                               pool=False)
            act12 = conv_block(x=act11,
                               name='l1_2',
                               filters=64,
                               training=training,
                               reuse=reuse)

        with tf.variable_scope('g2', reuse=reuse):
            # Downsample
            act21 = conv_block(x=act12,
                               name='l2_1',
                               filters=128,
                               training=training,
                               reuse=reuse,
                               pool=False)
            act22 = conv_block(x=act21,
                               filters=128,
                               name='l2_2',
                               training=training,
                               reuse=reuse)

        with tf.variable_scope('g3', reuse=reuse):
            # Downsample
            act31 = conv_block(x=act22,
                               name='l3_1',
                               filters=256,
                               training=training,
                               reuse=reuse,
                               pool=False)
            act32 = conv_block(x=act31,
                               filters=256,
                               name='l3_2',
                               training=training,
                               reuse=reuse,
                               pool=False)
            act33 = conv_block(x=act32,
                               filters=256,
                               name='l3_3',
                               training=training,
                               reuse=reuse)

        with tf.variable_scope('g4', reuse=reuse):
            # Downsample
            act41 = conv_block(x=act33,
                               name='l4_1',
                               filters=512,
                               training=training,
                               reuse=reuse,
                               pool=False)
            act42 = conv_block(x=act41,
                               filters=512,
                               name='l4_2',
                               training=training,
                               reuse=reuse,
                               pool=False)
            act43 = conv_block(x=act42,
                               filters=512,
                               name='l4_3',
                               training=training,
                               reuse=reuse)

        with tf.variable_scope('g5', reuse=reuse):
            # Downsample
            act51 = conv_block(x=act43,
                               name='l5_1',
                               filters=512,
                               training=training,
                               reuse=reuse,
                               pool=False)
            act52 = conv_block(x=act51,
                               filters=512,
                               name='l5_2',
                               training=training,
                               reuse=reuse,
                               pool=False)
            act53 = conv_block(x=act52,
                               filters=512,
                               name='l5_3',
                               training=training,
                               reuse=reuse)

        with tf.variable_scope('resize', reuse=reuse):
            sel_layers = [act12, act22, act33, act43, act53]
            target_size = act12.get_shape().as_list()[1:3]
            for idx, l in enumerate(sel_layers):
                sel_layers[idx] = misc.resize(x=l,
                                              size=target_size,
                                              method='bilinear')
            sel_layers = tf.concat(sel_layers, axis=-1)

        with tf.variable_scope('readout_1', reuse=reuse):
            activity = conv.conv_layer(bottom=sel_layers,
                                       name='pre_readout_conv',
                                       num_filters=2,
                                       kernel_size=1,
                                       trainable=training,
                                       use_bias=False)
            pool_aux = {'pool_type': 'max'}
            activity = pooling.global_pool(bottom=activity,
                                           name='pre_readout_pool',
                                           aux=pool_aux)
            activity = normalization.batch(bottom=activity,
                                           renorm=True,
                                           name='readout_1_bn',
                                           training=training)

        with tf.variable_scope('readout_2', reuse=reuse):
            activity = tf.layers.flatten(activity, name='flat_readout')
            activity = tf.layers.dense(inputs=activity, units=2)
    return activity, activity
Example 11
def build_model(data_tensor, reuse, training, output_shape):
    """Create the hgru from Learning long-range..."""
    use_aux = False
    if isinstance(output_shape, list):
        output_shape = output_shape[0]
    elif isinstance(output_shape, dict):
        nhot_shape = output_shape['aux']
        output_shape = output_shape['output']
        use_aux = True
    with tf.variable_scope('cnn', reuse=reuse):
        with tf.variable_scope('input', reuse=reuse):
            in_emb = tf.layers.conv2d(inputs=data_tensor,
                                      filters=8,
                                      kernel_size=11,
                                      name='l0',
                                      strides=(1, 1),
                                      padding='same',
                                      activation=tf.nn.elu,
                                      trainable=training,
                                      use_bias=True)
            in_emb = pooling.max_pool(bottom=in_emb,
                                      name='p1',
                                      k=[1, 2, 2, 1],
                                      s=[1, 2, 2, 1])
            in_emb = tf.layers.conv2d(inputs=in_emb,
                                      filters=8,
                                      kernel_size=7,
                                      name='l1',
                                      strides=(1, 1),
                                      padding='same',
                                      activation=tf.nn.elu,
                                      trainable=training,
                                      use_bias=True)
            layer_hgru = hgru.hGRU('hgru_1',
                                   x_shape=in_emb.get_shape().as_list(),
                                   timesteps=8,
                                   h_ext=11,
                                   strides=[1, 1, 1, 1],
                                   padding='SAME',
                                   aux={
                                       'reuse': False,
                                       'constrain': False
                                   },
                                   train=training)
            h2 = layer_hgru.build(in_emb)
            h2 = normalization.batch(bottom=h2,
                                     renorm=True,
                                     name='hgru_bn',
                                     training=training)

        with tf.variable_scope('readout_1', reuse=reuse):
            activity = conv.conv_layer(bottom=h2,
                                       name='pre_readout_conv',
                                       num_filters=2,
                                       kernel_size=1,
                                       trainable=training,
                                       use_bias=False)
            pool_aux = {'pool_type': 'max'}
            activity = pooling.global_pool(bottom=activity,
                                           name='pre_readout_pool',
                                           aux=pool_aux)
            activity = normalization.batch(bottom=activity,
                                           renorm=True,
                                           name='readout_1_bn',
                                           training=training)

        with tf.variable_scope('readout_2', reuse=reuse):
            pre_activity = tf.layers.flatten(activity, name='flat_readout')
            activity = tf.layers.dense(inputs=pre_activity, units=output_shape)
        if use_aux:
            nhot = tf.layers.dense(inputs=pre_activity, units=nhot_shape)
        else:
            nhot = tf.constant(0.)
    extra_activities = {'activity': activity, 'nhot': nhot}
    return activity, extra_activities
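
Finally, a sketch of how the build_model variants that take an output_shape would typically be attached to a loss and optimizer in TF1; the placeholder shapes and learning rate are assumptions:

images = tf.placeholder(tf.float32, [32, 150, 150, 1])
labels = tf.placeholder(tf.int64, [32])

logits, extra_activities = build_model(
    images, reuse=False, training=True, output_shape=2)

loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits))

# Update batch-norm moving averages alongside the train op.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)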