Code example #1
File: qanet.py Project: colinsongf/qa-reader
 def _encode(self):
     with tf.variable_scope("passage_question_encoding"):
         self.p_encode = residual_block(self.p_emb,
                                        num_blocks=1,
                                        num_conv_layers=4,
                                        kernel_size=7,
                                        mask=None,
                                        num_filters=self.hidden_size,
                                        num_heads=self.num_head,
                                        seq_len=self.p_length,
                                        scope="Encoder_Residual_Block",
                                        bias=False,
                                        dropout=self.dropout)
         self.q_encode = residual_block(
             self.q_emb,
             num_blocks=1,
             num_conv_layers=4,
             kernel_size=7,
             mask=None,
             num_filters=self.hidden_size,
             num_heads=self.num_head,
             seq_len=self.q_length,
             scope="Encoder_Residual_Block",
             reuse=True,  # Share the weights between passage and question
             bias=False,
             dropout=self.dropout)
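
For reference, neither qanet.py snippet shows residual_block itself; it lives in the project's layer library. Below is a heavily simplified stand-in for its interface, assuming plain 1-D convolutions with additive skips. The real QANet block also applies position encodings, depthwise separable convolutions, multi-head self-attention (which is where mask, num_heads, and seq_len come in), and layer normalization, none of which are reproduced here.

import tensorflow as tf

def residual_block(inputs, num_blocks, num_conv_layers, kernel_size, mask,
                   num_filters, num_heads, seq_len, scope="Residual_Block",
                   bias=True, dropout=0.0, reuse=None):
    # Simplified sketch only; not the qa-reader implementation.
    with tf.variable_scope(scope, reuse=reuse):
        # Project to num_filters so the residual additions below line up:
        outputs = tf.layers.conv1d(inputs, num_filters, 1,
                                   name="input_projection")
        for b in range(num_blocks):
            for l in range(num_conv_layers):
                residual = outputs
                outputs = tf.layers.conv1d(outputs, num_filters, kernel_size,
                                           padding='same', use_bias=bias,
                                           name="conv_{}_{}".format(b, l))
                outputs = tf.nn.dropout(outputs, 1.0 - dropout)
                outputs = outputs + residual
        # mask, num_heads, and seq_len drive the attention sub-layer in the
        # real block; they are unused in this reduced sketch.
        return outputs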
Code example #2
File: models.py Project: anniegao2007/faces-BIM
def get_evaluate_model(width, height):
    input_o = layers.Input(shape=(height, width, 3),
                           dtype='float32',
                           name='input_o')

    c1 = layers.Conv2D(32, (9, 9), strides=1, padding='same',
                       name='conv_1')(input_o)
    c1 = layers.BatchNormalization(name='normal_1')(c1)
    c1 = layers.Activation('relu', name='relu_1')(c1)

    c2 = layers.Conv2D(64, (3, 3), strides=2, padding='same',
                       name='conv_2')(c1)
    c2 = layers.BatchNormalization(name='normal_2')(c2)
    c2 = layers.Activation('relu', name='relu_2')(c2)

    c3 = layers.Conv2D(128, (3, 3), strides=2, padding='same',
                       name='conv_3')(c2)
    c3 = layers.BatchNormalization(name='normal_3')(c3)
    c3 = layers.Activation('relu', name='relu_3')(c3)

    r1 = residual_block(c3, 1)
    r2 = residual_block(r1, 2)
    r3 = residual_block(r2, 3)
    r4 = residual_block(r3, 4)
    r5 = residual_block(r4, 5)

    d1 = layers.Conv2DTranspose(64, (3, 3),
                                strides=2,
                                padding='same',
                                name='conv_4')(r5)
    d1 = layers.BatchNormalization(name='normal_4')(d1)
    d1 = layers.Activation('relu', name='relu_4')(d1)

    d2 = layers.Conv2DTranspose(32, (3, 3),
                                strides=2,
                                padding='same',
                                name='conv_5')(d1)
    d2 = layers.BatchNormalization(name='normal_5')(d2)
    d2 = layers.Activation('relu', name='relu_5')(d2)

    c4 = layers.Conv2D(3, (9, 9), strides=1, padding='same', name='conv_6')(d2)
    c4 = layers.BatchNormalization(name='normal_6')(c4)
    c4 = layers.Activation('tanh', name='tanh_1')(c4)
    c4 = OutputScale(name='output')(c4)

    model = Model([input_o], c4)
    print("evaluate model built successfully!")
    return model
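
models.py calls residual_block(input, index) without showing it; here is a minimal sketch consistent with these call sites, assuming the usual transform-net design of two 3x3 convolutions with batch normalization and an additive skip. The kernel size, activation placement, and layer names are assumptions, not taken from the project.

def residual_block(x, idx):
    filters = int(x.shape[-1])  # 128 at the point these blocks are applied
    r = layers.Conv2D(filters, (3, 3), padding='same',
                      name='res_conv_{}_1'.format(idx))(x)
    r = layers.BatchNormalization(name='res_normal_{}_1'.format(idx))(r)
    r = layers.Activation('relu', name='res_relu_{}'.format(idx))(r)
    r = layers.Conv2D(filters, (3, 3), padding='same',
                      name='res_conv_{}_2'.format(idx))(r)
    r = layers.BatchNormalization(name='res_normal_{}_2'.format(idx))(r)
    return layers.Add(name='res_add_{}'.format(idx))([x, r])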
Code example #3
def relation_block(self, processed_obs):
    entities = build_entities(processed_obs, self.reduce_obs)
    print('entities:', entities)
    # [B,n_heads,N,Depth=D+2]
    MHDPA_output, self.relations = MHDPA(entities, n_heads=2)
    print('MHDPA_output', MHDPA_output)
    # [B,n_heads,N,Depth]
    residual_output = residual_block(entities, MHDPA_output)
    print('residual_output', residual_output)
    # max_pooling [B,n_heads,N,Depth] --> [B,n_heads,Depth]
    maxpooling_output = tf.reduce_max(residual_output, axis=2)
    print('maxpooling_output', maxpooling_output)
    # [B,n_heads*Depth]
    # output = tf.layers.flatten(maxpooling_output)
    # output = layerNorm(output, "relation_layerNorm")
    # print('relation_layerNorm', output)
    return maxpooling_output
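
Here residual_block takes two tensors rather than one. One plausible reading, following the common relational-reasoning pattern of a post-attention projection added back onto the entities (the dense width, activation, and broadcasting below are assumptions, not taken from the source):

def residual_block(entities, mhdpa_output):
    # entities: [B, N, Depth]; mhdpa_output: [B, n_heads, N, Depth]
    depth = entities.get_shape().as_list()[-1]
    # Post-attention projection back to the entity depth:
    h = tf.layers.dense(mhdpa_output, depth, activation=tf.nn.relu)
    # Skip connection, broadcast over the heads axis:
    return entities[:, tf.newaxis, :, :] + h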
Code example #4
File: qanet.py Project: colinsongf/qa-reader
 def _fuse(self):
     with tf.variable_scope("model_encoder_layer"):
         inputs = tf.concat(self.attention_outputs, axis=-1)
         self.enc = [
             conv(inputs, self.hidden_size, name="input_projection")
         ]
         for i in range(3):
             if i % 2 == 0:  # apply dropout on every other block
                 self.enc[i] = tf.nn.dropout(self.enc[i],
                                             1.0 - self.dropout)
             self.enc.append(
                 residual_block(self.enc[i],
                                num_blocks=7,
                                num_conv_layers=2,
                                kernel_size=5,
                                mask=None,
                                num_filters=self.hidden_size,
                                num_heads=self.num_head,
                                seq_len=self.p_length,
                                scope="Model_Encoder",
                                bias=False,
                                reuse=True if i > 0 else None,
                                dropout=self.dropout))
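
The three stacked outputs gathered in self.enc are what the answer-pointer layer consumes next. A hedged sketch of that step, following the pointer layout of the QANet paper and reusing the conv helper seen above (the method name and details are assumptions, not necessarily qa-reader's code):

 def _decode(self):
     with tf.variable_scope("output_layer"):
         start_logits = tf.squeeze(
             conv(tf.concat([self.enc[1], self.enc[2]], axis=-1),
                  1, name="start_pointer"), -1)
         end_logits = tf.squeeze(
             conv(tf.concat([self.enc[1], self.enc[3]], axis=-1),
                  1, name="end_pointer"), -1)
         self.start_probs = tf.nn.softmax(start_logits)
         self.end_probs = tf.nn.softmax(end_logits)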
Code example #5
File: resnet.py Project: coreyjadams/NextResNet
    def _build_network(self, input_placeholder):

        x = input_placeholder
        # Initial convolution to get to the correct number of filters:
        x = tf.layers.conv3d(x,
                             self._params['N_INITIAL_FILTERS'],
                             kernel_size=[5, 5, 5],
                             strides=[1, 1, 1],
                             padding='same',
                             use_bias=False,
                             trainable=self._params['TRAINING'],
                             name="Conv2DInitial",
                             reuse=None)

        # ReLU:
        x = tf.nn.relu(x)

        # Begin the process of residual blocks and downsampling:
        for i in range(self._params['NETWORK_DEPTH']):

            for j in range(self._params['RESIDUAL_BLOCKS_PER_LAYER']):
                x = residual_block(
                    x,
                    self._params['TRAINING'],
                    batch_norm=self._params['BATCH_NORMALIZATION'],
                    name="resblock_down_{0}_{1}".format(i, j))

            x = downsample_block(
                x,
                self._params['TRAINING'],
                batch_norm=self._params['BATCH_NORMALIZATION'],
                name="downsample_{0}".format(i))

        # At the bottom, do another residual block:
        for j in range(self._params['RESIDUAL_BLOCKS_PER_LAYER']):
            x = residual_block(x,
                               self._params['TRAINING'],
                               batch_norm=self._params['BATCH_NORMALIZATION'],
                               name="deepest_block_{0}".format(j))

        # At this point the network has been downsampled several times and
        # carries many filters; a 1x1 bottleneck maps it onto NUM_LABELS channels:
        x = tf.layers.conv3d(
            x,
            self._params['NUM_LABELS'],
            kernel_size=[1, 1, 1],
            strides=[1, 1, 1],
            padding='same',
            activation=None,
            use_bias=False,
            trainable=self._params['TRAINING'],
            name="BottleneckConv2D",
        )

        # Apply global average pooling to each filter to get the values for signal/bkg

        # For global average pooling, need to get the shape of the input:
        shape = (x.shape[1], x.shape[2], x.shape[3])

        x = tf.nn.pool(x,
                       window_shape=shape,
                       pooling_type="AVG",
                       padding="VALID",
                       dilation_rate=None,
                       strides=None,
                       name="GlobalAveragePool",
                       data_format=None)

        # Reshape to remove empty dimensions:
        x = tf.reshape(x, [tf.shape(x)[0], self._params['NUM_LABELS']],
                       name="global_pooling_reshape")

        # The final activation is a softmax across the pixels; it gets applied in the loss function:
        # x = tf.nn.softmax(x)
        return x
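
resnet.py imports residual_block and downsample_block from a companion module; a minimal sketch of the 3-D residual block, assuming the conventional two-convolution layout with optional batch normalization (kernel size and norm placement are assumptions):

def residual_block(x, training, batch_norm=True, name="resblock", reuse=None):
    with tf.variable_scope(name, reuse=reuse):
        n_filters = x.get_shape().as_list()[-1]
        h = x
        for i in range(2):
            if batch_norm:
                h = tf.layers.batch_normalization(h, training=training,
                                                  name="bn_{}".format(i))
            h = tf.nn.relu(h)
            h = tf.layers.conv3d(h, n_filters, kernel_size=[3, 3, 3],
                                 strides=[1, 1, 1], padding='same',
                                 use_bias=False, name="conv_{}".format(i))
        # Additive skip connection keeps the shape unchanged:
        return x + h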
Code example #6
    def _build_network(self, input_placeholder):

        x = input_placeholder

        # Initially, downsample by a factor of 2 to make the input data smaller:
        x = tf.layers.average_pooling2d(x,
                                        2,
                                        2,
                                        padding='same',
                                        name="InitialAveragePooling")

        # The per-plane towers are concatenated later on and progress together from there

        if self._params['SHARE_PLANE_WEIGHTS']:
            sharing = True
        else:
            sharing = False

        verbose = True

        if verbose:
            print("Initial shape: " + str(x.get_shape()))
        n_planes = self._params['NPLANES']

        x = tf.split(x, n_planes * [1], -1)
        if verbose:
            for p in range(len(x)):
                print("Plane {0} initial shape:".format(p) +
                      str(x[p].get_shape()))

        # Initial convolution to get to the correct number of filters:
        for p in range(len(x)):

            name = "Conv2DInitial"
            if not sharing:
                name += "_plane{0}".format(p)
            # Only reuse weights after the first plane:
            if p == 0:
                reuse = False
            else:
                reuse = sharing
            x[p] = tf.layers.conv2d(x[p],
                                    self._params['N_INITIAL_FILTERS'],
                                    kernel_size=[7, 7],
                                    strides=[2, 2],
                                    padding='same',
                                    use_bias=False,
                                    trainable=self._params['TRAINING'],
                                    name=name,
                                    reuse=reuse)

            # ReLU:
            x[p] = tf.nn.relu(x[p])

        if verbose:
            print("After initial convolution: ")

            for p in range(len(x)):
                print("Plane {0}".format(p) + str(x[p].get_shape()))

        for p in range(len(x)):
            name = "initial_resblock1"
            if not sharing:
                name += "_plane{0}".format(p)

            # Only reuse weights after the first plane:
            if p == 0:
                reuse = False
            else:
                reuse = sharing

            x[p] = residual_block(x[p],
                                  self._params['TRAINING'],
                                  batch_norm=True,
                                  reuse=reuse,
                                  name=name)
            name = "initial_resblock2"
            if not sharing:
                name += "_plane{0}".format(p)

            x[p] = residual_block(x[p],
                                  self._params['TRAINING'],
                                  batch_norm=True,
                                  reuse=reuse,
                                  name=name)

        # Begin the process of residual blocks and downsampling:
        for p in range(len(x)):
            for i in range(self._params['NETWORK_DEPTH_PRE_MERGE']):

                name = "downsample_{0}".format(i)
                if not sharing:
                    name += "_plane{0}".format(p)
                # Only reuse weights after the first plane:
                if p == 0:
                    reuse = False
                else:
                    reuse = sharing

                x[p] = downsample_block(x[p],
                                        self._params['TRAINING'],
                                        batch_norm=True,
                                        reuse=reuse,
                                        name=name)

                for j in range(self._params['RESIDUAL_BLOCKS_PER_LAYER']):
                    name = "resblock_{0}_{1}".format(i, j)
                    if not sharing:
                        name += "_plane{0}".format(p)
                    x[p] = residual_block(x[p],
                                          self._params['TRAINING'],
                                          batch_norm=True,
                                          reuse=reuse,
                                          name=name)
                if verbose:
                    print("Plane {p}, layer {i}: x[{p}].get_shape(): {s}".format(
                        p=p, i=i, s=x[p].get_shape()))

                # Add a bottleneck to prevent the number of layers from exploding:
                n_current_filters = x[p].get_shape().as_list()[-1]
                if n_current_filters > self._params['N_MAX_FILTERS']:
                    n_filters = self._params['N_MAX_FILTERS']
                else:
                    n_filters = n_current_filters
                name = "Bottleneck_downsample_{0}".format(i)
                if not sharing:
                    name += "_plane{0}".format(p)
                x[p] = tf.layers.conv2d(x[p],
                                        n_filters,
                                        kernel_size=[1, 1],
                                        strides=[1, 1],
                                        padding='same',
                                        activation=None,
                                        use_bias=False,
                                        trainable=self._params['TRAINING'],
                                        reuse=reuse,
                                        name=name)

        # print "Reached the deepest layer."

        # Here, concatenate all the planes together before the residual block:
        x = tf.concat(x, axis=-1)

        if verbose:
            print "Shape after concatenation: " + str(x.get_shape())

        # After merging the planes, continue the downsample/residual pattern:
        for i in range(self._params['NETWORK_DEPTH_POST_MERGE']):

            x = downsample_block(x,
                                 self._params['TRAINING'],
                                 batch_norm=True,
                                 name="downsample_postmerge{0}".format(i))

            for j in range(self._params['RESIDUAL_BLOCKS_PER_LAYER']):
                x = residual_block(x,
                                   self._params['TRAINING'],
                                   batch_norm=True,
                                   name="resblock_postmerge_{0}_{1}".format(
                                       i, j))

            # Apply bottlenecking here to keep the number of filters in check:

            x = tf.layers.conv2d(
                x,
                self._params['N_MAX_FILTERS'],
                kernel_size=[1, 1],
                strides=[1, 1],
                padding='same',
                activation=None,
                use_bias=False,
                trainable=self._params['TRAINING'],
                name="Bottleneck_downsample_merged_{0}".format(i))

        if verbose:
            print "Shape after final block: " + str(x.get_shape())

        # Apply a bottle neck to get the right shape:
        x = tf.layers.conv2d(x,
                             self._params['NUM_LABELS'],
                             kernel_size=[1, 1],
                             strides=[1, 1],
                             padding='same',
                             activation=None,
                             use_bias=False,
                             trainable=self._params['TRAINING'],
                             name="BottleneckConv2D")

        if verbose:
            print "Shape after bottleneck: " + str(x.get_shape())

        # Apply global average pooling to get the right final shape:
        shape = (x.shape[1], x.shape[2])
        x = tf.nn.pool(x,
                       window_shape=shape,
                       pooling_type="AVG",
                       padding="VALID",
                       dilation_rate=None,
                       strides=None,
                       name="GlobalAveragePool",
                       data_format=None)

        if verbose:
            print "Shape after pooling: " + str(x.get_shape())

        # Reshape to the right shape for logits:
        x = tf.reshape(x, [tf.shape(x)[0], self._params['NUM_LABELS']],
                       name="global_pooling_reshape")

        if verbose:
            print "Final shape: " + str(x.get_shape())

        return x
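
The companion downsample_block is likewise not shown. A minimal 2-D sketch consistent with the calls above, assuming a strided convolution that doubles the filter count (both the kernel size and the doubling are assumptions):

def downsample_block(x, training, batch_norm=True, name="downsample",
                     reuse=None):
    with tf.variable_scope(name, reuse=reuse):
        n_filters = 2 * x.get_shape().as_list()[-1]
        # Stride-2 convolution halves each spatial dimension:
        x = tf.layers.conv2d(x, n_filters, kernel_size=[3, 3],
                             strides=[2, 2], padding='same',
                             use_bias=False, name="conv")
        if batch_norm:
            x = tf.layers.batch_normalization(x, training=training, name="bn")
        return tf.nn.relu(x)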
Code example #7
File: generator.py Project: DeepLearnPhysics/DCGAN
def build_generator(
    input_tensor,
    n_initial_filters=64,
    n_blocks=2,
    is_training=True,
    reuse=False,
):
    """
    Build a DC GAN generator with deep convolutional layers
    Input_tensor is assumed to be reshaped into a (BATCH, L, H, F) type format (rectangular)
    """

    with tf.variable_scope("generator"):
        # Map the input to a small but many-filtered setup:

        input_shape = input_tensor.get_shape()
        print(input_shape)
        # Want the output tensor to have a small number of spatial dimensions (7x7 for mnist)

        # output size will be (W - F + 2P)/S + 1
        # an input of 10 with F=5, P=0, S=2 gives floor((10 - 5)/2) + 1 = 3
        # To get a specific output size (here == 7) with P = 0 and S = 1, set F so that
        # 7 = (10 - F)/1 + 1 -> 6 = 10 - F, or F = 4

        x = tf.layers.conv2d(
            input_tensor,
            n_initial_filters,
            kernel_size=[4, 4],
            strides=[1, 1],
            padding='valid',
            activation=None,
            use_bias=False,
            kernel_initializer=None,  # automatically uses Xavier initializer
            kernel_regularizer=None,
            activity_regularizer=None,
            trainable=is_training,
            name="Conv2D",
            reuse=None)

        print(x.get_shape())

        # Apply residual mappings and upsamplings:
        for block in range(n_blocks):
            x = residual_block(x,
                               is_training=is_training,
                               kernel=[3, 3],
                               stride=[1, 1],
                               alpha=0.0,
                               name="res_block_{}".format(block),
                               reuse=reuse)

            x = upsample_block(x,
                               is_training=is_training,
                               kernel=[3, 3],
                               stride=[1, 1],
                               name="res_block_upsample_{}".format(block))

        # Apply a final convolution to map down to a single output filter:
        x = tf.layers.conv2d(
            x,
            1,
            kernel_size=[3, 3],
            strides=[1, 1],
            padding='same',
            activation=None,
            use_bias=False,
            kernel_initializer=None,  # automatically uses Xavier initializer
            kernel_regularizer=None,
            activity_regularizer=None,
            trainable=is_training,
            name="FinalConv2D1x1",
            reuse=None)

        # For the final activation, apply tanh:
        return tf.nn.tanh(x)
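
upsample_block is the mirror image of the downsampling used in the discriminator; a minimal sketch assuming a stride-2 transposed convolution that halves the filter count. Every detail below is an assumption, and the stride argument is accepted for interface parity with the call sites above but unused here.

def upsample_block(x, is_training=True, kernel=[3, 3], stride=[1, 1],
                   name="upsample"):
    with tf.variable_scope(name):
        # Assume a fixed 2x spatial upsampling with half the filters:
        n_filters = max(1, x.get_shape().as_list()[-1] // 2)
        x = tf.layers.conv2d_transpose(x, n_filters, kernel_size=kernel,
                                       strides=[2, 2], padding='same',
                                       use_bias=False, trainable=is_training)
        x = tf.layers.batch_normalization(x, training=is_training)
        return tf.nn.relu(x)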
Code example #8
File: classifier.py Project: DeepLearnPhysics/DCGAN
def build_classifier(input_tensor, 
                     n_output_classes=10,
                     is_training=True,
                     n_initial_filters=12,
                     initial_kernel=3,
                     initial_stride=1,
                     n_blocks=4,
                     downsample_interval=1):

    with tf.variable_scope("mnist_classifier"):

        # Initial convolutional layer:
        x = tf.layers.conv2d(input_tensor,
                             n_initial_filters,
                             kernel_size=(initial_kernel,
                                          initial_kernel),
                             strides=(initial_stride,
                                      initial_stride),
                             padding='same',
                             activation=None,
                             use_bias=False,
                             trainable=is_training,
                             name="InitialConv2D",
                             reuse=None)

        for i in range(n_blocks):

            if i != 0 and i % downsample_interval == 0:
                x = downsample_block(x, name="res_block_downsample_{}".format(i),
                                     is_training=is_training)
            else:
                x = residual_block(x, name="res_block_{}".format(i),
                                   is_training=is_training)

        # A final convolution to map the features onto the right space:
        with tf.variable_scope("final_pooling"):
            # Batch normalization is applied first:
            x = tf.layers.batch_normalization(x,
                                              axis=-1,
                                              momentum=0.99,
                                              epsilon=0.001,
                                              center=True,
                                              scale=True,
                                              beta_initializer=tf.zeros_initializer(),
                                              gamma_initializer=tf.ones_initializer(),
                                              moving_mean_initializer=tf.zeros_initializer(),
                                              moving_variance_initializer=tf.ones_initializer(),
                                              beta_regularizer=None,
                                              gamma_regularizer=None,
                                              training=is_training,
                                              trainable=is_training,
                                              name="BatchNorm",
                                              reuse=None)

            # ReLU:
            x = tf.nn.relu(x, name="final_pooling")

            x = tf.layers.conv2d(x,
                                 n_output_classes,
                                 kernel_size=(1, 1),
                                 strides=(1, 1),
                                 padding='same',
                                 data_format='channels_last',
                                 dilation_rate=(1, 1),
                                 activation=None,
                                 use_bias=False,
                                 kernel_initializer=None,  # automatically uses Xavier initializer
                                 kernel_regularizer=None,
                                 bias_regularizer=None,
                                 activity_regularizer=None,
                                 trainable=is_training,
                                 name="Conv2DBottleNeck",
                                 # name="convolution_globalpool_bottleneck1x1",
                                 reuse=None)

            # For global average pooling, need to get the shape of the input:
            shape = (x.shape[1], x.shape[2])

            x = tf.nn.pool(x,
                           window_shape=shape,
                           pooling_type="AVG",
                           padding="VALID",
                           dilation_rate=None,
                           strides=None,
                           name="GlobalAveragePool",
                           data_format=None)

            # Reshape to remove empty dimensions:
            x = tf.reshape(x, [tf.shape(x)[0], n_output_classes],
                           name="global_pooling_reshape")
            # Apply the activation:
            x = tf.nn.softmax(x, axis=-1)

        return x
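
A hypothetical way to wire build_classifier up (the placeholder shape is an assumption for illustration):

images = tf.placeholder(tf.float32, [None, 28, 28, 1], name="images")
probabilities = build_classifier(images, n_output_classes=10)
# The softmax is already applied inside, so predictions come straight from argmax:
predictions = tf.argmax(probabilities, axis=-1)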
Code example #9
    def _build_network(self, input_placeholder):

        x = input_placeholder

        # We break the initial filters up into parallel U-ResNets.
        # The filters are concatenated at the deepest level
        # and then split again into the parallel chains.

        verbose = False

        # print x.get_shape()
        n_planes = self._params['NPLANES']

        if self._params['SHARE_PLANE_WEIGHTS']:
            sharing = True
        else:
            sharing = False

        x = tf.split(x, n_planes * [1], -1)
        # for p in range(len(x)):
        #     print x[p].get_shape()

        # Initial convolution to get to the correct number of filters:
        for p in range(len(x)):
            name = "Conv2DInitial"
            reuse = False
            if not sharing:
                name += "_plane{0}".format(p)
            if sharing and p != 0:
                reuse = True

            if verbose:
                print "Name: {0} + reuse: {1}".format(name, reuse)

            x[p] = tf.layers.conv2d(x[p], self._params['N_INITIAL_FILTERS'],
                                    kernel_size=[7, 7],
                                    strides=[1, 1],
                                    padding='same',
                                    use_bias=False,
                                    trainable=self._params['TRAINING'],
                                    name=name,
                                    reuse=reuse)

            # ReLU:
            x[p] = tf.nn.relu(x[p])

        # for p in range(len(x)):
        #     print x[p].get_shape()

        # Keep track of the outputs of the residual blocks before downsampling,
        # to feed them back in on the upsampling side.

        network_filters = [[] for p in range(len(x))]

        # Begin the process of residual blocks and downsampling:
        for p in range(len(x)):
            for i in range(self._params['NETWORK_DEPTH']):

                for j in range(self._params['RESIDUAL_BLOCKS_PER_LAYER']):
                    name = "resblock_down"
                    reuse = False
                    if not sharing:
                        name += "_plane{0}".format(p)
                    if sharing and p != 0:
                        reuse = True

                    name += "_{0}_{1}".format(i, j)

                    if verbose:
                        print("Name: {0} + reuse: {1}".format(name, reuse))

                    x[p] = residual_block(x[p], self._params['TRAINING'],
                                          batch_norm=self._params['BATCH_NORM'],
                                          name=name,
                                          reuse=reuse)

                name = "downsample"
                reuse = False
                if not sharing:
                    name += "_plane{0}".format(p)
                if sharing and p != 0:
                    reuse = True

                name += "_{0}".format(i)

                if verbose:
                    print "Name: {0} + reuse: {1}".format(name, reuse)

                network_filters[p].append(x[p])
                x[p] = downsample_block(x[p], self._params['TRAINING'],
                                        batch_norm=self._params['BATCH_NORM'],
                                        name=name,
                                        reuse=reuse)

                # print "Plane {p}, layer {i}: x[{p}].get_shape(): {s}".format(
                #     p=p, i=i, s=x[p].get_shape())

        # print "Reached the deepest layer."

        # Here, concatenate all the planes together before the residual block:
        x = tf.concat(x, axis=-1)
        # print "Shape after concat: " + str(x.get_shape())

        # At the bottom, do another residual block:
        for j in range(self._params['RESIDUAL_BLOCKS_DEEPEST_LAYER']):
            x = residual_block(x, self._params['TRAINING'],
                               batch_norm=self._params['BATCH_NORM'],
                               name="deepest_block_{0}".format(j))

        # print "Shape after deepest block: " + str(x.get_shape())

        # Need to split the network back into n_planes
        # The deepest block doesn't change the shape, so
        # it's easy to split:
        x = tf.split(x, n_planes, -1)

        # for p in range(len(x)):
        #     print x[p].get_shape()

        # print "Upsampling now."


        # Come back up the network:
        for p in range(len(x)):
            for i in range(self._params['NETWORK_DEPTH'] - 1, -1, -1):

                # print "Up start, Plane {p}, layer {i}: x[{p}].get_shape(): {s}".format(
                #     p=p, i=i, s=x[p].get_shape())

                # How many filters to return from upsampling?
                n_filters = network_filters[p][-1].get_shape().as_list()[-1]

                name = "upsample"
                reuse = False
                if not sharing:
                    name += "_plane{0}".format(p)
                if sharing and p != 0:
                    reuse = True

                name += "_{0}".format(i)
                if verbose:
                    print "Name: {0} + reuse: {1}".format(name, reuse)

                # Upsample:
                x[p] = upsample_block(x[p],
                                      self._params['TRAINING'],
                                      batch_norm=self._params['BATCH_NORM'],
                                      n_output_filters=n_filters,
                                      name=name,
                                      reuse=reuse)

                x[p] = tf.concat([x[p], network_filters[p][-1]], axis=-1,
                                 name='up_concat_plane{0}_{1}'.format(p, i))

                # Remove the recently concated filters:
                network_filters[p].pop()
                # with tf.variable_scope("bottleneck_plane{0}_{1}".format(p,i)):

                name = "BottleneckUpsample"
                reuse = False
                if not sharing:
                    name += "_plane{0}".format(p)
                if sharing and p != 0:
                    reuse = True

                name += "_{0}".format(i)

                if verbose:
                    print("Name: {0} + reuse: {1}".format(name, reuse))

                # Include a bottleneck to reduce the number of filters after upsampling:
                x[p] = tf.layers.conv2d(x[p],
                                        n_filters,
                                        kernel_size=[1, 1],
                                        strides=[1, 1],
                                        padding='same',
                                        activation=None,
                                        use_bias=False,
                                        reuse=reuse,
                                        trainable=self._params['TRAINING'],
                                        name=name)

                x[p] = tf.nn.relu(x[p])

                # Residual
                for j in range(self._params['RESIDUAL_BLOCKS_PER_LAYER']):
                    name = "resblock_up"
                    reuse = False
                    if not sharing:
                        name += "_plane{0}".format(p)
                    if sharing and p != 0:
                        reuse = True

                    name += "_{0}_{1}".format(i, j)

                    if verbose:
                        print("Name: {0} + reuse: {1}".format(name, reuse))

                    x[p] = residual_block(x[p], self._params['TRAINING'],
                                          batch_norm=self._params['BATCH_NORM'],
                                          reuse=reuse,
                                          name=name)

                # print "Up end, Plane {p}, layer {i}: x[{p}].get_shape(): {s}".format(
                #     p=p, i=i, s=x[p].get_shape())

        # Split here for segmentation labeling and vertex finding.

        presplit_filters = list(x)

        for p in range(len(x)):
            name = "FinalResidualBlock"
            reuse = False
            if not sharing:
                name += "_plane{0}".format(p)
            if sharing and p != 0:
                reuse = True

            if verbose:
                print("Name: {0} + reuse: {1}".format(name, reuse))

            x[p] = residual_block(x[p],
                                  self._params['TRAINING'],
                                  batch_norm=self._params['BATCH_NORM'],
                                  reuse=reuse,
                                  name=name)

            name = "BottleneckConv2D"
            reuse = False
            if not sharing:
                name += "_plane{0}".format(p)
            if sharing and p != 0:
                reuse = True

            if verbose:
                print("Name: {0} + reuse: {1}".format(name, reuse))

            # At this point, we ought to have a network that has the same
            # shape as the initial input, but with more filters. We can use
            # a bottleneck to map it onto the right dimensions:
            x[p] = tf.layers.conv2d(x[p],
                                    self._params['NUM_LABELS'],
                                    kernel_size=[7, 7],
                                    strides=[1, 1],
                                    padding='same',
                                    activation=None,
                                    use_bias=False,
                                    trainable=self._params['TRAINING'],
                                    reuse=reuse,
                                    name=name)

        seg_logits = x
        # The final activation is a softmax across the pixels; it gets applied in the loss function:
        # x = tf.nn.softmax(x)

        if self._params['VERTEX_FINDING']:
            x_vtx = presplit_filters
            for p in xrange(len(x_vtx)):
                name = "FinalResidualBlockVertex"
                reuse = False
                if not sharing:
                    name += "_plane{0}".format(p)
                if sharing and p != 0:
                    reuse = True

                if verbose:
                    print("Name: {0} + reuse: {1}".format(name, reuse))

                x_vtx[p] = residual_block(x_vtx[p],
                        self._params['TRAINING'],
                        batch_norm=self._params['BATCH_NORM'],
                        reuse=reuse,
                        name=name)

                name = "BottleneckConv2DVertex"
                reuse = False
                if not sharing:
                    name += "_plane{0}".format(p)
                if sharing and p != 0:
                    reuse = True

                if verbose:
                    print("Name: {0} + reuse: {1}".format(name, reuse))

                # At this point, we ought to have a network that has the same
                # shape as the initial input, but with more filters. We can
                # use a bottleneck to map it onto the right dimensions:
                x_vtx[p] = tf.layers.conv2d(x_vtx[p],
                                            1,
                                            kernel_size=[5, 5],
                                            strides=[1, 1],
                                            padding='same',
                                            activation=None,
                                            use_bias=False,
                                            trainable=self._params['TRAINING'],
                                            reuse=reuse,
                                            name=name)

                # This comes out with one filter, but it should really be reduced by one dimension:
                x_vtx[p] = tf.squeeze(x_vtx[p], axis=-1)

            vertex_logits = x_vtx
        else:
            vertex_logits = None


        return seg_logits, vertex_logits
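
For reference, here is every configuration key this network reads, gathered into one hypothetical parameter dictionary. The keys come from the code above; the values are illustrative assumptions only.

params = {
    'NPLANES': 3,                        # number of parallel planes
    'SHARE_PLANE_WEIGHTS': True,         # reuse weights across planes
    'N_INITIAL_FILTERS': 12,
    'NETWORK_DEPTH': 4,                  # downsample/upsample levels
    'RESIDUAL_BLOCKS_PER_LAYER': 2,
    'RESIDUAL_BLOCKS_DEEPEST_LAYER': 4,
    'BATCH_NORM': True,
    'TRAINING': True,
    'NUM_LABELS': 3,
    'VERTEX_FINDING': False,             # also return per-pixel vertex logits
}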
Code example #10
File: models.py Project: anniegao2007/faces-BIM
def get_training_model(width, height, bs=1, bi_style=False):
    input_o = layers.Input(shape=(height, width, 3),
                           dtype='float32',
                           name='input_o')

    c1 = layers.Conv2D(32, (9, 9), strides=1, padding='same',
                       name='conv_1')(input_o)
    c1 = layers.BatchNormalization(name='normal_1')(c1)
    c1 = layers.Activation('relu', name='relu_1')(c1)

    c2 = layers.Conv2D(64, (3, 3), strides=2, padding='same',
                       name='conv_2')(c1)
    c2 = layers.BatchNormalization(name='normal_2')(c2)
    c2 = layers.Activation('relu', name='relu_2')(c2)

    c3 = layers.Conv2D(128, (3, 3), strides=2, padding='same',
                       name='conv_3')(c2)
    c3 = layers.BatchNormalization(name='normal_3')(c3)
    c3 = layers.Activation('relu', name='relu_3')(c3)

    r1 = residual_block(c3, 1)
    r2 = residual_block(r1, 2)
    r3 = residual_block(r2, 3)
    r4 = residual_block(r3, 4)
    r5 = residual_block(r4, 5)

    d1 = layers.Conv2DTranspose(64, (3, 3),
                                strides=2,
                                padding='same',
                                name='conv_4')(r5)
    d1 = layers.BatchNormalization(name='normal_4')(d1)
    d1 = layers.Activation('relu', name='relu_4')(d1)

    d2 = layers.Conv2DTranspose(32, (3, 3),
                                strides=2,
                                padding='same',
                                name='conv_5')(d1)
    d2 = layers.BatchNormalization(name='normal_5')(d2)
    d2 = layers.Activation('relu', name='relu_5')(d2)

    c4 = layers.Conv2D(3, (9, 9), strides=1, padding='same', name='conv_6')(d2)
    c4 = layers.BatchNormalization(name='normal_6')(c4)
    c4 = layers.Activation('tanh', name='tanh_1')(c4)
    c4 = OutputScale(name='output')(c4)

    content_activation = layers.Input(shape=(height // 2, width // 2, 128),
                                      dtype='float32')
    style_activation1 = layers.Input(shape=(height, width, 64),
                                     dtype='float32')
    style_activation2 = layers.Input(shape=(height // 2, width // 2, 128),
                                     dtype='float32')
    style_activation3 = layers.Input(shape=(height // 4, width // 4, 256),
                                     dtype='float32')
    style_activation4 = layers.Input(shape=(height // 8, width // 8, 512),
                                     dtype='float32')

    if bi_style:
        style_activation1_2 = layers.Input(shape=(height, width, 64),
                                           dtype='float32')
        style_activation2_2 = layers.Input(shape=(height // 2, width // 2,
                                                  128),
                                           dtype='float32')
        style_activation3_2 = layers.Input(shape=(height // 4, width // 4,
                                                  256),
                                           dtype='float32')
        style_activation4_2 = layers.Input(shape=(height // 8, width // 8,
                                                  512),
                                           dtype='float32')

    total_variation_loss = layers.Lambda(get_tv_loss,
                                         output_shape=(1, ),
                                         name='tv',
                                         arguments={
                                             'width': width,
                                             'height': height
                                         })([c4])

    # Block 1
    x = layers.Conv2D(64, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block1_conv1')(c4)
    x = layers.Conv2D(64, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block1_conv2')(x)
    style_loss1 = layers.Lambda(get_style_loss,
                                output_shape=(1, ),
                                name='style1',
                                arguments={'batch_size':
                                           bs})([x, style_activation1])
    if bi_style:
        style_loss1_2 = layers.Lambda(get_style_loss,
                                      output_shape=(1, ),
                                      name='style1_2',
                                      arguments={'batch_size':
                                                 bs})([x, style_activation1_2])
        style_loss1 = AverageAddTwo(name='style1_out')(
            [style_loss1, style_loss1_2])
    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

    # Block 2
    x = layers.Conv2D(128, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block2_conv1')(x)
    x = layers.Conv2D(128, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block2_conv2')(x)
    content_loss = layers.Lambda(get_content_loss,
                                 output_shape=(1, ),
                                 name='content')([x, content_activation])
    style_loss2 = layers.Lambda(get_style_loss,
                                output_shape=(1, ),
                                name='style2',
                                arguments={'batch_size':
                                           bs})([x, style_activation2])
    if bi_style:
        style_loss2_2 = layers.Lambda(get_style_loss,
                                      output_shape=(1, ),
                                      name='style2_2',
                                      arguments={'batch_size':
                                                 bs})([x, style_activation2_2])
        style_loss2 = AverageAddTwo(name='style2_out')(
            [style_loss2, style_loss2_2])
    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

    # Block 3
    x = layers.Conv2D(256, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block3_conv1')(x)
    x = layers.Conv2D(256, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block3_conv2')(x)
    x = layers.Conv2D(256, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block3_conv3')(x)
    style_loss3 = layers.Lambda(get_style_loss,
                                output_shape=(1, ),
                                name='style3',
                                arguments={'batch_size':
                                           bs})([x, style_activation3])
    if bi_style:
        style_loss3_2 = layers.Lambda(get_style_loss,
                                      output_shape=(1, ),
                                      name='style3_2',
                                      arguments={'batch_size':
                                                 bs})([x, style_activation3_2])
        style_loss3 = AverageAddTwo(name='style3_out')(
            [style_loss3, style_loss3_2])
    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

    # Block 4
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block4_conv1')(x)
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block4_conv2')(x)
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block4_conv3')(x)
    style_loss4 = layers.Lambda(get_style_loss,
                                output_shape=(1, ),
                                name='style4',
                                arguments={'batch_size':
                                           bs})([x, style_activation4])
    if bi_style:
        style_loss4_2 = layers.Lambda(get_style_loss,
                                      output_shape=(1, ),
                                      name='style4_2',
                                      arguments={'batch_size':
                                                 bs})([x, style_activation4_2])
        style_loss4 = AverageAddTwo(name='style4_out')(
            [style_loss4, style_loss4_2])
    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

    # Block 5
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block5_conv1')(x)
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block5_conv2')(x)
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block5_conv3')(x)
    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)

    if bi_style:
        model = Model([
            input_o, content_activation, style_activation1, style_activation2,
            style_activation3, style_activation4, style_activation1_2,
            style_activation2_2, style_activation3_2, style_activation4_2
        ], [
            content_loss, style_loss1, style_loss2, style_loss3, style_loss4,
            total_variation_loss, c4
        ])
    else:
        model = Model([
            input_o, content_activation, style_activation1, style_activation2,
            style_activation3, style_activation4
        ], [
            content_loss, style_loss1, style_loss2, style_loss3, style_loss4,
            total_variation_loss, c4
        ])
    model_layers = {layer.name: layer for layer in model.layers}
    original_vgg = vgg16.VGG16(weights='imagenet', include_top=False)
    original_vgg_layers = {layer.name: layer for layer in original_vgg.layers}

    # Load the pretrained ImageNet weights into the VGG layers and freeze them:
    for layer in original_vgg.layers:
        if layer.name in model_layers:
            model_layers[layer.name].set_weights(
                original_vgg_layers[layer.name].get_weights())
            model_layers[layer.name].trainable = False

    print("training model built successfully!")
    return model
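
Because get_training_model and get_evaluate_model (code example #2) build the transform network with identical layer names, trained weights can be copied over by name, mirroring the VGG-weight transfer above. A hedged sketch (the shapes and batch size are assumptions):

train_model = get_training_model(256, 256, bs=4)
# ... train ...
eval_model = get_evaluate_model(256, 256)
train_layers = {layer.name: layer for layer in train_model.layers}
for layer in eval_model.layers:
    if layer.name in train_layers:
        layer.set_weights(train_layers[layer.name].get_weights())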
Code example #11
File: A2C_selfAttention.py Project: gyh75520/A2C_Exp
    def __init__(self,
                 sess,
                 ob_space,
                 ac_space,
                 n_env,
                 n_steps,
                 n_batch,
                 n_lstm=256,
                 reuse=False,
                 layers=None,
                 cnn_extractor=nature_cnn,
                 layer_norm=False,
                 feature_extraction="cnn",
                 **kwargs):
        # super(LstmPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, n_lstm, reuse,
        #                                  scale=(feature_extraction == "cnn"))
        # add this function to LstmPolicy to init ActorCriticPolicy
        self.AC_init(sess, ob_space, ac_space, n_env, n_steps, n_batch, n_lstm,
                     reuse, feature_extraction)

        with tf.variable_scope("model", reuse=reuse):
            extracted_features = cnn_extractor(self.processed_x,
                                               **kwargs)  # [B,H,W,Depth]
            print('extracted_features', extracted_features)
            coor = get_coor(extracted_features)
            # [B,H,W,D+2]
            entities = tf.concat([extracted_features, coor], axis=3)
            print('entities:', entities)
            # [B,H*W,num_heads,Depth=D+2]
            MHDPA_output, weights = MHDPA(entities,
                                          "extracted_features",
                                          num_heads=2)
            print('MHDPA_output', MHDPA_output)
            self.attention = weights
            # [B,H*W,num_heads,Depth]
            residual_output = residual_block(entities, MHDPA_output)
            print('residual_output', residual_output)

            # max_pooling
            residual_maxpooling_output = tf.reduce_max(residual_output,
                                                       axis=[1])

            print('residual_maxpooling_output', residual_maxpooling_output)
            input_sequence = batch_to_seq(residual_maxpooling_output,
                                          self.n_env, n_steps)
            # input_sequence = batch_to_seq(extracted_features, self.n_env, n_steps)
            masks = batch_to_seq(self.masks_ph, self.n_env, n_steps)
            rnn_output, self.snew = lstm(input_sequence,
                                         masks,
                                         self.states_ph,
                                         'lstm1',
                                         n_hidden=n_lstm,
                                         layer_norm=layer_norm)

            rnn_output = seq_to_batch(rnn_output)
            # print('rnn_output', rnn_output, '      snew', self.snew)

            value_fn = linear(rnn_output, 'vf', 1)

            self.proba_distribution, self.policy, self.q_value = \
                self.pdtype.proba_distribution_from_latent(rnn_output, rnn_output)

        self.value_fn = value_fn
        self.initial_state = np.zeros((self.n_env, n_lstm * 2),
                                      dtype=np.float32)
        self._setup_init()
Code example #12
def build_discriminator(input_tensor,
                        n_initial_filters=12,
                        n_blocks=2,
                        is_training=True,
                        alpha=0.2,
                        reuse=False):
    """
    Build a DC GAN discriminator with deep convolutional layers
    Input_tensor is assumed to be reshaped into a (BATCH, L, H, F) type format (rectangular)
    """

    with tf.variable_scope("discriminator", reuse=reuse):
        # Map the input onto n_initial_filters channels:
        
        x = tf.layers.conv2d(input_tensor,
                             n_initial_filters,
                             kernel_size=[3, 3],
                             strides=[1, 1],
                             padding='same',
                             activation=None,
                             use_bias=False,
                             kernel_initializer=None,  # automatically uses Xavier initializer
                             kernel_regularizer=None,
                             activity_regularizer=None,
                             trainable=is_training,
                             name="Conv2D",
                             reuse=reuse)

        # Apply residual mappings and downsamplings:
        for block in range(n_blocks):
            x = residual_block(x, is_training=is_training,
                               kernel=[3, 3], stride=[1, 1],
                               alpha=alpha,
                               name="res_block_{}".format(block),
                               reuse=reuse)

            x = downsample_block(x, is_training=is_training,
                                 kernel=[3, 3],
                                 stride=[1, 1],
                                 alpha=alpha,
                                 name="res_block_upsample_{}".format(block),
                                 reuse=reuse)

        # Apply a final convolution to map down to a single output filter:
        x = tf.layers.conv2d(x,
                             1,
                             kernel_size=[3, 3],
                             strides=[1, 1],
                             padding='same',
                             activation=None,
                             use_bias=False,
                             kernel_initializer=None,  # automatically uses Xavier initializer
                             kernel_regularizer=None,
                             activity_regularizer=None,
                             trainable=is_training,
                             name="FinalConv2D1x1",
                             reuse=reuse)

        # Apply global average pooling to the final layer, then a sigmoid activation.

        # For global average pooling, need to get the shape of the input:
        shape = (x.shape[1], x.shape[2])

        x = tf.nn.pool(x,
                       window_shape=shape,
                       pooling_type="AVG",
                       padding="VALID",
                       dilation_rate=None,
                       strides=None,
                       name="GlobalAveragePool",
                       data_format=None)
        x = tf.reshape(x, (-1, 1))

        # For the final activation, apply sigmoid:
        return tf.nn.sigmoid(x)
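
A hypothetical pairing with the generator from code example #7, showing how the reuse flag lets one set of discriminator weights score both real and generated batches (shapes and names are assumptions):

batch_size = 32
z = tf.random_uniform([batch_size, 10, 10, 1], -1.0, 1.0)
real_images = tf.placeholder(tf.float32, [None, 28, 28, 1], name="real_images")

fake_images = build_generator(z, n_initial_filters=64, n_blocks=2)
d_fake = build_discriminator(fake_images, reuse=False)  # creates the variables
d_real = build_discriminator(real_images, reuse=True)   # shares them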
Code example #13
def build_discriminator_progressive(input_tensor,
                                    leaky_relu_param=0.0,
                                    n_filters=64,
                                    n_blocks=2,
                                    is_training=True,
                                    reuse=True):
    """
    This function builds a discriminator to decide if an image is real
    or fake, starting at a low resolution of 4x4 and gradually increasing.

    To make it easy to reuse weights, every level has a fixed number of
    filters.
    """

    with tf.variable_scope("discriminator_progressive", reuse = tf.AUTO_REUSE):

        # Map the input image onto n_filters channels:

        x = tf.layers.conv2d(input_tensor,
                             n_filters,
                             kernel_size=[3, 3],
                             strides=[1, 1],
                             padding='same',
                             activation=None,
                             use_bias=False,
                             kernel_initializer=None,  # automatically uses Xavier initializer
                             kernel_regularizer=None,
                             activity_regularizer=None,
                             trainable=is_training,
                             name="Conv2D")

        current_size = int(x.get_shape()[1])

        while current_size > 4:
            next_size = current_size // 2
            subname = "{}to{}".format(current_size, next_size)

            x = residual_block(x,
                               is_training,
                               alpha=leaky_relu_param,
                               name="res_block_{}".format(subname))

            x = downsample_block(x,
                                 is_training,
                                 alpha=leaky_relu_param,
                                 name="downsample_block_{}".format(subname))

            # Refresh the size after downsampling so the loop stops at 4x4:
            current_size = int(x.get_shape()[1])


        # For global average pooling, need to get the shape of the input:
        shape = (x.shape[1], x.shape[2])

        x = tf.nn.pool(x,
                       window_shape=shape,
                       pooling_type="AVG",
                       padding="VALID",
                       dilation_rate=None,
                       strides=None,
                       name="GlobalAveragePool",
                       data_format=None)
        x = tf.reshape(x, (-1, 1))

        # For the final activation, apply sigmoid:
        return tf.nn.sigmoid(x)
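
Because the scope is opened with tf.AUTO_REUSE, repeated calls share one set of weights regardless of input resolution, which is exactly what progressive growing needs. A hypothetical usage (the placeholders and shapes are assumptions):

images_8x8 = tf.placeholder(tf.float32, [None, 8, 8, 1])
images_16x16 = tf.placeholder(tf.float32, [None, 16, 16, 1])
d_low = build_discriminator_progressive(images_8x8)      # early 8x8 stage
d_high = build_discriminator_progressive(images_16x16)   # later stage, same weights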