Example #1
def build_network(x, n_layers, hidden_layer_nodes, keep_prob, training_phase):
    assert n_layers == len(
        hidden_layer_nodes
    ), 'Specified layer nodes and number of layers do not correspond.'
    layers = [x]
    with tf.variable_scope('BN_layers') as scope:
        hidden_1 = BN_layer_ops(
            x,
            shape=[config.n_features, hidden_layer_nodes[0]],
            name='BNhidden0',
            keep_prob=keep_prob,
            phase=training_phase)
        layers.append(hidden_1)
        for n in range(0, n_layers - 1):
            hidden_n = BN_layer_ops(
                layers[-1],
                shape=[hidden_layer_nodes[n], hidden_layer_nodes[n + 1]],
                name='BNhidden{}'.format(n + 1),
                keep_prob=keep_prob,
                phase=training_phase)
            layers.append(hidden_n)
        readout = readout_ops(layers[-1],
                              shape=[hidden_layer_nodes[-1], config.n_classes],
                              name='readout')

        return readout
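
A minimal usage sketch, assuming TensorFlow 1.x and that `config`, `BN_layer_ops`, and `readout_ops` are defined as in the surrounding project (the layer sizes here are hypothetical):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, config.n_features])
keep_prob = tf.placeholder(tf.float32)
is_training = tf.placeholder(tf.bool)

# three hidden layers; n_layers must match len(hidden_layer_nodes)
logits = build_network(x, n_layers=3,
                       hidden_layer_nodes=[128, 64, 32],
                       keep_prob=keep_prob,
                       training_phase=is_training)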
Example #2
def began_autoencoder(x,
                      l2_penalty=0.0,
                      n_hidden=128,
                      n_output=64,
                      out_fn=None,
                      trainable=True):
    l = x
    layers = []
    act_fn = tf.nn.elu
    n_repeat = int(np.log2(int(l.get_shape()[1]) / 8)) + 1  # depth from spatial resolution (axis 1 in NHWC)
    reg = tf.contrib.layers.l2_regularizer(
        l2_penalty) if l2_penalty > 0.0 else None
    init = get_initializer(act_fn)
    l = tf.layers.conv2d(l,
                         n_hidden,
                         3,
                         1,
                         activation=act_fn,
                         padding='same',
                         trainable=trainable,
                         kernel_initializer=init)
    layers.append(l)
    for idx in range(n_repeat):
        n_channel = n_hidden * (idx + 1)
        l = tf.layers.conv2d(l,
                             n_channel,
                             3,
                             1,
                             activation=act_fn,
                             padding='same',
                             trainable=trainable,
                             kernel_regularizer=reg,
                             kernel_initializer=init)
        layers.append(l)
        l = tf.layers.conv2d(l,
                             n_channel,
                             3,
                             1,
                             activation=act_fn,
                             padding='same',
                             trainable=trainable,
                             kernel_regularizer=reg,
                             kernel_initializer=init)
        layers.append(l)
        if idx < n_repeat - 1:
            l = tf.layers.conv2d(l,
                                 n_channel,
                                 3,
                                 2,
                                 activation=act_fn,
                                 padding='same',
                                 trainable=trainable,
                                 kernel_regularizer=reg,
                                 kernel_initializer=init)
            layers.append(l)

    l = tf.reshape(l, [tf.shape(l)[0], np.prod(l.get_shape().as_list()[1:])])
    l = tf.layers.dense(l, n_output, activation=out_fn)
    layers.append(l)
    return layers
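
The encoder depth is tied to the spatial resolution: for an HxH input, n_repeat = log2(H / 8) + 1, and the stride-2 convolution after every repeat but the last halves the feature map down to 8x8. A quick arithmetic check in plain NumPy (resolutions are hypothetical):

import numpy as np

for res in (32, 64, 128):
    n_repeat = int(np.log2(res / 8)) + 1
    # n_repeat - 1 stride-2 convs halve the resolution down to 8
    print(res, n_repeat, res // 2 ** (n_repeat - 1))  # -> 8 in every case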
Example #3
def my_discriminator(l,
                     l2_penalty,
                     n_output,
                     n_hidden=64,
                     act_fn=lambda x: tf.maximum(0.2 * x, x),
                     out_fn=None,
                     use_batchnorm=True):
    # NOTE: this follows the figure in the DCGAN paper; the actual
    # published code used a different architecture
    reg = tf.contrib.layers.l2_regularizer(
        l2_penalty) if l2_penalty > 0.0 else None
    layers = []
    n_repeat = int(np.log2(int(l.get_shape()[1]) / 8))  # depth from spatial resolution (axis 1 in NHWC)
    for i in range(n_repeat):
        with tf.name_scope('disc.%02d' % i):
            l = tf.layers.conv2d(l,
                                 n_hidden,
                                 kernel_size=5,
                                 strides=2,
                                 padding='same',
                                 activation=None,
                                 kernel_regularizer=reg)
            if use_batchnorm:
                l = tf.layers.batch_normalization(l, training=True)
            l = act_fn(l)
            layers.append(l)
            n_hidden *= 2
    l = tf.reshape(l, [tf.shape(l)[0], np.prod(l.get_shape().as_list()[1:])])
    l = tf.layers.dense(l, n_output, activation=out_fn)
    layers.append(l)
    return layers
Example #4
    def _make_layer(self,
                    block,
                    planes,
                    blocks,
                    shortcut_type,
                    stride=1,
                    training=False):
        downsample = None
        stride_p = stride  # stride used by the downsample branch

        if self.cnt < self.depth_3d:
            if self.cnt == 0:
                stride_p = 1
            else:
                stride_p = [2, 2, 1]
            if stride != 1 or self.inplanes != planes * block.expansion:
                downsample = Sequential([
                    Conv3D(planes * block.expansion,
                           filter_size=1,
                           strides=stride_p,
                           name='conv_down'),
                    BatchNormal(name='batch_down', training=training),
                ])

        else:
            if stride != 1 or self.inplanes != planes * block.expansion:
                downsample = Sequential([
                    Conv2D(planes * block.expansion,
                           filter_size=1,
                           strides=2,
                           name='conv_down'),
                    BatchNormal(name='batch_down', training=training)
                ])
        layers = []
        layers.append(
            block(planes,
                  stride,
                  downsample,
                  n_s=self.cnt,
                  depth_3d=self.depth_3d,
                  ST_struc=self.ST_struc,
                  name='Bottleneck0',
                  training=training))
        self.cnt += 1

        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(
                block(planes,
                      n_s=self.cnt,
                      depth_3d=self.depth_3d,
                      ST_struc=self.ST_struc,
                      name='Bottleneck{}'.format(i),
                      training=training))
            self.cnt += 1

        return Sequential(layers)
Example #5
def shallow_wide_cnn(x, filter_widths, filter_channel):

    layers = []
    x_dim = x.get_shape()[2].value
    x_width = x.get_shape()[1].value
    for idx, filter_width in enumerate(filter_widths):
        with tf.variable_scope('filter_{}'.format(idx)) as scope:
            W = tf.get_variable(name='conv_W',
                                shape=[filter_width, x_dim, filter_channel],
                                initializer=initializers.get("glorot_uniform"))
            conved = tf.nn.conv1d(value=x,
                                  filters=W,
                                  stride=1,
                                  padding='VALID')
            pooled = tf.reduce_max(conved, axis=1)
            layers.append(pooled)

    return tf.concat(layers, axis=1)
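
This is the shallow-and-wide text-CNN pattern: parallel branches with different filter widths, max-over-time pooling, and concatenation. A usage sketch, assuming TensorFlow 1.x with `initializers` imported Keras-style; the sizes are hypothetical:

import tensorflow as tf
from tensorflow.keras import initializers  # supplies initializers.get(...)

# 50-token sentences with 300-dim embeddings
x = tf.placeholder(tf.float32, [None, 50, 300])
feats = shallow_wide_cnn(x, filter_widths=[3, 4, 5], filter_channel=100)
# three branches of 100 channels each -> [None, 300]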
Example #6
def began_generator(z, img_shape, l2_penalty, act_fn=tf.nn.elu, out_fn=None):
    reg = tf.contrib.layers.l2_regularizer(
        l2_penalty) if l2_penalty > 0.0 else None

    layers = []
    n_repeat = int(np.log2(img_shape[0] / 4))
    n_hidden = 128
    l = tf.layers.dense(z,
                        8 * 8 * n_hidden,
                        activation=None,
                        kernel_regularizer=reg)
    layers.append(l)
    l = tf.reshape(l, [-1, 8, 8, n_hidden])
    for idx in range(n_repeat):
        l = tf.layers.conv2d(l,
                             n_hidden,
                             3,
                             1,
                             activation=act_fn,
                             padding='same',
                             kernel_regularizer=reg,
                             kernel_initializer=get_initializer(act_fn))
        layers.append(l)
        l = tf.layers.conv2d(l,
                             n_hidden,
                             3,
                             1,
                             activation=act_fn,
                             padding='same',
                             kernel_regularizer=reg,
                             kernel_initializer=get_initializer(act_fn))
        layers.append(l)
        if idx < n_repeat - 1:
            l = scale(l, 2)
            layers.append(l)
    l = tf.layers.conv2d(l,
                         img_shape[-1],
                         3,
                         1,
                         activation=out_fn,
                         padding='same',
                         kernel_regularizer=reg)
    layers.append(l)
    return layers
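
The generator mirrors the encoder above: it starts from an 8x8 map and upsamples n_repeat - 1 = log2(H / 4) - 1 times, landing exactly on H (`scale` and `get_initializer` are helpers from the surrounding project). Checking that arithmetic:

import numpy as np

for res in (32, 64, 128):
    n_repeat = int(np.log2(res / 4))
    print(res, 8 * 2 ** (n_repeat - 1))  # -> res in every case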
Example #7
def dcgan_generator(z,
                    img_shape,
                    l2_penalty,
                    act_fn=tf.nn.relu,
                    out_fn=tf.nn.tanh,
                    use_batchnorm=True):
    reg = tf.contrib.layers.l2_regularizer(
        l2_penalty) if l2_penalty > 0.0 else None
    layers = []
    n_filter = 512
    img_res = 1
    l = tf.reshape(z, [-1, 1, 1, int(np.prod(z.get_shape()[1:]))])
    while img_res < img_shape[0] // 4:
        s = 2 if n_filter != 512 else 1  # first layer: stride 1
        p = 'same' if n_filter != 512 else 'valid'  # first layer: 'valid' padding (1x1 -> 4x4)
        l = tf.layers.conv2d_transpose(
            l,
            filters=n_filter,
            kernel_size=4,
            strides=s,
            padding=p,
            activation=None,
            kernel_regularizer=reg,
            data_format='channels_last',
            kernel_initializer=get_initializer(act_fn))
        if use_batchnorm:
            l = tf.layers.batch_normalization(l, training=True)
        l = act_fn(l)
        layers.append(l)
        img_res *= 2
        n_filter //= 2
    l = tf.layers.conv2d_transpose(l,
                                   filters=img_shape[-1],
                                   kernel_size=4,
                                   strides=2,
                                   padding='same',
                                   activation=out_fn,
                                   kernel_regularizer=reg,
                                   data_format='channels_last')
    layers.append(l)
    return layers
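
A usage sketch, again assuming TensorFlow 1.x and the project's `get_initializer` helper; the latent size is hypothetical:

import tensorflow as tf

z = tf.placeholder(tf.float32, [None, 100])  # 100-d latent code
layers = dcgan_generator(z, img_shape=(64, 64, 3), l2_penalty=1e-4)
img = layers[-1]  # [None, 64, 64, 3], tanh-activated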
Example #8
def network_builder(x, n_layers, hidden_layer_nodes, keep_prob,
                    training_phase):
    assert n_layers == len(
        hidden_layer_nodes
    ), 'Specified layer nodes and number of layers do not correspond.'
    layers = [x]
    if config.builder == 'bn':
        print('Building ReLU + Batch-norm architecture')
        builder = BN_layer_ops
    elif config.builder == 'selu':
        print('Building SELU architecture')
        builder = hidden_SELU_ops
    elif config.builder == 'selu-bn':
        print('Building SELU + Batch-norm architecture')
        builder = SELU_BN_layer_ops
    else:
        print('Default architecture: SELU')
        builder = hidden_SELU_ops

    with tf.variable_scope('hidden_layers') as scope:
        hidden_1 = builder(x,
                           shape=[config.n_features, hidden_layer_nodes[0]],
                           name='hidden0',
                           keep_prob=keep_prob,
                           phase=training_phase)
        layers.append(hidden_1)
        for n in range(0, n_layers - 1):
            hidden_n = builder(
                layers[-1],
                shape=[hidden_layer_nodes[n], hidden_layer_nodes[n + 1]],
                name='hidden{}'.format(n + 1),
                keep_prob=keep_prob,
                phase=training_phase)
            layers.append(hidden_n)
        readout = readout_ops(layers[-1],
                              shape=[hidden_layer_nodes[-1], config.n_classes],
                              name='readout',
                              initializer=SELU_initializer)

        return readout
Example #9
    def _build(self, inputs):
        max_word_length = tf.shape(inputs)[1]  # dynamic length; the static get_shape()[1] may be None
        embed_size = inputs.get_shape()[-1]
        
        inputs = tf.expand_dims(inputs, 1)
        
        layers = []
        self.conv_layers = []
        for kernel_size, kernel_feature_size in zip(self._kernels, self._kernel_features):

            # -> [batch_size, 1, max_word_length - kernel_size + 1, kernel_feature_size]
            conv_fxn = snt.Conv2D(output_channels=kernel_feature_size,
                                  kernel_shape=[1, kernel_size],  # slide along the word-length axis
                                  initializers=self._initializers,
                                  padding='VALID')
            conv = conv_fxn(inputs)
            self.conv_layers.append(conv_fxn)
            
            # max-pool over the dynamic word-length axis; tf.nn.max_pool needs a
            # static ksize, so use reduce_max instead:
            # https://stackoverflow.com/questions/43574076/tensorflow-maxpool-with-dynamic-ksize
            pool = tf.reduce_max(tf.tanh(conv),
                                 axis=2,
                                 keepdims=True
                                 )
            layers.append(tf.squeeze(pool, [1, 2]))
            
        if len(self._kernels) > 1:
            output = tf.concat(layers, 1)
        else:
            output = layers[0]
        return output
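
The `reduce_max` substitution can be checked against the max-pool it replaces on a tensor whose length is known statically; a minimal sketch, assuming TensorFlow 1.x:

import numpy as np
import tensorflow as tf

x = tf.constant(np.random.rand(2, 1, 7, 5), tf.float32)  # [batch, 1, length, channels]
via_pool = tf.nn.max_pool(x, ksize=[1, 1, 7, 1], strides=[1, 1, 1, 1], padding='VALID')
via_reduce = tf.reduce_max(x, axis=2, keepdims=True)
with tf.Session() as sess:
    a, b = sess.run([via_pool, via_reduce])
    assert np.allclose(a, b)  # same result; reduce_max needs no static length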
Example #10
def dc_bilstm(config, x, x_nums, rnn_cells):
    '''
    Densely connected biLSTM
    '''
    max_x_num = x.get_shape()[1].value  # static sequence length, as a plain int
    layers = [x]
    layer_num = len(rnn_cells)

    for i in range(layer_num):
        layers.append(
            bidirectional_rnn(rnn_cells[i],
                              rnn_cells[i],
                              tf.concat(layers, axis=2),
                              x_nums,
                              scope=None)[0])

    # average-pool over the whole sequence; average_pooling1d takes a
    # single integer pool_size
    pooled = tf.layers.average_pooling1d(
        inputs=layers[-1],
        pool_size=max_x_num,
        strides=1,
    )

    return tf.reshape(pooled, [-1, pooled.get_shape()[-1]])
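
Pooling over the full sequence collapses [batch, time, channels] to [batch, 1, channels]; an equivalent and arguably simpler form is a mean over the time axis. A sketch under TensorFlow 1.x with a hypothetical fixed length:

import tensorflow as tf

h = tf.placeholder(tf.float32, [None, 20, 256])
avg = tf.layers.average_pooling1d(h, pool_size=20, strides=1)  # [None, 1, 256]
same = tf.reduce_mean(h, axis=1, keepdims=True)                # identical values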
Example #11
def dcgan_discriminator(l,
                        l2_penalty,
                        n_output,
                        n_hidden=64,
                        act_fn=lambda x: tf.maximum(0.2 * x, x),
                        out_fn=None,
                        use_batchnorm=True):
    reg = tf.contrib.layers.l2_regularizer(
        l2_penalty) if l2_penalty > 0.0 else None
    layers = []
    n_repeat = int(np.log2(int(l.get_shape()[1]))) - 2
    for i in range(n_repeat):
        l = tf.layers.conv2d(l,
                             n_hidden,
                             kernel_size=4,
                             strides=2,
                             padding='same',
                             activation=None,
                             kernel_regularizer=reg)
        if use_batchnorm:
            l = tf.layers.batch_normalization(l, training=True)
        l = act_fn(l)
        layers.append(l)
        n_hidden *= 2

    l = tf.layers.conv2d(l,
                         n_output,
                         kernel_size=4,
                         strides=1,
                         padding='valid',
                         activation=None,
                         kernel_regularizer=reg)
    l = tf.squeeze(l)
    layers.append(l)
    return layers
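
The repeat count mirrors the generator: log2(res) - 2 stride-2 convolutions shrink an input of resolution res down to 4x4, which the final VALID 4x4 convolution then reduces to 1x1. Checking the arithmetic:

import numpy as np

for res in (32, 64, 128):
    n_repeat = int(np.log2(res)) - 2
    print(res, res // 2 ** n_repeat)  # -> 4 in every case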
Example #12
def my_generator(z,
                 img_shape,
                 l2_penalty,
                 act_fn=tf.nn.relu,
                 out_fn=tf.nn.tanh,
                 use_batchnorm=True):
    # NOTE: this follows the figure in the DCGAN paper; the actual
    # published code used a different architecture
    reg = tf.contrib.layers.l2_regularizer(
        l2_penalty) if l2_penalty > 0.0 else None

    layers = []
    n_filter = 1024
    img_res = 4
    l = tf.layers.dense(z,
                        img_res * img_res * n_filter,
                        activation=tf.nn.relu,
                        name="layer0")
    l = tf.reshape(l, [-1, img_res, img_res, n_filter])
    layers.append(l)
    i = 0
    while img_res < img_shape[0] // 2:
        with tf.name_scope('gen.%02d' % i):
            n_filter //= 2
            l = tf.layers.conv2d_transpose(
                l,
                filters=n_filter,
                kernel_size=5,
                strides=2,
                padding='same',
                activation=None,
                kernel_regularizer=reg,
                data_format='channels_last',
                kernel_initializer=get_initializer(act_fn))
            if use_batchnorm:
                l = tf.layers.batch_normalization(l, training=True)
            l = act_fn(l)
            layers.append(l)
            img_res *= 2
            i += 1
    l = tf.layers.conv2d_transpose(l,
                                   filters=img_shape[-1],
                                   kernel_size=5,
                                   strides=2,
                                   padding='same',
                                   activation=out_fn,
                                   kernel_regularizer=reg,
                                   data_format='channels_last')
    layers.append(l)
    return layers
Example #13
def vdcnn(x,
          filter_width=3,
          init_channel=64,
          num_layers=[2, 2, 2, 2],
          use_shortcut=False,
          k=8,
          is_training=True,
          scope=None):
    layers = []
    x_dim = x.get_shape()[2]

    with tf.variable_scope("temp_conv"):
        filter_shape = [filter_width, x_dim, init_channel]
        W = tf.get_variable(name='temp_1',
                            shape=filter_shape,
                            initializer=initializers.get("glorot_uniform"))
        x = tf.nn.conv1d(x, W, stride=1, padding="SAME")
        layers.append(x)

    now_channel_size = init_channel

    for i, num_layer in enumerate(num_layers):
        for j in range(num_layer):
            with tf.variable_scope("%d_layer_%d_cnn" % (i, j)) as scope:
                shortcut = None
                if use_shortcut and i < len(num_layers) - 1:
                    shortcut = layers[-1]
                conv_ = conv_block(layers[-1], shortcut, filter_width,
                                   now_channel_size, is_training, scope)
                layers.append(conv_)

        if i == len(num_layers) - 1:
            break

        with tf.variable_scope("%d_layer_pool" % (i)) as scope:
            shortcut = None
            if use_shortcut:
                shortcut = layers[-1]
            pool_ = pool_block(layers[-1], shortcut, filter_width, scope)
            layers.append(pool_)

        now_channel_size *= 2

    k_pooled = tf.nn.top_k(tf.transpose(layers[-1], [0, 2, 1]),
                           k=k,
                           name='k_pool',
                           sorted=False)[0]
    flatten = tf.reshape(k_pooled, (-1, now_channel_size * k))
    return flatten
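
The `tf.nn.top_k` call implements k-max pooling over time: after transposing to [batch, channels, time], it keeps the k largest activations per channel. A small demo, assuming TensorFlow 1.x:

import tensorflow as tf

x = tf.constant([[[1., 5.], [9., 2.], [4., 7.]]])  # [batch=1, time=3, channels=2]
kmax = tf.nn.top_k(tf.transpose(x, [0, 2, 1]), k=2, sorted=False)[0]
with tf.Session() as sess:
    print(sess.run(kmax))  # {9, 4} for channel 0, {7, 5} for channel 1 (order unspecified)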
Example #14
def preprocess_screen(screen):
  layers = []
  assert screen.shape[0] == len(features.SCREEN_FEATURES)
  for i in range(len(features.SCREEN_FEATURES)):
    if i == _SCREEN_PLAYER_ID or i == _SCREEN_UNIT_TYPE:
      # categorical, but with too many values to one-hot cheaply; scale instead
      layers.append(screen[i:i+1] / features.SCREEN_FEATURES[i].scale)
    elif features.SCREEN_FEATURES[i].type == features.FeatureType.SCALAR:
      layers.append(screen[i:i+1] / features.SCREEN_FEATURES[i].scale)
    else:
      layer = np.zeros([features.SCREEN_FEATURES[i].scale, screen.shape[1], screen.shape[2]], dtype=np.float32)
      for j in range(features.SCREEN_FEATURES[i].scale):
        indy, indx = (screen[i] == j).nonzero()
        layer[j, indy, indx] = 1
      layers.append(layer)
  return np.concatenate(layers, axis=0)
Example #15
def preprocess_minimap(minimap):
  layers = []
  assert minimap.shape[0] == len(features.MINIMAP_FEATURES)
  for i in range(len(features.MINIMAP_FEATURES)):
    if i == _MINIMAP_PLAYER_ID:
      layers.append(minimap[i:i+1] / features.MINIMAP_FEATURES[i].scale)
    elif features.MINIMAP_FEATURES[i].type == features.FeatureType.SCALAR:
      layers.append(minimap[i:i+1] / features.MINIMAP_FEATURES[i].scale)
    else:
      layer = np.zeros([features.MINIMAP_FEATURES[i].scale, minimap.shape[1], minimap.shape[2]], dtype=np.float32)
      for j in range(features.MINIMAP_FEATURES[i].scale):
        indy, indx = (minimap[i] == j).nonzero()
        layer[j, indy, indx] = 1
      layers.append(layer)
  return np.concatenate(layers, axis=0)
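
Both preprocessors expand each categorical feature plane into one binary channel per category value; a tiny NumPy trace of that expansion with hypothetical values:

import numpy as np

plane = np.array([[0, 2], [1, 2]])  # 2x2 categorical plane, scale 3
scale = 3
layer = np.zeros([scale, 2, 2], dtype=np.float32)
for j in range(scale):
    indy, indx = (plane == j).nonzero()
    layer[j, indy, indx] = 1
print(layer)  # channel j is 1 exactly where plane == j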
Example #16
    def _build_ops(self, lm_graph):
        with tf.control_dependencies([lm_graph.update_state_op]):
            # get the LM embeddings
            token_embeddings = lm_graph.embedding
            layers = [
                tf.concat([token_embeddings, token_embeddings], axis=2)
            ]

            n_lm_layers = len(lm_graph.lstm_outputs['forward'])
            for i in range(n_lm_layers):
                layers.append(
                    tf.concat(
                        [lm_graph.lstm_outputs['forward'][i],
                         lm_graph.lstm_outputs['backward'][i]],
                        axis=-1
                    )
                )

            # The layers include the BOS/EOS tokens.  Remove them
            sequence_length_wo_bos_eos = lm_graph.sequence_lengths - 2
            layers_without_bos_eos = []
            for layer in layers:
                layer_wo_bos_eos = layer[:, 1:, :]
                layer_wo_bos_eos = tf.reverse_sequence(
                    layer_wo_bos_eos,
                    lm_graph.sequence_lengths - 1,
                    seq_axis=1,
                    batch_axis=0,
                )
                layer_wo_bos_eos = layer_wo_bos_eos[:, 1:, :]
                layer_wo_bos_eos = tf.reverse_sequence(
                    layer_wo_bos_eos,
                    sequence_length_wo_bos_eos,
                    seq_axis=1,
                    batch_axis=0,
                )
                layers_without_bos_eos.append(layer_wo_bos_eos)

            # concatenate the layers
            lm_embeddings = tf.concat(
                [tf.expand_dims(t, axis=1) for t in layers_without_bos_eos],
                axis=1
            )

            # get the mask op without bos/eos.
            # tf doesn't support reversing boolean tensors, so cast
            # to int then back
            mask_wo_bos_eos = tf.cast(lm_graph.mask[:, 1:], 'int32')
            mask_wo_bos_eos = tf.reverse_sequence(
                mask_wo_bos_eos,
                lm_graph.sequence_lengths - 1,
                seq_axis=1,
                batch_axis=0,
            )
            mask_wo_bos_eos = mask_wo_bos_eos[:, 1:]
            mask_wo_bos_eos = tf.reverse_sequence(
                mask_wo_bos_eos,
                sequence_length_wo_bos_eos,
                seq_axis=1,
                batch_axis=0,
            )
            mask_wo_bos_eos = tf.cast(mask_wo_bos_eos, 'bool')

        return {
            'lm_embeddings': lm_embeddings,
            'lengths': sequence_length_wo_bos_eos,
            'token_embeddings': lm_graph.embedding,
            'mask': mask_wo_bos_eos,
        }
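
The slice-reverse-slice-reverse sequence removes BOS and EOS per example even though each sequence has its own length: dropping column 0 removes BOS, and reversing by length - 1 brings each sequence's EOS to the front so the second slice can drop it. A toy trace, assuming TensorFlow 1.x:

import tensorflow as tf

seq = tf.constant([[10, 1, 2, 11, 0],    # BOS, 1, 2, EOS, pad
                   [10, 3, 11, 0, 0]])   # BOS, 3, EOS, pad, pad
lengths = tf.constant([4, 3])            # lengths including BOS/EOS
x = seq[:, 1:]                                                 # drop BOS
x = tf.reverse_sequence(x, lengths - 1, seq_axis=1, batch_axis=0)
x = x[:, 1:]                                                   # drop EOS (now leading)
x = tf.reverse_sequence(x, lengths - 2, seq_axis=1, batch_axis=0)
with tf.Session() as sess:
    print(sess.run(x))  # [[1, 2, 0], [3, 0, 0]]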
Example #17
    def extract_features(self, input=None, reuse=True):
        if input is None:
            input = self.image
        x = input
        layers = []
        with tf.variable_scope('features', reuse=reuse):
            with tf.variable_scope('layer0'):
                W = tf.get_variable("weights", shape=[3, 3, 3, 64])
                b = tf.get_variable("bias", shape=[64])
                x = tf.nn.conv2d(x, W, [1, 2, 2, 1], "VALID")
                x = tf.nn.bias_add(x, b)
                layers.append(x)
            with tf.variable_scope('layer1'):
                x = tf.nn.relu(x)
                layers.append(x)
            with tf.variable_scope('layer2'):
                x = tf.nn.max_pool(x, [1, 3, 3, 1],
                                   strides=[1, 2, 2, 1],
                                   padding='VALID')
                layers.append(x)
            with tf.variable_scope('layer3'):
                x = fire_module(x, 64, 16, 64, 64)
                layers.append(x)
            with tf.variable_scope('layer4'):
                x = fire_module(x, 128, 16, 64, 64)
                layers.append(x)
            with tf.variable_scope('layer5'):
                x = tf.nn.max_pool(x, [1, 3, 3, 1],
                                   strides=[1, 2, 2, 1],
                                   padding='VALID')
                layers.append(x)
            with tf.variable_scope('layer6'):
                x = fire_module(x, 128, 32, 128, 128)
                layers.append(x)
            with tf.variable_scope('layer7'):
                x = fire_module(x, 256, 32, 128, 128)
                layers.append(x)
            with tf.variable_scope('layer8'):
                x = tf.nn.max_pool(x, [1, 3, 3, 1],
                                   strides=[1, 2, 2, 1],
                                   padding='VALID')
                layers.append(x)
            with tf.variable_scope('layer9'):
                x = fire_module(x, 256, 48, 192, 192)
                layers.append(x)
            with tf.variable_scope('layer10'):
                x = fire_module(x, 384, 48, 192, 192)
                layers.append(x)
            with tf.variable_scope('layer11'):
                x = fire_module(x, 384, 64, 256, 256)
                layers.append(x)
            with tf.variable_scope('layer12'):
                x = fire_module(x, 512, 64, 256, 256)
                layers.append(x)
        return layers