    def __init__(self, kernel_size=4, name=None):
        super(AutoEncoder, self).__init__(name=name)
        self.encoder = snt.Sequential([
            snt.Conv2D(4, kernel_size, stride=4, padding='SAME'), tf.nn.relu,
            snt.Conv2D(16, kernel_size, stride=4, padding='SAME'), tf.nn.relu,
            snt.Conv2D(64, kernel_size, stride=4, padding='SAME'), tf.nn.relu
        ])

        self.decoder = snt.Sequential([
            snt.Conv2DTranspose(64, kernel_size, stride=4, padding='SAME'),
            tf.nn.relu,
            snt.Conv2DTranspose(16, kernel_size, stride=4, padding='SAME'),
            tf.nn.relu,
            snt.Conv2DTranspose(4, kernel_size, stride=4, padding='SAME'),
            tf.nn.relu,
            snt.Conv2D(1, kernel_size, padding='SAME')
        ])
Example #2
 def __init__(self, action_spec):
     super().__init__(name='r2d2_test_network')
     self._net = snt.DeepRNN([
         snt.Conv2D(32, [8, 8], [4, 4]),
         tf.nn.relu,
         snt.Conv2D(64, [4, 4], [2, 2]),
         tf.nn.relu,
         snt.Conv2D(64, [3, 3], [1, 1]),
         tf.nn.relu,
         snt.Flatten(),
         snt.LSTM(20),
         tf.nn.relu,
         #snt.LSTM(160),
         #snt.nets.MLP([50, 50,512]),
         #tf.nn.relu,
          snt.nets.MLP([50, 50, action_spec.num_values])
     ])
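A minimal sketch of stepping this recurrent network once (not part of the original test; it assumes Sonnet 2, an Atari-style 84x84x4 observation, and that `network` is an instance of the enclosing class, whose name is not shown above):

obs = tf.ones([1, 84, 84, 4])               # assumed observation shape
state = network._net.initial_state(1)       # batch size 1
q_values, state = network._net(obs, state)  # q_values: [1, num_actions]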
Example #3
 def testGetSaverModule(self):
   input_ = tf.placeholder(tf.float32, shape=[1, 10, 10, 3])
   conv = snt.Conv2D(output_channels=3, kernel_shape=3)
   conv(input_)
   saver = snt.get_saver(conv)
   self.assertIsInstance(saver, tf.train.Saver)
   self.assertIn("w:0", saver._var_list)
   self.assertIn("b:0", saver._var_list)
Example #4
 def __init__(self, channel, kernel, name='unet_block_up'):
     super(UNetBlockUp, self).__init__(name=name)
     with self._enter_variable_scope(check_same_graph=False):
         self._layers = [
             snt.Conv2D(channel, kernel, use_bias=False, name='conv'),
             snt.LayerNorm(axis=[1, 2], offset=True, scale=False, name='instance_norm'),
             partial(tf.nn.relu, name='relu'),
         ]
Example #5
 def _build(self, z0, is_training=False):
     """Outputs logits."""
     zk = z0
     conv2d_id = 0
     linear_id = 0
     name = None
     for spec in self._layer_types:
         if spec[0] == 'conv2d':
             if linear_id > 0:
                 raise ValueError(
                     'Convolutional layers must precede fully connected '
                     'layers.')
             name = 'conv2d_{}'.format(conv2d_id)
             conv2d_id += 1
             (_, (kernel_height, kernel_width), channels, padding,
              stride) = spec
             m = snt.Conv2D(output_channels=channels,
                            kernel_shape=(kernel_height, kernel_width),
                            padding=padding,
                            stride=stride,
                            use_bias=True,
                            regularizers=self._regularizers,
                            initializers=_create_conv2d_initializer(
                                zk.get_shape().as_list()[1:], channels,
                                (kernel_height, kernel_width)),
                            name=name)
             zk = m(zk)
         elif spec[0] == 'linear':
             must_flatten = (linear_id == 0 and len(zk.shape) > 2)
             if must_flatten:
                 zk = snt.BatchFlatten()(zk)
             name = 'linear_{}'.format(linear_id)
             linear_id += 1
             output_size = spec[1]
             m = snt.Linear(output_size,
                            regularizers=self._regularizers,
                            initializers=_create_linear_initializer(
                                np.prod(zk.get_shape().as_list()[1:]),
                                output_size),
                            name=name)
             zk = m(zk)
         elif spec[0] == 'batch_normalization':
             if name is None:
                 raise ValueError(
                     'Batch normalization only supported after linear '
                     'layers.')
             name += '_batch_norm'
             m = layers.BatchNorm(name=name)
             zk = m(zk, is_training=is_training)
         elif spec[0] == 'activation':
             if spec[1] not in _ALLOWED_ACTIVATIONS:
                 raise NotImplementedError(
                     'Only the following activations are supported {}'.
                     format(list(_ALLOWED_ACTIVATIONS)))
             name = None
             m = getattr(tf.nn, spec[1])
             zk = m(zk)
     return zk
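For reference, the `_layer_types` entries consumed above can be reconstructed from the parsing code itself; the concrete values below are illustrative, not taken from the original project:

layer_types = [
    ('conv2d', (3, 3), 32, 'SAME', 1),  # (_, (kh, kw), channels, padding, stride)
    ('activation', 'relu'),
    ('linear', 128),
    ('batch_normalization',),           # must directly follow a linear layer
    ('activation', 'relu'),
    ('linear', 10),
]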
Example #6
    def __init__(self, kernel_size=4, name=None):
        super(AutoEncoder, self).__init__(name=name)
        self.encoder = snt.Sequential([snt.Conv2D(4, kernel_size, stride=2, padding='SAME'), tf.nn.leaky_relu,    # [4, 128, 128]
                                       snt.Conv2D(8, kernel_size, stride=2, padding='SAME'), tf.nn.leaky_relu,    # [8, 64, 64]
                                       snt.Conv2D(16, kernel_size, stride=2, padding='SAME'), tf.nn.leaky_relu,    # [16, 32, 32]
                                       snt.Conv2D(32, kernel_size, stride=2, padding='SAME'), tf.nn.leaky_relu])    # [32, 16, 16]
                                       # snt.Conv2D(32, kernel_size, stride=2, padding='SAME'), tf.nn.relu,
                                       # snt.Conv2D(64, kernel_size, stride=2, padding='SAME'), tf.nn.relu])

        # self.decoder = snt.Sequential([snt.Conv2DTranspose(64, kernel_size, stride=2, padding='SAME'), tf.nn.relu,
        #                                snt.Conv2DTranspose(32, kernel_size, stride=2, padding='SAME'), tf.nn.relu,
        #                                snt.Conv2DTranspose(16, kernel_size, stride=2, padding='SAME'), tf.nn.relu,

        self.decoder = snt.Sequential([snt.Conv2DTranspose(32, kernel_size, stride=2, padding='SAME'), tf.nn.leaky_relu, # [32, 16, 16]
                                       snt.Conv2DTranspose(16, kernel_size, stride=2, padding='SAME'), tf.nn.leaky_relu, # [16, 32, 32]
                                       snt.Conv2DTranspose(8, kernel_size, stride=2, padding='SAME'), tf.nn.leaky_relu, # [8, 64, 64]
                                       snt.Conv2DTranspose(4, kernel_size, stride=2, padding='SAME'), tf.nn.leaky_relu, # [4, 128, 128]
                                       snt.Conv2D(1, kernel_size, padding='SAME')])    # [1, 256, 256]
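A hedged reconstruction-pass sketch for the module above (an assumption, not project code; note Sonnet's Conv2D defaults to NHWC, so the bracketed shape comments above read as channel-first annotations only):

model = AutoEncoder(kernel_size=4)
images = tf.random.uniform([8, 256, 256, 1])  # batch of grayscale frames
codes = model.encoder(images)                 # -> [8, 16, 16, 32]
recon = model.decoder(codes)                  # -> [8, 256, 256, 1]
loss = tf.reduce_mean(tf.square(recon - images))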
Example #7
def residual_stack(h, num_hiddens, num_residual_layers, num_residual_hiddens):
    for i in range(num_residual_layers):
        h_i = tf.nn.relu(h)
        
        h_i = snt.Conv2D(
              output_channels=num_residual_hiddens,
              kernel_shape=(3, 3),
              stride=(1, 1),
              name="res3x3_%d" % i)(h_i)
        h_i = tf.nn.relu(h_i)

        h_i = snt.Conv2D(
              output_channels=num_hiddens,
              kernel_shape=(1, 1),
              stride=(1, 1),
              name="res1x1_%d" % i)(h_i)
        h += h_i
    return tf.nn.relu(h)
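Hedged usage sketch (shapes are illustrative assumptions): the input's channel count must equal num_hiddens, since each block's output is added back onto h (as in the VQ-VAE encoder further down this page):

h = tf.ones([8, 32, 32, 128])               # e.g. encoder features
h = residual_stack(h, num_hiddens=128,
                   num_residual_layers=2,
                   num_residual_hiddens=32)  # -> [8, 32, 32, 128]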
Example #8
 def test_count_parameters_on_module(self):
     module = snt.Module()
      # Weights of a 2D convolution with 2 filters.
     module.conv = snt.Conv2D(output_channels=2,
                              kernel_shape=3,
                              name="conv")
     module.conv(tf.ones(
         (2, 5, 5, 3)))  # 3 * 3*3 * 2 + 2 (bias) = 56 parameters
     self.assertEqual(56, parameter_overview.count_parameters(module))
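An equivalent manual check (assuming Sonnet 2, where a built module exposes `.variables`):

total = sum(v.shape.num_elements() for v in module.variables)
assert total == 56  # w: 3*3 * 3 * 2 = 54, plus b: 2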
Example #9
    def _build(self, x):
        h = x
        for unused_i, l in enumerate(self.layers):
            h = tf.nn.relu(snt.Conv2D(l[0], l[1], l[2])(h))

        h_shape = h.get_shape().as_list()
        h = tf.reshape(h, [-1, h_shape[1] * h_shape[2] * h_shape[3]])
        logits = snt.Linear(self.output_size)(h)
        return logits
Example #10
  def _torso(self, input_):
    last_action, env_output = input_
    reward, _, _, (frame, instruction) = env_output

    # Convert to floats.
    frame = tf.to_float(frame)

    frame /= 255
    with tf.variable_scope('convnet'):
      conv_out = frame
      for i, (num_ch, num_blocks) in enumerate([(16, 2), (32, 2), (32, 2)]):
        # Downscale.
        conv_out = snt.Conv2D(num_ch, 3, stride=1, padding='SAME')(conv_out)
        conv_out = tf.nn.pool(
            conv_out,
            window_shape=[3, 3],
            pooling_type='MAX',
            padding='SAME',
            strides=[2, 2])

        # Residual block(s).
        for j in range(num_blocks):
          with tf.variable_scope('residual_%d_%d' % (i, j)):
            block_input = conv_out
            conv_out = tf.nn.relu(conv_out)
            conv_out = snt.Conv2D(num_ch, 3, stride=1, padding='SAME')(conv_out)
            conv_out = tf.nn.relu(conv_out)
            conv_out = snt.Conv2D(num_ch, 3, stride=1, padding='SAME')(conv_out)
            conv_out += block_input

    conv_out = tf.nn.relu(conv_out)
    conv_out = snt.BatchFlatten()(conv_out)

    conv_out = snt.Linear(256)(conv_out)
    conv_out = tf.nn.relu(conv_out)

    instruction_out = self._instruction(instruction)

    # Append clipped last reward and one hot last action.
    clipped_reward = tf.expand_dims(tf.clip_by_value(reward, -1, 1), -1)
    one_hot_last_action = tf.one_hot(last_action, self._num_actions)
    return tf.concat(
        [conv_out, clipped_reward, one_hot_last_action, instruction_out],
        axis=1)
Example #11
File: model.py Project: mingyr/san
    def __init__(self, filter_size = 3, num_filters = 32,
                 pooling_stride = 2, act = 'tanh', summ = None, name = "mapper"):
        super(Mapper, self).__init__(name = name)
        
        self._pool = Downsample2D(pooling_stride)
        self._act = Activation(act, verbose = True)
        self._bf = snt.BatchFlatten()
        self._summ = summ

        initializers = {
            'w': tf.truncated_normal_initializer(stddev = 0.02),
            'b': tf.zeros_initializer()
        }

        with self._enter_variable_scope():
            self._l1_conv = snt.Conv2D(num_filters, filter_size)
            self._l2_conv = snt.Conv2D(num_filters << 1, filter_size)
            self._lin1 = snt.Linear(256, initializers = initializers)
            self._lin2 = snt.Linear(1, initializers = initializers)
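    The Mapper's _build method is not shown above; purely as an illustration (an assumption, not the project's actual code), the declared submodules could be wired like this:

    def _build(self, x):
        h = self._pool(self._act(self._l1_conv(x)))
        h = self._pool(self._act(self._l2_conv(h)))
        h = self._bf(h)
        h = self._act(self._lin1(h))
        return self._lin2(h)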
Example #12
    def _build(self, inputs, verbose=VERBOSITY):

        if EncodeProcessDecode_v8.convnet_tanh:
            activation = tf.nn.tanh
        else:
            activation = tf.nn.relu

        img_shape = get_correct_image_shape(config=None, get_type="seg",
                                            depth_data_provided=EncodeProcessDecode_v8.depth_data_provided)
        img_data = tf.reshape(inputs, [-1, *img_shape])  # -1 infers the batch dimension
        print(img_data.get_shape())

        ''' 60, 80 '''
        outputs = snt.Conv2D(output_channels=32, kernel_shape=3, stride=2, padding="SAME")(img_data)
        outputs = activation(outputs)
        if EncodeProcessDecode_v8.conv_layer_instance_norm:
            outputs = snt.BatchNorm()(outputs, is_training=self._is_training)
        print(outputs.get_shape())

        ''' 30, 40 '''
        outputs = snt.Conv2D(output_channels=32, kernel_shape=3, stride=2, padding="SAME")(outputs)
        outputs = activation(outputs)
        if EncodeProcessDecode_v8.conv_layer_instance_norm:
            outputs = snt.BatchNorm()(outputs, is_training=self._is_training)
        print(outputs.get_shape())

        ''' 15, 20 '''
        outputs = snt.Conv2D(output_channels=16, kernel_shape=3, stride=2, padding="SAME")(outputs)
        outputs = activation(outputs)
        if EncodeProcessDecode_v8.conv_layer_instance_norm:
            outputs = snt.BatchNorm()(outputs, is_training=self._is_training)
        print(outputs.get_shape())

        ''' 8, 10 '''
        outputs = snt.Conv2D(output_channels=5, kernel_shape=3, stride=2, padding="SAME")(outputs)
        outputs = activation(outputs)
        if EncodeProcessDecode_v8.conv_layer_instance_norm:
            outputs = snt.BatchNorm()(outputs, is_training=self._is_training)
        print(outputs.get_shape())

        outputs = tf.layers.flatten(outputs)  # 8,10,5 flattened

        return outputs
Example #13
 def _create_conv(self, partitioned, name):
   hidden = tf.ones(shape=(1, 16, 16, 3))
   if partitioned:
     partitioners = {"w": tf.variable_axis_size_partitioner(4)}
   else:
     partitioners = None
   conv = snt.Conv2D(output_channels=3, kernel_shape=3, stride=1,
                     partitioners=partitioners, name=name)
   conv(hidden)
   return conv
Example #14
def _build_conv_layer(conv_spec, data_format):
    return snt.Conv2D(output_channels=conv_spec.output_channels,
                      kernel_shape=conv_spec.kernel_shape,
                      stride=conv_spec.stride,
                      rate=conv_spec.rate,
                      padding=snt.SAME,
                      use_bias=True,
                      data_format=data_format,
                      initializers=_DEFAULT_CONV_INITIALIZERS,
                      regularizers=_DEFAULT_CONV_REGULARIZERS)
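The type of conv_spec is not shown here; a namedtuple with these field names would satisfy the function (hypothetical, for illustration, and assuming the module-level initializer/regularizer defaults referenced above are defined):

import collections

ConvSpec = collections.namedtuple(
    'ConvSpec', ['output_channels', 'kernel_shape', 'stride', 'rate'])

layer = _build_conv_layer(
    ConvSpec(output_channels=32, kernel_shape=3, stride=1, rate=1),
    data_format='NHWC')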
Example #15
def inference(inputs):
    inputs = snt.Conv2D(output_channels=166,
                        kernel_shape=3,
                        rate=1,
                        padding='SAME',
                        name='conv1')(inputs)
    inputs = tf.nn.relu(inputs)
    inputs = tf.nn.max_pool(inputs, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')

    inputs = snt.Conv2D(output_channels=32,
                        kernel_shape=3,
                        rate=2,
                        padding='SAME',
                        name='conv2')(inputs)
    inputs = tf.nn.relu(inputs)
    inputs = tf.nn.max_pool(inputs, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')

    inputs = snt.Conv2D(output_channels=64,
                        kernel_shape=3,
                        rate=4,
                        padding='SAME',
                        name='conv3')(inputs)
    inputs = tf.nn.relu(inputs)
    inputs = tf.nn.max_pool(inputs, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')

    inputs = snt.Conv2D(output_channels=128,
                        kernel_shape=3,
                        rate=8,
                        padding='SAME',
                        name='conv4')(inputs)
    inputs = tf.nn.relu(inputs)
    inputs = tf.nn.max_pool(inputs, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')

    inputs = snt.Conv2D(output_channels=256,
                        kernel_shape=3,
                        rate=16,
                        padding='SAME',
                        name='conv5')(inputs)
    inputs = tf.nn.relu(inputs)
    inputs = tf.nn.max_pool(inputs, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')

    inputs = snt.Conv2D(output_channels=256,
                        kernel_shape=3,
                        padding='SAME',
                        name='conv6')(inputs)
    inputs = tf.nn.relu(inputs)
    inputs = tf.nn.max_pool(inputs, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')

    inputs = snt.Conv2D(output_channels=1,
                        kernel_shape=1,
                        padding='SAME',
                        name='conv7')(inputs)
    coords, norm_heatmap = dsnt.dsnt(inputs)

    # The Sonnet option
    # coords, norm_heatmap = DSNT()(inputs)
    return coords, norm_heatmap
Example #16
def res_net_convolution(frame):
    # Chain each stage off the previous stage's output (reading from
    # `frame` inside the loop would discard earlier stages; cf. Example #10).
    conv_out = frame
    for i, (num_ch, num_blocks) in enumerate([(16, 2), (32, 2), (32, 2)]):
        # Downscale.
        conv_out = snt.Conv2D(num_ch, 3, stride=1, padding='SAME')(conv_out)
        conv_out = tf.nn.pool(
            conv_out,
            window_shape=[3, 3],
            pooling_type='MAX',
            padding='SAME',
            strides=[2, 2])
        # Residual block(s).
        for j in range(num_blocks):
            with tf.variable_scope('residual_%d_%d' % (i, j)):
                block_input = conv_out
                conv_out = tf.nn.relu(conv_out)
                conv_out = snt.Conv2D(num_ch, 3, stride=1, padding='SAME')(conv_out)
                conv_out = tf.nn.relu(conv_out)
                conv_out = snt.Conv2D(num_ch, 3, stride=1, padding='SAME')(conv_out)
                conv_out += block_input
    return conv_out
Example #17
 def decoder_builder(y, z):
     # TODO: remove the linear layer
     # `n` is local parameter for size of internal linear layer
     n = 32
     latent = snt.Linear(n)(tf.concat(
         [tf.cast(y, dtype=tf.float32),
          tf.cast(z, dtype=tf.float32)], 1))
     latent = tf.reshape(latent, [-1, 1, 1, n])
     output = network(latent, **network_kwargs)
     output = snt.Conv2D(num_channels, 1, padding=snt.SAME)(output)
     return output
Example #18
    def test_leading_batchnorm_rejected(self):
        module = snt.Sequential([
            _BatchNorm(),
            snt.Conv2D(output_channels=5, kernel_shape=3, padding='VALID'),
        ])

        network = ibp.VerifiableModelWrapper(module)
        network(self._inputs)

        with self.assertRaises(auto_verifier.NotVerifiableError):
            _ = auto_verifier.VerifiableLayerBuilder(network).build_layers()
Example #19
    def __init__(self,
                 image_feature_size=16,
                 kernel_size=3,
                 name=None,
                 **unused_kwargs):
        super(Model, self).__init__(name=name)
        self.encoder_graph = RelationNetwork(
            lambda: snt.nets.MLP([32, 16], activate_final=True),
            lambda: snt.nets.MLP([32, 16], activate_final=True))
        self.encoder_image = RelationNetwork(
            lambda: snt.nets.MLP([32, 16], activate_final=True),
            lambda: snt.nets.MLP([32, 16], activate_final=True))
        self.image_cnn = snt.Sequential([
            snt.Conv2D(16, kernel_size, stride=2), tf.nn.relu,
            snt.Conv2D(image_feature_size, kernel_size, stride=2), tf.nn.relu
        ])
        self.compare = snt.nets.MLP([32, 1])
        self.image_feature_size = image_feature_size

        self._step = None
Example #20
def _avgpool_linear_script(num_classes, net, layer_values):
    layer_values = add_layer(net, snt.Conv2D(3, kernel_shape=(2, 2), **_inits),
                             layer_values)
    layer_values = add_layer(net, tf.nn.relu, layer_values)
    layer_values = add_layer(net, AvgPool(kernel_shape=(2, 2), strides=(1, 1)),
                             layer_values)
    layer_values = add_layer(net,
                             snt.Linear(num_classes, **_inits),
                             layer_values,
                             flatten=True)
    return layer_values
Example #21
    def __init__(self, name='MNIST_Discriminator', regularization=1.e-4):
        super(MNISTDiscriminator, self).__init__(name=name)

        reg = {
            'w': l2_regularizer(scale=regularization),
            'b': l2_regularizer(scale=regularization)
        }

        with self._enter_variable_scope():
            self.conv1 = snt.Conv2D(name='conv2d_1',
                                    output_channels=8,
                                    kernel_shape=5,
                                    stride=2,
                                    regularizers=reg)
            self.bn1 = snt.BatchNorm(name='batch_norm_1')
            self.conv2 = snt.Conv2D(name='conv2d_2',
                                    output_channels=16,
                                    kernel_shape=5,
                                    stride=1,
                                    regularizers=reg)
            self.bn2 = snt.BatchNorm(name='batch_norm_2')
            self.conv3 = snt.Conv2D(name='conv2d_3',
                                    output_channels=32,
                                    kernel_shape=5,
                                    stride=2,
                                    regularizers=reg)
            self.bn3 = snt.BatchNorm(name='batch_norm_3')
            self.conv4 = snt.Conv2D(name='conv2d_4',
                                    output_channels=64,
                                    kernel_shape=5,
                                    stride=1,
                                    regularizers=reg)
            self.bn4 = snt.BatchNorm(name='batch_norm_4')
            self.conv5 = snt.Conv2D(name='conv2d_5',
                                    output_channels=65,
                                    kernel_shape=5,
                                    stride=2,
                                    regularizers=reg)
            self.bn5 = snt.BatchNorm(name='batch_norm_5')
            self.flatten = snt.BatchFlatten(name='flatten')
            self.linear = snt.Linear(name='l', output_size=1, regularizers=reg)
Example #22
    def __init__(self, name='Discriminator', image_size=64, ndf=64, regularization=1.e-4):
        super(Discriminator, self).__init__(name=name)

        reg = {'w': l2_regularizer(scale=regularization)}

        self.convs = []
        self.batch_norms = []

        csize, cndf = image_size // 2, ndf  # integer spatial size after the first stride-2 conv

        with self._enter_variable_scope():
            self.convs.append(snt.Conv2D(name='conv2d_1',
                                         output_channels=ndf,
                                         kernel_shape=4,
                                         stride=2,
                                         padding='VALID',
                                         regularizers=reg,
                                         use_bias=False))
            self.batch_norms.append(snt.BatchNorm(name='batch_norm_1'))

            n_layer = 2
            while csize > 4:
                self.convs.append(snt.Conv2D(name='conv2d_{}'.format(n_layer),
                                             output_channels=cndf * 2,
                                             kernel_shape=4,
                                             stride=2,
                                             padding='SAME',
                                             regularizers=reg,
                                             use_bias=False))
                self.batch_norms.append(snt.BatchNorm(name='batch_norm_{}'.format(n_layer)))
                cndf = cndf * 2
                csize = csize // 2
                n_layer += 1

            self.convs.append(snt.Conv2D(name='conv2d_{}'.format(n_layer),
                                         output_channels=1,
                                         kernel_shape=4,
                                         stride=1,
                                         padding='SAME',
                                         regularizers=reg,
                                         use_bias=False))
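For the defaults (image_size=64, ndf=64), the constructor above traces out as follows (a worked check, not code from the project):

# conv2d_1: 64 ch,  stride 2 -> csize 32
# conv2d_2: 128 ch, stride 2 -> csize 16
# conv2d_3: 256 ch, stride 2 -> csize 8
# conv2d_4: 512 ch, stride 2 -> csize 4  (loop exits)
# conv2d_5: 1 ch,   stride 1  (final layer)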
Example #23
    def _build(self, h):
        for i in range(self._num_residual_layers):
            h_i = self._activation(h)

            h_i = snt.Conv2D(output_channels=self._num_residual_hiddens,
                             kernel_shape=(self._filter_size,
                                           self._filter_size),
                             stride=(1, 1),
                             initializers=self._initializers,
                             data_format=self._data_format,
                             name='res_nxn_%d' % i)(h_i)
            h_i = self._activation(h_i)

            h_i = snt.Conv2D(output_channels=self._num_hiddens,
                             kernel_shape=(1, 1),
                             stride=(1, 1),
                             initializers=self._initializers,
                             data_format=self._data_format,
                             name='res_1x1_%d' % i)(h_i)
            h += h_i
        return self._activation(h)
Example #24
    def testGetNormalizedVariableMapModule(self):
        input_ = tf.placeholder(tf.float32, shape=[1, 10, 10, 3])
        conv = snt.Conv2D(output_channels=3, kernel_shape=3)
        conv(input_)

        variable_map = snt.get_normalized_variable_map(conv)

        self.assertEqual(len(variable_map), 2)
        self.assertIn("w", variable_map)
        self.assertIn("b", variable_map)
        self.assertIs(variable_map["w"], conv.w)
        self.assertIs(variable_map["b"], conv.b)
Example #25
    def __init__(self,
                 action_spec: specs.DiscreteArray,
                 name: Optional[Text] = None):
        super().__init__(name=name)

        # Spatial
        self.conv1 = snt.Conv2D(16, 1, 1, data_format="NHWC", name="conv_1")
        self.conv2 = snt.Conv2D(32, 3, 1, data_format="NHWC", name="conv_2")
        self.conv3 = snt.Conv2D(64, 3, 1, data_format="NHWC", name="conv_3")
        self.conv4 = snt.Conv2D(32, 3, 1, data_format="NHWC", name="conv_4")
        self.flatten = snt.Flatten()

        self.fc1 = snt.Linear(256, name="fc_1")

        # Flat
        self.flat = snt.nets.MLP([64, 64], name="mlp_1")
        self.rnn = snt.DeepRNN([
            snt.nets.MLP([50, 50], activate_final=True, name="mlp_2"),
            snt.GRU(512, name="gru"),
            networks.PolicyValueHead(action_spec.num_values)
        ])
Example #26
    def _build(self, x):
        h = snt.Conv2D(output_channels=self._num_hiddens // 2,
                       kernel_shape=(4, 4),
                       stride=(2, 2),
                       name="enc_1")(x)
        h = tf.nn.relu(h)

        h = snt.Conv2D(output_channels=self._num_hiddens,
                       kernel_shape=(4, 4),
                       stride=(2, 2),
                       name="enc_2")(h)
        h = tf.nn.relu(h)

        h = snt.Conv2D(output_channels=self._num_hiddens,
                       kernel_shape=(3, 3),
                       stride=(1, 1),
                       name="enc_3")(h)

        h = residual_stack(h, self._num_hiddens, self._num_residual_layers,
                           self._num_residual_hiddens)
        return h
Example #27
    def test_get_parameter_overview_empty(self):
        module = snt.Module()
        snt.allow_empty_variables(module)

        # No variables.
        self.assertEqual(EMPTY_PARAMETER_OVERVIEW,
                         parameter_overview.get_parameter_overview(module))

        module.conv = snt.Conv2D(output_channels=2, kernel_shape=3)
        # Variables not yet created (happens in the first forward pass).
        self.assertEqual(EMPTY_PARAMETER_OVERVIEW,
                         parameter_overview.get_parameter_overview(module))
Example #28
    def _build(self, x):
        h = snt.Conv2D(output_channels=self._num_hiddens,
                       kernel_shape=(3, 3),
                       stride=(1, 1),
                       name="dec_1")(x)

        h = self._dropout(h, training=self._is_training)
        h = tf.layers.batch_normalization(
            h,
            training=self._is_training,
            momentum=self._bn_momentum,
            renorm=self._bn_renormalization,
            renorm_momentum=self._bn_momentum,
            renorm_clipping=self._renorm_clipping,
            name="batch_norm_1")

        h = residual_stack(h,
                           self._num_hiddens,
                           self._num_residual_layers,
                           self._num_residual_hiddens,
                           activation=self._activation,
                           training=self._is_training,
                           prob_drop=self._prob_drop,
                           momentum=self._bn_momentum,
                           renorm=self._bn_renormalization,
                           renorm_momentum=self._bn_momentum,
                           renorm_clipping=self._renorm_clipping)

        h = snt.Conv2DTranspose(output_channels=int(self._num_hiddens / 2),
                                output_shape=None,
                                kernel_shape=(4, 4),
                                stride=(2, 2),
                                name="dec_2")(h)

        h = self._dropout(h, training=self._is_training)
        h = tf.layers.batch_normalization(
            h,
            training=self._is_training,
            momentum=self._bn_momentum,
            renorm=self._bn_renormalization,
            renorm_momentum=self._bn_momentum,
            renorm_clipping=self._renorm_clipping,
            name="batch_norm_2")

        h = self._activation(h)

        x_recon = snt.Conv2DTranspose(output_channels=3,
                                      output_shape=None,
                                      kernel_shape=(4, 4),
                                      stride=(2, 2),
                                      name="dec_3")(h)

        return x_recon
Example #29
    def _build(self, inputs, is_training=True, test_local_stats=False):
        """
    Args:
        inputs: input tensor node.
        is_training: tells batch norm whether to generate the update ops.
        test_local_stats: whether batch norm should use local batch statistics.

    Returns:
        logits

    """
        # instantiate all the convolutional layers
        self.layers = [
            snt.Conv2D(name="conv_2d",
                       output_channels=self._conv_channels,
                       kernel_shape=self._conv_kernel_shape,
                       stride=self._conv_stride,
                       padding=self._padding,
                       use_bias=True,
                       **self._extra_params)
        ]
        #(self, depth, name="resUnit", kernel_shape=[3,3], stride=1, activation=tf.nn.relu, **extra_params)
        for i in range(self._num_resunits):
            self.layers.append(
                ResUnit(depth=self._resunit_channels[i],
                        name="resunit{}".format(i),
                        kernel_shape=self._resunit_kernel_shapes[i],
                        stride=self._resunit_strides[i],
                        activation=self._activation,
                        **self._extra_params))

        net = self.layers[0](inputs)
        net = tf.layers.max_pooling2d(net,
                                      self._pooling_kernel_shape,
                                      self._pooling_stride,
                                      padding=self._padding,
                                      data_format='channels_last',
                                      name="max_pooling2d")

        for resunit in self.layers[1:]:
            net = resunit(net)

        net = tf.layers.average_pooling2d(net,
                                          self._pooling_kernel_shape,
                                          self._pooling_stride,
                                          padding=self._padding,
                                          data_format='channels_last',
                                          name="avg_pooling2d")

        net = snt.BatchFlatten(name="flatten")(net)
        logits = snt.Linear(self._output_size)(net)

        return logits
Example #30
 def test_get_parameter_overview_on_module(self):
   module = snt.Module()
   # Weights of a 2D convolution with 2 filters.
   module.conv = snt.Conv2D(output_channels=2, kernel_shape=3, name="conv")
   module.conv(tf.ones((2, 5, 5, 3)))  # 3 * 3*3 * 2 + 2 (bias) = 56 parameters
   for v in module.variables:
     v.assign(tf.ones_like(v))
   self.assertEqual(
       SNT_CONV2D_PARAMETER_OVERVIEW,
       parameter_overview.get_parameter_overview(module, include_stats=False))
   self.assertEqual(SNT_CONV2D_PARAMETER_OVERVIEW_WITH_STATS,
                    parameter_overview.get_parameter_overview(module))