def apply_batch_reshape(self, wrapper, shape):
     reshape = snt.BatchReshape(shape)
     if self.vertices.shape.ndims == self.nominal.shape.ndims:
         reshape_vertices = reshape
     else:
         reshape_vertices = snt.BatchReshape(shape, preserve_dims=2)
     return SimplexBounds(reshape_vertices(self.vertices),
                          reshape(self.nominal), self.r)
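For reference, a minimal sketch of what snt.BatchReshape does, including the preserve_dims=2 case used above. The imports and shapes here are hypothetical, not taken from any snippet on this page.

import sonnet as snt
import tensorflow as tf

nominal = tf.placeholder(tf.float32, [None, 784])        # [batch, features]
vertices = tf.placeholder(tf.float32, [None, 10, 784])   # [batch, vertex, features]

# Everything after the first dimension is reshaped; the batch axis is kept.
image = snt.BatchReshape([28, 28, 1])(nominal)           # -> [batch, 28, 28, 1]

# With preserve_dims=2 the first two axes (batch and vertex) are both kept.
vertex_images = snt.BatchReshape([28, 28, 1], preserve_dims=2)(vertices)  # -> [batch, 10, 28, 28, 1]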
    def _build(self, inputs):
        """Constructs the generator graph.

    Args:
      inputs: `tf.Tensor` with the input of the generator.

    Returns:
      `tf.Tensor`, the generated samples.
    """
        leaky_relu_activation = lambda x: tf.maximum(0.2 * x, x)
        init_dict = {
            'w': tf.truncated_normal_initializer(seed=547, stddev=0.02),
            'b': tf.constant_initializer(0.3)
        }
        linear = snt.Linear(7 * 7 * 64, initializers=init_dict)
        inputs = linear(inputs)

        # Reshape the data to have rank 4.
        inputs = snt.BatchReshape((7, 7, 64))(inputs)
        inputs = leaky_relu_activation(inputs)

        net = snt.nets.ConvNet2DTranspose(output_channels=[32, 1],
                                          output_shapes=[[14, 14], [28, 28]],
                                          strides=[2],
                                          paddings=[snt.SAME],
                                          kernel_shapes=[[5, 5]],
                                          use_batch_norm=False,
                                          initializers=init_dict)

        # We use a sigmoid to ensure that the generated samples are in the same
        # range as the data.
        return tf.nn.sigmoid(net(inputs))
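A hypothetical way to drive this generator, assuming the _build above belongs to an snt.AbstractModule subclass; the class name Generator and the sizes below are assumptions, not part of the snippet.

noise = tf.random_normal([64, 100])      # hypothetical batch of latent vectors
samples = Generator()(noise)             # -> [64, 28, 28, 1], values in [0, 1]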
    def __init__(self,
                 FLAGS,
                 init=None,
                 activ=None,
                 name='DecoderConvT',
                 is_training_ph=None):
        super().__init__(FLAGS=FLAGS, init=init, activ=activ, name=name)
        if is_training_ph is not None:
            self.is_training = is_training_ph
        else:
            self.is_training = self.FLAGS['is_training']

        with self._enter_variable_scope():
            self._reshape = snt.BatchReshape([1, 1, self.FLAGS['vae_z_size']])
            self._upconv = snt.nets.ConvNet2DTranspose(
                output_channels=[32, 16, 3],
                output_shapes=[None, None, None],
                kernel_shapes=[7, 4, 3],
                strides=[1, 4, 3],
                paddings=['VALID', 'SAME', 'SAME'],
                activation=self.activ,
                initializers=self.init,
                use_batch_norm=self.FLAGS['use_bn'],
                batch_norm_config={'update_ops_collection': None},
            )
Example #4
    def _build(self, inpt):
        n = np.prod(self._output_size)

        mlp = MLP(self._n_hidden, n_out=n)
        reshape = snt.BatchReshape(self._output_size)
        seq = snt.Sequential([mlp, reshape])
        return seq(inpt) * tf.get_variable('output_scale', initializer=self._output_scale)
Example #5
    def _build(self, inputs):
        """Looks up rows in memory.

    In the args list, we have the following conventions:
      B: batch size
      M: number of slots in a row of the memory matrix
      R: number of rows in the memory matrix
      H: number of read heads in the memory controller

    Args:
      inputs: A tuple of
        *  read_inputs, a tensor of shape [B, ...] that will be flattened and
             passed through a linear layer to get read keys/read_strengths for
             each head.
        *  mem_state, the primary memory tensor. Of shape [B, R, M].

    Returns:
      The read from the memory (concatenated across read heads) and read
        information.
    """
        # Assert input shapes are compatible and separate inputs.
        _assert_compatible_memory_reader_input(inputs)
        read_inputs, mem_state = inputs  # The input is a two-part tuple.

        # Determine the read weightings for each key.
        flat_outputs = self._keys_and_read_strengths_generator(
            snt.BatchFlatten()(read_inputs))

        # Separate the read_strengths from the rest of the weightings.
        h = self._num_read_heads
        flat_keys = flat_outputs[:, :-h]  # All but the last h columns.
        read_strengths = tf.nn.softplus(flat_outputs[:, -h:])  # The last h columns.

        # Reshape the weights.
        read_shape = (self._num_read_heads, self._memory_word_size)
        read_keys = snt.BatchReshape(read_shape)(flat_keys)

        # Read from memory.
        memory_reads, read_weights, read_indices, read_strengths = (
            read_from_memory(read_keys, read_strengths, mem_state,
                             self._top_k))
        concatenated_reads = snt.BatchFlatten()(memory_reads)

        return concatenated_reads, ReadInformation(weights=read_weights,
                                                   indices=read_indices,
                                                   keys=read_keys,
                                                   strengths=read_strengths)
Example #6
 def apply_batch_reshape(self, wrapper, shape):
   bounds_out = super(RelativeSymbolicBounds, self).apply_batch_reshape(
       wrapper, shape)
   nominal_out = snt.BatchReshape(shape)(self._nominal)
   return RelativeSymbolicBounds(
       bounds_out.lower, bounds_out.upper, nominal_out).with_priors(
           wrapper.output_bounds)
Example #7
    def _build(self, inputs):

        if FLAGS.l2_reg:
            regularizers = {
                'w': lambda w: FLAGS.l2_reg * tf.nn.l2_loss(w),
                'b': lambda w: FLAGS.l2_reg * tf.nn.l2_loss(w),
            }
        else:
            regularizers = None

        reshape = snt.BatchReshape([28, 28, 1])

        conv = snt.Conv2D(2, 5, padding=snt.SAME, regularizers=regularizers)
        relu = tf.nn.relu(conv(reshape(inputs)))

        max_pool = tf.nn.max_pool(relu, (2, 2), (2, 2), padding=snt.SAME)

        conv = snt.Conv2D(4, 5, padding=snt.SAME, regularizers=regularizers)
        relu = tf.nn.relu(conv(max_pool))

        max_pool = tf.nn.max_pool(relu, (2, 2), (2, 2), padding=snt.SAME)

        flatten = snt.BatchFlatten()(max_pool)

        linear = snt.Linear(32, regularizers=regularizers)(flatten)

        return snt.Linear(10, regularizers=regularizers)(linear)
Example #8
    def __init__(self, name='MNIST_Generator', regularization=1.e-4):
        super(MNISTGenerator, self).__init__(name=name)

        reg = {
            'w': l2_regularizer(scale=regularization),
            'b': l2_regularizer(scale=regularization)
        }

        with self._enter_variable_scope():
            self.linear = snt.Linear(name='linear',
                                     output_size=3136,
                                     regularizers=reg)
            self.bn1 = snt.BatchNorm(name='batch_norm_1')
            self.reshape = snt.BatchReshape(name='reshape', shape=[7, 7, 64])
            self.deconv1 = snt.Conv2DTranspose(name='tr-conv2d_1',
                                               output_channels=64,
                                               kernel_shape=5,
                                               stride=2,
                                               regularizers=reg)
            self.bn2 = snt.BatchNorm(name='batch_norm_2')
            self.deconv2 = snt.Conv2DTranspose(name='tr-conv2d_2',
                                               output_channels=32,
                                               kernel_shape=5,
                                               stride=1,
                                               regularizers=reg)
            self.bn3 = snt.BatchNorm(name='batch_norm_3')
            self.deconv3 = snt.Conv2DTranspose(name='tr-conv2d_3',
                                               output_channels=3,
                                               kernel_shape=5,
                                               stride=2,
                                               regularizers=reg)
Example #9
  def _build(self, inputs):

    if FLAGS.l2_reg:
      regularizers = {'w': lambda w: FLAGS.l2_reg*tf.nn.l2_loss(w),
                      'b': lambda w: FLAGS.l2_reg*tf.nn.l2_loss(w),}
    else:
      regularizers = None

    reshape = snt.BatchReshape([28, 28, 1])

    conv = snt.Conv2D(2, 5, padding=snt.SAME, regularizers=regularizers)
    act = _NONLINEARITY(conv(reshape(inputs)))

    pool = tf.nn.pool(act, window_shape=(2, 2), pooling_type=_POOL,
                      padding=snt.SAME, strides=(2, 2))

    conv = snt.Conv2D(4, 5, padding=snt.SAME, regularizers=regularizers)
    act = _NONLINEARITY(conv(pool))

    pool = tf.nn.pool(act, window_shape=(2, 2), pooling_type=_POOL,
                      padding=snt.SAME, strides=(2, 2))

    flatten = snt.BatchFlatten()(pool)

    linear = snt.Linear(32, regularizers=regularizers)(flatten)

    return snt.Linear(10, regularizers=regularizers)(linear)
Example #10
    def decode(self, latent_code):
        """
        Builds the decoder part of the VAE
        """

        # Create regular hidden layers
        # Layer 1
        linear = snt.Linear(self.num_units, name='decoder_hidden_1')
        dense = linear(latent_code)
        dense = tf.nn.relu(dense)

        # Layer 2
        linear = snt.Linear(self.num_units, name='decoder_hidden_2')
        dense = linear(dense)
        dense = tf.nn.relu(dense)

        # Layer 3
        linear = snt.Linear(self.num_inputs, name='decoder_hidden_logits')
        logits = linear(dense)

        to_output_shape = snt.BatchReshape(shape=self.input_shape)
        output = to_output_shape(logits)

        decoder_bernoulli = tfd.Bernoulli(logits=output)

        return decoder_bernoulli
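A hedged usage sketch for the Bernoulli head returned above; the names model and images, and the rank-3 input_shape implied by the reduction axes, are assumptions rather than part of the snippet.

decoder_bernoulli = model.decode(latent_code)
# Per-example reconstruction log-likelihood, summed over the image axes.
log_likelihood = tf.reduce_sum(decoder_bernoulli.log_prob(images), axis=[1, 2, 3])
# Mean of the Bernoulli gives the expected reconstruction in [0, 1].
reconstruction = decoder_bernoulli.mean()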
Example #11
 def reshape_duals_forwards(self, next_layer, dual_vars):
     if next_layer.reshape:
         # There was a reshape prior to the next layer.
         reshape = snt.BatchReshape(next_layer.input_shape, preserve_dims=2)
         dual_vars = {
             key: reshape(dual_var)
             for key, dual_var in dual_vars.items()
         }
     return dual_vars
Example #12
  def _build(self, observation):
    values = []

    flat_obs = snt.BatchFlatten()(observation)
    for _ in range(self._n_cumulants):
      net = snt.nets.MLP(**self._network_kwargs)(flat_obs)
      net = snt.Linear(output_size=self._n_policies * self._n_actions)(net)
      net = snt.BatchReshape([self._n_policies, self._n_actions])(net)
      values.append(net)
    values = tf.stack(values, axis=2)
    return values
Example #13
 def decode(self, code):
     """Decode the image observation from a latent code."""
     if self._convnet_output_shape is None:
         raise ValueError('Must call `encode` before `decode`.')
     transpose_convnet_in_flat = snt.Linear(
         self._convnet_output_shape.num_elements(),
         name='decode_initial_linear')(code)
     transpose_convnet_in_flat = tf.nn.relu(transpose_convnet_in_flat)
     transpose_convnet_in = snt.BatchReshape(
         self._convnet_output_shape.as_list())(transpose_convnet_in_flat)
     return self._convnet.transpose(None)(transpose_convnet_in)
Example #14
    def apply_batch_reshape(self, wrapper, shape):
        """Propagates the bounds through a reshape.

    Args:
      wrapper: Contains prior bounds from a previous iteration.
      shape: output shape, excluding the batch dimension.

    Returns:
      Output bounds.
    """
        reshape = snt.BatchReshape(shape)
        return RelativeIntervalBounds(reshape(self.lower_offset),
                                      reshape(self.upper_offset),
                                      reshape(self.nominal))
Example #15
    def __init__(self,
                 z_dim,
                 embed_dim,
                 action_spec,
                 decoder_params,
                 order,
                 grid_height,
                 grid_width,
                 name="autoregressive_heads"):
        super(AutoregressiveHeads, self).__init__(name=name)

        self._z_dim = z_dim
        self._action_spec = action_spec
        self._grid_height = grid_height
        self._grid_width = grid_width

        # Filter the order of actions according to the actual action specification.
        order = self.ORDERS[order]
        self._order = [k for k in order if k in action_spec]

        with self._enter_variable_scope():
            self._action_embeds = collections.OrderedDict([
                (k, snt.Linear(output_size=embed_dim,
                               name=k + "_action_embed"))
                for k in six.iterkeys(action_spec)
            ])

            self._action_heads = []
            for k, v in six.iteritems(action_spec):
                if k in LOCATION_KEYS:
                    decoder = utils.ConvDecoder(  # pylint: disable=not-callable
                        name=k + "_action_decoder",
                        **decoder_params)
                    action_head = snt.Sequential([
                        snt.BatchReshape([4, 4, -1]), decoder,
                        snt.BatchFlatten()
                    ],
                                                 name=k + "_action_head")
                else:
                    output_size = v.maximum - v.minimum + 1
                    action_head = snt.Linear(output_size=output_size,
                                             name=k + "_action_head")
                self._action_heads.append((k, action_head))
            self._action_heads = collections.OrderedDict(self._action_heads)

            self._residual_mlps = {}
            for k, v in six.iteritems(self._action_spec):
                self._residual_mlps[k] = snt.nets.MLP(
                    output_sizes=[16, 32, self._z_dim],
                    name=k + "_residual_mlp")
Example #16
    def _build(self, observation, actions):
        obs = observation["arena"]

        n_outputs = self._n_actions * self._n_phis
        flat_obs = snt.BatchFlatten()(obs)
        net = snt.nets.MLP(**self._network_kwargs)(flat_obs)
        net = snt.Linear(output_size=n_outputs)(net)
        net = snt.BatchReshape((self._n_actions, self._n_phis))(net)

        indices = tf.stack([tf.range(tf.shape(actions)[0]), actions], axis=1)
        values = tf.gather_nd(net, indices)
        if self._final_activation:
            values = getattr(tf.nn, self._final_activation)(values)

        return values
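A small sketch (with made-up sizes) of the tf.gather_nd indexing used above: for each batch element b it selects net[b, actions[b], :].

B, A, P = 3, 5, 7
net = tf.zeros([B, A, P])
actions = tf.constant([1, 4, 0])
indices = tf.stack([tf.range(B), actions], axis=1)   # [[0, 1], [1, 4], [2, 0]]
values = tf.gather_nd(net, indices)                  # shape [B, P]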
Example #17
    def _build(self, inputs):
        """Looks up rows in memory.

    In the args list, we have the following conventions:
      B: batch size
      M: number of slots in a row of the memory matrix
      R: number of rows in the memory matrix
      H: number of read heads in the memory controller

    Args:
      inputs: A tuple of
        *  read_inputs, a tensor of shape [B, ...] that will be flattened and
             passed through a linear layer to get read keys/read_strengths for
             each head.
        *  mem_state, the primary memory tensor. Of shape [B, R, M].

    Returns:
      The read from the memory (concatenated across read heads) and read
        information.
    """
        # Assert input shapes are compatible and separate inputs.
        _assert_compatible_memory_reader_input(inputs)
        read_inputs, mem_state = inputs

        # Determine the read weightings for each key.
        flat_outputs = self._keys_and_read_strengths_generator(
            snt.BatchFlatten()(read_inputs))

        # Separate the read_strengths from the rest of the weightings.
        h = self._num_read_heads
        flat_keys = flat_outputs[:, :-h]
        read_strengths = tf.nn.softplus(flat_outputs[:, -h:])

        # Reshape the weights.
        read_shape = (self._num_read_heads, self._memory_word_size)
        read_keys = snt.BatchReshape(read_shape)(flat_keys)

        # Read from memory.
        memory_reads, read_weights, read_indices, read_strengths = (
            read_from_memory(read_keys, read_strengths, mem_state,
                             self._top_k))
        concatenated_reads = snt.BatchFlatten()(memory_reads)

        return concatenated_reads, ReadInformation(weights=read_weights,
                                                   indices=read_indices,
                                                   keys=read_keys,
                                                   strengths=read_strengths)
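A shape-only sketch of the key/strength split above, using hypothetical sizes: the generator emits H*M + H values per example; the last H columns become per-head read strengths and the rest are reshaped into H read keys of width M.

B, H, M = 4, 2, 16
flat_outputs = tf.zeros([B, H * M + H])
flat_keys = flat_outputs[:, :-H]                       # [B, H*M]
read_strengths = tf.nn.softplus(flat_outputs[:, -H:])  # [B, H]
read_keys = snt.BatchReshape((H, M))(flat_keys)        # [B, H, M]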
Example #18
    def __init__(self, name='Generator', latent_size=50, image_size=64, ngf=64, regularization=1.e-4):
        super(Generator, self).__init__(name=name)

        reg = {'w': l2_regularizer(scale=regularization)}

        self.conv_trs = []
        self.batch_norms = []
        self.latent_size = latent_size

        cngf, tisize = ngf // 2, 4
        while tisize != image_size:
            cngf = cngf * 2
            tisize = tisize * 2

        with self._enter_variable_scope():
            self.reshape = snt.BatchReshape(name='batch_reshape', shape=[1, 1, latent_size])
            self.conv_trs.append(snt.Conv2DTranspose(name='tr-conv2d_1',
                                                     output_channels=cngf,
                                                     kernel_shape=4,
                                                     stride=1,
                                                     padding='VALID',
                                                     regularizers=reg,
                                                     use_bias=False))
            self.batch_norms.append(snt.BatchNorm(name='batch_norm_1'))
            csize, cndf = 4, cngf
            n_layer = 2
            while csize < image_size // 2:
                self.conv_trs.append(snt.Conv2DTranspose(name='tr-conv2d_{}'.format(n_layer),
                                                         output_channels=cndf // 2,
                                                         kernel_shape=4,
                                                         stride=2,
                                                         padding='SAME',
                                                         regularizers=reg,
                                                         use_bias=False))
                self.batch_norms.append(snt.BatchNorm(name='batch_norm_{}'.format(n_layer)))
                n_layer += 1
                cndf = cndf // 2
                csize = csize * 2

            self.conv_trs.append(snt.Conv2DTranspose(name='tr-conv2d_{}'.format(n_layer),
                                                     output_channels=3,
                                                     kernel_shape=4,
                                                     stride=2,
                                                     padding='SAME',
                                                     regularizers=reg,
                                                     use_bias=False))
Example #19
 def __init__(self, config, name='attention_net'):
     super(AttentionNet, self).__init__(name=name)
     assert len(config['att_channel']) == len(config['att_kernel'])
     with self._enter_variable_scope(check_same_graph=False):
         self._layers_down = []
         self._layers_up_rev = []
         plane_ht, plane_wd = config['image_shape'][:2]
         for idx, (channel, kernel, stride) in enumerate(
                 zip(config['att_channel'], config['att_kernel'], config['att_stride'])):
             self._layers_down.append(UNetBlockDown(channel, kernel, stride, name='down_{}'.format(idx)))
             self._layers_up_rev.append(UNetBlockUp(channel, kernel, name='up_{}'.format(idx)))
             plane_ht //= stride
             plane_wd //= stride
         self._conv_out = snt.Conv2D(1, 1, name='conv_out')
         layers_linear = [snt.BatchFlatten(name='flatten')]
         channel_last = config['att_channel'][-1]
         for idx, hidden in enumerate(config['att_hidden'] + [plane_ht * plane_wd * channel_last]):
             layers_linear += [
                 snt.Linear(hidden, name='linear_{}'.format(idx)),
                 partial(tf.nn.relu, name='relu_{}'.format(idx)),
             ]
         layers_linear.append(snt.BatchReshape([plane_ht, plane_wd, channel_last], name='reshape'))
         self._linear = snt.Sequential(layers_linear, name='connect')
Example #20
def conv_weighted_gram_abs_projection_slow(w, d, beta, padding, strides):
    """Calculates a projection of | W^T d W | for an N-D convolution W.

  Computes  beta_i^{-1} sum_j |Q_ij| beta_j  where  Q = W^T d W  is the
  weighted Gram matrix for the convolution.

  The computation exploits the sparsity of the convolution, allowing it
  to run in  O(K^2 M C^3 + K^3 M C^2)  time per example, for C channels,
  spatial size M, and kernel size K. By comparison, working with
  a fully materialised MCxMC matrix would require  O(M^3 C^3)  time.

  Args:
    w: (N+2)D tensor of shape (kernel_height, kernel_width,
      input_channels, output_channels) containing the convolutional kernel.
    d: (N+3)D tensor of shape (num_targets, batch_size,
      output_height, output_width, output_channels), interpreted as a
      diagonal weight matrix.
    beta: (N+3)D tensor of shape (num_targets, batch_size,
      input_height, input_width, input_channels) specifying the projection.
    padding: `"VALID"` or `"SAME"`, the convolution's padding algorithm.
    strides: Integer list of `[vertical_stride, horizontal_stride]`.

  Returns:
    (N+3)D tensor of shape (num_targets, batch_size,
    input_height, input_width, input_channels) containing | W^T d W | beta.
  """
    input_shape = beta.shape[2:].as_list()

    flatten = snt.BatchFlatten(preserve_dims=2)
    unflatten = snt.BatchReshape(input_shape, preserve_dims=2)

    w_lin, _ = layer_utils.materialise_conv(w, None, input_shape, padding,
                                            strides)
    return unflatten(
        linear_weighted_gram_abs_projection_slow(w_lin, flatten(d),
                                                 flatten(beta)))
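A toy-shaped call to the projection above; all sizes are hypothetical: a 3x3 kernel with 2 input and 4 output channels on a 6x6 input, 'SAME' padding and unit strides, one target and a batch of 5.

w = tf.random_normal([3, 3, 2, 4])                 # convolutional kernel
d = tf.random_uniform([1, 5, 6, 6, 4])             # nonnegative diagonal weights on the output
beta = tf.random_uniform([1, 5, 6, 6, 2]) + 1e-3   # positive projection weights
proj = conv_weighted_gram_abs_projection_slow(
    w, d, beta, padding='SAME', strides=[1, 1])
# proj has the same shape as beta: [1, 5, 6, 6, 2].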
Example #21
 def _build(self, inputs, is_training=True):
     del is_training
     net = snt.nets.MLP([1000, 1000, 4096], activation=tf.nn.leaky_relu)
     out = net(inputs)
     out = tf.nn.tanh(out)
     return snt.BatchReshape([256, 16, 1])(out)
Example #22
 def reshape_duals_backwards(self, dual_vars):
     if self.linear_layer.reshape:
         # There was a reshape prior to the linear layer.
         dual_vars = snt.BatchReshape(self.activation_layer.output_shape,
                                      preserve_dims=2)(dual_vars)
     return dual_vars
Example #23
 def _batch_reshape_expression(expr, shape):
   w = snt.BatchReshape(shape, preserve_dims=2)(expr.w)
   b = snt.BatchReshape(shape)(expr.b)
   return LinearExpression(w=w, b=b, lower=expr.lower, upper=expr.upper)
Example #24
    def _build(self, inputs):
        """
        Args:
            inputs (type): node of input.
            is_training (type): tells to batchnorm if to generate the update ops.

        Returns:
            logits

        """

        net = inputs

        # Linear block, with a reshape if needed.
        # If linear_first is set, add extra Linear layers first.
        if self._linear_first is not None:
            self.linear_layers = [
                snt.Linear(name="linear_{}".format(i),
                           output_size=self._linear_first_sizes[i],
                           use_bias=True,
                           **self._extra_params)
                for i in range(len(self._linear_first_sizes))
            ]

            for i, layer in enumerate(self.linear_layers):
                net = layer(net)
                net = self._dropout(net, training=self._is_training)
                net = tf.layers.batch_normalization(
                    net,
                    training=self._is_training,
                    momentum=self._bn_momentum,
                    renorm=self._bn_renormalization,
                    renorm_momentum=self._bn_momentum,
                    renorm_clipping=self._renorm_clipping,
                    name="batch_norm_lin_{}".format(i))
                net = self._activation(net)

            net = snt.BatchReshape(shape=self._linear_first_reshape)(net)

        #CONV BLOCKS FROM HERE
        self.layers = [
            snt.Conv2DTranspose(name="conv_2d_T_{}".format(i),
                                output_channels=self._hidden_channels[i],
                                kernel_shape=self._kernel_shape,
                                stride=self._decide_stride(i),
                                padding=self._padding,
                                use_bias=True,
                                **self._extra_params)
            for i in range(self._num_layers - 1)
        ]

        li = self._num_layers - 1

        if self._output_shape is None:
            lastlayer = snt.Conv2DTranspose(
                name="conv_2d_T_{}".format(li),
                output_channels=self._hidden_channels[li],
                kernel_shape=self._kernel_shape,
                stride=self._decide_stride(li),
                padding=self._padding,
                use_bias=True,
                **self._extra_params)
        else:
            lastlayer = snt.Conv2DTranspose(
                name="conv_2d_T_{}".format(li),
                output_channels=self._hidden_channels[li],
                kernel_shape=self._kernel_shape,
                output_shape=self._output_shape,
                use_bias=True,
                **self._extra_params)

        self.layers.append(lastlayer)

        # connect them to the graph, adding batch norm and non-linearity
        for i, layer in enumerate(self.layers):
            net = layer(net)
            net = self._dropout(net, training=self._is_training)
            net = tf.layers.batch_normalization(
                net,
                training=self._is_training,
                momentum=self._bn_momentum,
                renorm=self._bn_renormalization,
                renorm_momentum=self._bn_momentum,
                renorm_clipping=self._renorm_clipping,
                name="batch_norm_{}".format(i))

            # no activation at the end
            if i < li:
                net = self._activation(net)

        if self._final_activation:
            net = self._activation(net)

        return net
Example #25
                                           output_shapes=((6, 6), (12, 12),
                                                          (24, 24), (48, 48)),
                                           kernel_shapes=(6, 5, 5, 3),
                                           strides=(6, 2, 2, 2),
                                           paddings=(snt.SAME, ))

input_image = value[0]
embedding = conv_encoder(input_image)
for l in conv_encoder._layers:
    print("layer thingy: {}".format(l.input_shape[1:3]))

os = conv_encoder.transpose()._output_shapes
print("output shapes: {}".format([s() for s in os]))

embedding = snt.BatchFlatten()(embedding)
embedding = mlp_encoder(embedding)

decoded = mlp_decoder(embedding)
decoded = snt.BatchReshape(shape=(1, 1, 1024))(decoded)
decoded = conv_decoder(decoded)
reconstruction_err = tf.losses.mean_squared_error(input_image, decoded)

optimizer = tf.train.AdamOptimizer()
train_op = optimizer.minimize(reconstruction_err)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(1000):
        err, _ = sess.run([reconstruction_err, train_op])
        print("{} : reconstruction error: {}".format(i, err))
Example #26
    def _calc_w_norm(self, layer, x_lm1, beta_l, alpha_l):
        # Norm of w, used for computing nu_lm1.
        if self._verify_option == VerifyOption.WEAK:
            w_norm = layer.forward_prop(beta_l, w_fn=tf.abs) * (alpha_l)
            w_norm = layer.backward_prop(w_norm, w_fn=tf.abs) / beta_l
        elif self._verify_option == VerifyOption.STRONG:
            w_norm = layer.custom_op(
                _ScaledGramComputation(self._soft_abs, self._exact_k), alpha_l,
                beta_l)
        else:
            # Linearise the convolution.
            flatten = snt.BatchFlatten(preserve_dims=2)
            unflatten = snt.BatchReshape(x_lm1.shape[2:].as_list(),
                                         preserve_dims=2)
            # flatten(beta_l): KxNxI   w_lin: IxO   flatten(delta_l,gam_l): KxNxO
            if self._verify_option == VerifyOption.SVD:
                w_lin, _ = layer.flatten()
                w_scaled = (tf.expand_dims(flatten(beta_l), -1) *
                            tf.expand_dims(tf.expand_dims(w_lin, 0), 0) *
                            tf.expand_dims(
                                tf.sqrt(flatten(alpha_l) + self._sqrt_eps), 2))
                s = tf.svd(w_scaled, compute_uv=False)
                w_norm = tf.expand_dims(tf.reduce_max(s, axis=-1), axis=-1)
                w_norm = unflatten(w_norm * w_norm) / (beta_l * beta_l)
            elif self._verify_option == VerifyOption.STRONG_APPROX:
                # Get size of input to layer
                size_list = beta_l[0, 0, ...].shape.as_list()
                size_x = 1
                for s in size_list:
                    size_x *= s

                # Prepare data
                shape_beta = beta_l.shape.as_list()[2:]
                batch_shape = beta_l.shape.as_list()[:2]
                shape_alpha = alpha_l.shape.as_list()[2:]
                beta_reduce = flatten(beta_l)
                beta_reduce = beta_reduce / tf.reduce_sum(
                    beta_reduce, axis=2, keepdims=True)
                beta_l = tf.reshape(beta_reduce, beta_l.shape)

                k_sample = min(self._approx_k, size_x)

                def process_columns(x, beta_cur):
                    """Compute |W^T[alpha]Wx|beta_cur."""
                    shape_x_batch = x.shape.as_list()[:3]
                    x = tf.reshape(x, [shape_x_batch[0], -1] + shape_beta)
                    x_prop = tf.reshape(layer.forward_prop(x),
                                        shape_x_batch + shape_alpha)
                    x = layer.backward_prop(
                        tf.reshape(x_prop * tf.expand_dims(alpha_l, 2),
                                   [shape_x_batch[0], -1] + shape_alpha))
                    x = tf.reshape(x, shape_x_batch + shape_beta)

                    # Flatten beta and pick out relevant entries
                    beta_reshape = beta_cur
                    for _ in range(len(shape_beta)):
                        beta_reshape = tf.expand_dims(beta_reshape, -1)
                    return tf.reduce_sum(self._soft_abs(x) * beta_reshape,
                                         axis=2)

                # Accumulator for sum over columns
                samples = tf.random.categorical(
                    tf.log(tf.reshape(beta_reduce, [-1, size_x]) + 1e-10),
                    k_sample)
                samples = tf.one_hot(tf.reshape(samples, [-1]),
                                     size_x,
                                     axis=-1)
                samples = tf.reshape(samples,
                                     (batch_shape + [k_sample] + shape_beta))
                x_acc = process_columns(
                    samples,
                    tf.ones(batch_shape + [k_sample]) / k_sample)
                w_norm = x_acc / beta_l
            else:
                raise ValueError('Unknown verification option: ' +
                                 self._verify_option)

        return w_norm
Example #27
 def _build(self, inpt):
     n = np.prod(self._output_size)
     mlp = MLP(self._n_hidden, n_out=n)
     reshape = snt.BatchReshape(self._output_size)
     seq = snt.Sequential([mlp, reshape])
     return seq(inpt)
Example #28
 def apply_batch_reshape(self, wrapper, shape):
     return IntervalBounds(
         snt.BatchReshape(shape)(self.lower),
         snt.BatchReshape(shape)(self.upper))
Example #29
    def _build(self, inputs):
        """Constructs the generator graph.

    Args:
      inputs: `tf.Tensor` with the input of the generator.

    Returns:
      `tf.Tensor`, the generated samples.
    """
        leaky_relu_activation = lambda x: tf.maximum(0.2 * x, x)
        init_dict = {
            'w': tf.truncated_normal_initializer(seed=547, stddev=0.02),
            'b': tf.constant_initializer(0.3)
        }
        layer1 = snt.Linear(output_size=1024, initializers=init_dict)(inputs)
        layer2 = leaky_relu_activation(
            snt.BatchNorm(offset=1, scale=1,
                          decay_rate=0.9)(layer1,
                                          is_training=True,
                                          test_local_stats=True))
        layer3 = snt.Linear(output_size=128 * 7 * 7,
                            initializers=init_dict)(layer2)
        layer4 = leaky_relu_activation(
            snt.BatchNorm(offset=1, scale=1,
                          decay_rate=0.9)(layer3,
                                          is_training=True,
                                          test_local_stats=True))
        layer5 = snt.BatchReshape((7, 7, 128))(layer4)
        # ("Conv2DTranspose" ,{ "output_channels" : 64 ,"output_shape" : [14,14], "kernel_shape" : [4,4], "stride" : 2, "padding":"SAME" },    0),
        layer6 = snt.Conv2DTranspose(output_channels=64,
                                     output_shape=[14, 14],
                                     kernel_shape=[4, 4],
                                     stride=2,
                                     padding="SAME",
                                     initializers=init_dict)(layer5)
        layer7 = leaky_relu_activation(
            snt.BatchNorm(offset=1, scale=1,
                          decay_rate=0.9)(layer6,
                                          is_training=True,
                                          test_local_stats=True))
        # ("Conv2DTranspose" ,{ "output_channels" : 1 ,"output_shape" : [28,28], "kernel_shape" : [4,4], "stride" : 2, "padding":"SAME" },    0),
        layer8 = snt.Conv2DTranspose(output_channels=1,
                                     output_shape=[28, 28],
                                     kernel_shape=[4, 4],
                                     stride=2,
                                     padding="SAME",
                                     initializers=init_dict)(layer7)
        # Reshape the data to have rank 4.
        # inputs = leaky_relu_activation(inputs)

        # net = snt.nets.ConvNet2DTranspose(
        #     output_channels=[32, 1],
        #     output_shapes=[[14, 14], [28, 28]],
        #     strides=[2],
        #     paddings=[snt.SAME],
        #     kernel_shapes=[[5, 5]],
        #     use_batch_norm=False,
        #     initializers=init_dict)

        # # We use tanh to ensure that the generated samples are in the same range
        # # as the data.
        return tf.nn.sigmoid(layer8)
Example #30
 def reshape_duals_forwards(self, next_layer, dual_vars):
     if next_layer.reshape:
         # There was a reshape prior to the next layer.
         dual_vars = snt.BatchReshape(next_layer.input_shape,
                                      preserve_dims=2)(dual_vars)
     return dual_vars