Example #1
    def hourglass_module(inp, stageNum, totalJoints):
        if stageNum > 0:
            down_sample = max_pool(inp,
                                   2,
                                   2,
                                   2,
                                   2,
                                   name="hourglass_downsample_%d" % stageNum)

            block_front = slim.stack(
                down_sample,
                inverted_bottleneck, [
                    (up_channel_ratio(6), out_channel_ratio(24), 0, 3),
                    (up_channel_ratio(6), out_channel_ratio(24), 0, 3),
                    (up_channel_ratio(6), out_channel_ratio(24), 0, 3),
                    (up_channel_ratio(6), out_channel_ratio(24), 0, 3),
                    (up_channel_ratio(6), out_channel_ratio(24), 0, 3),
                ],
                scope="hourglass_front_%d" % stageNum)
            stageNum -= 1
            block_mid = LayerProvider.hourglass_module(block_front, stageNum,
                                                       totalJoints)
            block_back = inverted_bottleneck(block_mid,
                                             up_channel_ratio(6),
                                             totalJoints,
                                             0,
                                             3,
                                             scope="hourglass_back_%d" %
                                             stageNum)

            up_sample = upsample(block_back, 2,
                                 "hourglass_upsample_%d" % stageNum)

            # jump layer
            branch_jump = slim.stack(
                inp,
                inverted_bottleneck, [
                    (up_channel_ratio(6), out_channel_ratio(24), 0, 3),
                    (up_channel_ratio(6), out_channel_ratio(24), 0, 3),
                    (up_channel_ratio(6), out_channel_ratio(24), 0, 3),
                    (up_channel_ratio(6), out_channel_ratio(24), 0, 3),
                    (up_channel_ratio(6), totalJoints, 0, 3),
                ],
                scope="hourglass_branch_jump_%d" % stageNum)

            curr_hg_out = tf.add(up_sample,
                                 branch_jump,
                                 name="hourglass_out_%d" % stageNum)
            # intermediate supervision: collect this stage's output for the loss
            l2s.append(curr_hg_out)

            return curr_hg_out

        return inverted_bottleneck(inp,
                                   up_channel_ratio(6),
                                   out_channel_ratio(24),
                                   0,
                                   3,
                                   scope="hourglass_mid_%d" % stageNum)
def hourglass_module(inputs, depth=4, deconv='transpose'):
    with tf.variable_scope('depth_{}'.format(depth)):
        # bottom-up layers
        net = slim.max_pool2d(inputs, (2, 2), scope='pool')
        net = slim.stack(net,
                         hourglass_bottleneck, [256, 256, 256],
                         scope='bottom_up')

        # connecting layers: recurse one level deeper, or bottleneck at depth 0
        if depth > 0:
            net = hourglass_module(net, depth=depth - 1, deconv=deconv)
        else:
            net = hourglass_bottleneck(net,
                                       out_channel=256,
                                       scope='connecting_layer')

        # top-down layers
        net = hourglass_bottleneck(net, out_channel=256, scope='top_down')
        net = deconv_layer(net, 2, 256, method=deconv, scope='deconv_layer')

        # residual (skip) connection from the input
        net += slim.stack(inputs,
                          residual_bottleneck, [256, 256, 256],
                          scope='res_block')

        return net
def build_network(input, trainable):
    is_trainable(trainable)

    net = convb(input, 3, 3, out_channel_ratio(16), 2, name="Conv2d_0")

    # 128, 112
    net = slim.stack(net,
                     inverted_bottleneck, [(1, out_channel_ratio(16), 0, 3),
                                           (1, out_channel_ratio(16), 0, 3)],
                     scope="Conv2d_1")

    # 64, 56
    net = slim.stack(net,
                     inverted_bottleneck, [
                         (up_channel_ratio(6), out_channel_ratio(24), 1, 3),
                         (up_channel_ratio(6), out_channel_ratio(24), 0, 3),
                         (up_channel_ratio(6), out_channel_ratio(24), 0, 3),
                         (up_channel_ratio(6), out_channel_ratio(24), 0, 3),
                         (up_channel_ratio(6), out_channel_ratio(24), 0, 3),
                     ],
                     scope="Conv2d_2")

    net_h_w = int(net.shape[1])
    # build network recursively
    hg_out = hourglass_module(net, STAGE_NUM)

    for index, l2 in enumerate(l2s):
        l2_w_h = int(l2.shape[1])
        if l2_w_h == net_h_w:
            continue
        scale = net_h_w // l2_w_h
        l2s[index] = upsample(l2, scale, name="upsample_for_loss_%d" % index)

    return hg_out, l2s
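Every snippet in this listing leans on slim.stack to chain one layer function over a list of argument tuples. As a rough mental model (a hedged, framework-free sketch of how tf.contrib.slim's stack behaves, not its actual source), each list element is unpacked as positional arguments for the layer and each per-layer scope gets a _1, _2, ... suffix:

def stack_sketch(inputs, layer_fn, stack_args, scope='stack', **kwargs):
    # Apply layer_fn once per element of stack_args, threading the output
    # through; tuples/lists are unpacked as positional arguments.
    outputs = inputs
    for i, layer_args in enumerate(stack_args):
        if not isinstance(layer_args, (list, tuple)):
            layer_args = (layer_args,)
        outputs = layer_fn(outputs, *layer_args,
                           scope='%s_%d' % (scope, i + 1), **kwargs)
    return outputs

# Under that reading, the "Conv2d_2" stack above is roughly five chained calls:
#   net = inverted_bottleneck(net, up_channel_ratio(6), out_channel_ratio(24), 1, 3, scope='Conv2d_2_1')
#   net = inverted_bottleneck(net, up_channel_ratio(6), out_channel_ratio(24), 0, 3, scope='Conv2d_2_2')
#   ... and likewise for _3, _4 and _5.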
Example #4
    def build(self):
        rgb_embed = slim.stack(self.input_rgb,
                               slim.conv2d,
                               self.rgb_layers,
                               scope='rgb_embed')
        d_embed = slim.stack(self.input_d,
                             slim.conv2d,
                             self.depth_layers,
                             scope='d_embed')
        _ = tf.concat([rgb_embed, d_embed], axis=-1, name='concat_rgbd')
        _ = slim.stack(_, slim.conv2d, self.cnn_layers, scope='cnn')

        _ = spatial_soft_argmax(_)

        self.aux_output = slim.fully_connected(_,
                                               self.aux_task_dim,
                                               scope='aux')

        _ = tf.concat([_, self.aux_output, self.input_obs], -1)
        _ = slim.stack(_, slim.fully_connected, self.fc_layers, scope='fc')
        _ = slim.fully_connected(_,
                                 self.output_dim,
                                 activation_fn=tf.tanh,
                                 scope='fc_out')
        _ = (_ + self.output_translate) * self.output_scale
        self.output = _

        self.l2_loss = tf.reduce_mean((self.output - self.gt_output)**2)
        self.l1_loss = tf.reduce_mean(tf.abs(self.output - self.gt_output))
        self.aux_loss = tf.reduce_mean((self.aux_output - self.gt_aux)**2)
        self.loss = self.l2_loss * self.l2_loss_weight + \
                    self.l1_loss * self.l1_loss_weight + \
                    self.aux_loss * self.aux_loss_weight
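spatial_soft_argmax is not defined in this snippet; the sketch below is a hypothetical plain-numpy stand-in for what such a helper typically computes (per feature channel, a softmax over the H x W positions followed by the expected normalized (x, y) coordinate), so the exact normalization and output layout here are assumptions rather than the original implementation.

import numpy as np

def spatial_soft_argmax_np(feat):
    """Hypothetical stand-in for the spatial_soft_argmax helper used above."""
    n, h, w, c = feat.shape
    flat = feat.reshape(n, h * w, c)
    p = np.exp(flat - flat.max(axis=1, keepdims=True))
    p /= p.sum(axis=1, keepdims=True)                    # softmax over H*W positions
    ys, xs = np.meshgrid(np.linspace(-1.0, 1.0, h),
                         np.linspace(-1.0, 1.0, w), indexing='ij')
    coords = np.stack([xs.ravel(), ys.ravel()], axis=1)  # (H*W, 2) normalized (x, y)
    return np.einsum('nsc,sk->nck', p, coords).reshape(n, 2 * c)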
Example #5
def build_model_inference(video_input, audio_input):

    video_input = tf.convert_to_tensor(video_input)
    audio_input = tf.convert_to_tensor(audio_input)
    reshaped_video_input = tf.reshape(video_input, [-1, 1024])
    reshaped_audio_input = tf.reshape(audio_input, [-1, 128])

    video_NetVLAD = NetVLAD(1024, FLAGS.max_frames, FLAGS.cluster_size,
                            FLAGS.add_batch_norm, FLAGS.is_training)
    audio_NetVLAD = NetVLAD(128, FLAGS.max_frames, FLAGS.cluster_size // 2,
                            FLAGS.add_batch_norm, FLAGS.is_training)

    with tf.variable_scope('video_netVLAD'):
        vlad_video = video_NetVLAD.forward(reshaped_video_input)
    with tf.variable_scope('audio_netVLAD'):
        vlad_audio = audio_NetVLAD.forward(reshaped_audio_input)

    l2_penalty = 1e-8

    video_activation = slim.stack(
        vlad_video,
        slim.fully_connected, [2048, 1024, 128],
        activation_fn=None,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope="video_fc")
    audio_activation = slim.stack(
        vlad_audio,
        slim.fully_connected, [2048, 1024, 128],
        activation_fn=None,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope="audio_fc")

    return video_activation, audio_activation
def forward(inputs,
            num_outputs,
            input_dim=None,
            hiddens=[200],
            activation_fn=tf.nn.relu,
            weights_initializer=initializers.xavier_initializer(),
            weights_regularizer=None,
            biases_initializer=init_ops.zeros_initializer(),
            biases_regularizer=None,
            reuse=None,
            scope=None):
    """
  similary as melt.slim.layers.mlp but the first step(from input to first hidden adjusted so input can be sparse)
  """

    assert len(hiddens) >= 1, "must contain at least one hidden layer"

    scope = 'mlp' if scope is None else scope
    with tf.variable_scope(scope):
        outputs = melt.layers.fully_connected(
            inputs,
            num_outputs,
            input_dim=input_dim,
            activation_fn=activation_fn,
            weights_initializer=weights_initializer,
            weights_regularizer=weights_regularizer,
            biases_initializer=biases_initializer,
            biases_regularizer=biases_regularizer,
            reuse=reuse,
            scope='fc_0')

        #--------other hidden layers
        # for i in xrange(len(hiddens) -1):
        #   outputs = slim.fully_connected(outputs, hiddens[i + 1],
        #                          activation_fn=activation_fn,
        #                          weights_initializer=weights_initializer,
        #                          weights_regularizer=weights_regularizer,
        #                          biases_initializer=biases_initializer,
        #                          biases_regularizer=biases_regularizer,
        #                          scope='fc_%d'%i+1)

        # remaining hidden layers; keep the result so it feeds the final linear layer
        outputs = slim.stack(outputs,
                             slim.fully_connected,
                             hiddens[1:],
                             activation_fn=activation_fn,
                             weights_initializer=weights_initializer,
                             weights_regularizer=weights_regularizer,
                             biases_initializer=biases_initializer,
                             biases_regularizer=biases_regularizer,
                             scope='fc')

        return slim.linear(outputs,
                           num_outputs,
                           weights_initializer=weights_initializer,
                           weights_regularizer=weights_regularizer,
                           biases_initializer=biases_initializer,
                           biases_regularizer=biases_regularizer,
                           scope='linear')
def forward(train_data):
    net = slim.stack(train_data,
                     slim.conv2d, [(64, [3, 3]), (32, [3, 3]), (32, [3, 3])],
                     weights_regularizer=slim.l2_regularizer(0.0005),
                     scope='conv3')
    conv_out = tf.reshape(net,
                          (-1, net.shape[1] * net.shape[2] * net.shape[3]))
    fc = slim.stack(conv_out,
                    slim.fully_connected, [500, 100, 10],
                    weights_regularizer=slim.l2_regularizer(0.0005),
                    scope='fc')
    return fc
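One detail worth spelling out for the example above: weights_regularizer=slim.l2_regularizer(0.0005) only registers the penalty terms in a graph collection, it does not add them to anything. A hedged usage sketch of the pattern (assuming TF 1.x with tf.contrib.slim; the toy input, layer sizes, and optimizer here are illustrative stand-ins, not the forward() above):

import tensorflow as tf
import tensorflow.contrib.slim as slim

x = tf.placeholder(tf.float32, [None, 16], name='x')            # illustrative input
y = tf.placeholder(tf.float32, [None, 1], name='y')
net = slim.stack(x, slim.fully_connected, [32, 32],
                 weights_regularizer=slim.l2_regularizer(0.0005), scope='toy_fc')
pred = slim.fully_connected(net, 1, activation_fn=None, scope='toy_out')
data_loss = tf.reduce_mean(tf.square(pred - y))
# The 0.0005 * ||W||^2 terms live in GraphKeys.REGULARIZATION_LOSSES; add them in.
total_loss = data_loss + tf.losses.get_regularization_loss()
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(total_loss)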
Example #8
    def stage(self, inputs, outputSize, stageNumber, kernel_size=3):

        output = slim.stack(inputs, self.inverted_bottleneck,
                            [
                                (2, 32, 0, kernel_size, 4),
                                (2, 32, 0, kernel_size, 2),
                                (2, 32, 0, kernel_size, 1),
                            ], scope="stage_%d_mv2" % stageNumber)

        return slim.stack(output, self.separable_conv,
                          [
                              (64, 1, 1),
                              (outputSize, 1, 1)
                          ], scope="stage_%d_mv1" % stageNumber)
Example #9
def mvn_inference_network(x, n_latent_dim, hidden_units):
    """Multi-Variate Normal parameterized cholskey
    sigma = chol(sigma) * chol(sigma)^T
    """
    with slim.arg_scope([slim.fully_connected], activation_fn=tf.nn.relu):
        net = slim.stack(x,
                         slim.fully_connected,
                         hidden_units,
                         scope='encoder_network')

        # input layer to the gaussian latent variable
        layers.utils.collect_named_outputs(enums.VariationalParams.COLLECTION,
                                           enums.VariationalParams.INPUT, net)

        mvn_params = slim.fully_connected(
            net,
            n_latent_dim * (n_latent_dim + 1),  # over parameterized
            activation_fn=None,
            scope='gaussian_params')

    # The mean parameter is unconstrained
    mu = mvn_params[:, :n_latent_dim]

    # The standard deviation must be positive. Parametrize with a softplus and
    # add a small epsilon for numerical stability
    chol_subtril = (
        1e-6 + tf.nn.softplus(mvn_params[:, n_latent_dim:2 * n_latent_dim]))
    chol = distribution_util.fill_lower_triangular(chol_subtril)

    return mu, chol
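To make the docstring's sigma = chol(sigma) * chol(sigma)^T concrete, here is a small plain-numpy illustration (independent of the network above; the dimension and values are arbitrary): a lower-triangular factor whose diagonal has been pushed through softplus plus a small epsilon always yields a valid covariance, and the same factor gives a cheap reparameterized sample.

import numpy as np

np.random.seed(0)
n = 3
chol = np.tril(np.random.randn(n, n))                 # lower-triangular factor
diag = np.diag_indices(n)
chol[diag] = 1e-6 + np.log1p(np.exp(chol[diag]))      # softplus + eps, as above
sigma = chol @ chol.T                                 # symmetric positive definite covariance
mu = np.zeros(n)
sample = mu + chol @ np.random.randn(n)               # sample ~ N(mu, sigma)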
Example #10
def gaussian_inference_network(x, n_latent_dim, hidden_units):
    with slim.arg_scope([slim.fully_connected], activation_fn=tf.nn.relu):
        net = slim.stack(x,
                         slim.fully_connected,
                         hidden_units,
                         scope='encoder_network')

        # input layer to the gaussian latent variable
        layers.utils.collect_named_outputs(enums.VariationalParams.COLLECTION,
                                           enums.VariationalParams.INPUT, net)

        gaussian_params = slim.fully_connected(net,
                                               2 * n_latent_dim,
                                               activation_fn=None,
                                               scope='gaussian_params')

    # The mean parameter is unconstrained
    mu = layers.utils.collect_named_outputs(enums.VariationalParams.COLLECTION,
                                            enums.VariationalParams.LOCATION,
                                            gaussian_params[:, :n_latent_dim])

    # The standard deviation must be positive. Parametrize with a softplus and
    # add a small epsilon for numerical stability
    sigma = layers.utils.collect_named_outputs(
        enums.VariationalParams.COLLECTION, enums.VariationalParams.SCALE,
        1e-6 + tf.nn.softplus(gaussian_params[:, n_latent_dim:]))

    return mu, sigma
Example #11
    def _set_model(self):
        self.input['data'] = tf.placeholder(dtype=tf.float32,
                                            shape=[None, 1, 10, 10],
                                            name='input')
        self.label['label'] = tf.placeholder(dtype=tf.float32,
                                             shape=[None, 2],
                                             name='label')
        h = self.input['data']
        h = slim.flatten(h)
        with slim.arg_scope([slim.fully_connected], activation_fn=tf.nn.relu):
            if self.params['filters'] is None:
                filters = [128, 256, 512, 1024, 2048, 4096, 8192]
            else:
                filters = self.params['filters']
                # filters = [128, 256, 512, 1024, 2048, 4096, 8192]
            h = slim.stack(h, slim.fully_connected, filters)
        h = tf.nn.dropout(h, self.kp)
        pos_pred = slim.fully_connected(h, 2, activation_fn=None)
        self.output['pos_pred'] = pos_pred
        error = self.label['label'] - self.output['pos_pred']
        l = tf.square(error)
        l = tf.reduce_sum(l, axis=1)
        l = tf.sqrt(l)
        l = tf.reduce_mean(l)
        self.loss['loss'] = l
        tf.summary.scalar('loss', self.loss['loss'])
        self.train_op['main'] = tf.train.RMSPropOptimizer(
            self.lr['default']).minimize(self.loss['loss'],
                                         global_step=self.gs)
        self.summary_op['all'] = tf.summary.merge_all()
def value_network_slim(states):
    net = slim.stack(states,
                     slim.fully_connected, [24, 24],
                     activation_fn=tf.nn.tanh,
                     scope='stack')
    net = slim.fully_connected(net, 1, activation_fn=None, scope='full')
    return net
def actor_network(states, actions):
    # should return predicted_actions, action_probs
    with tf.variable_scope('mu'):
        net = slim.stack(states,
                         slim.fully_connected, [30],
                         activation_fn=tf.nn.tanh,
                         scope='stack')
        mu = slim.fully_connected(net,
                                  action_shape,
                                  activation_fn=None,
                                  scope='full')

    with tf.variable_scope('std'):
        logstd = slim.fully_connected(net,
                                      action_shape,
                                      activation_fn=None,
                                      scope='full')
        std = tf.exp(logstd)

    prob_dist = tf.distributions.Normal(loc=mu, scale=std)
    batch_size = tf.shape(states)[0]
    # predicted actions
    predicted_actions = tf.clip_by_value(prob_dist.sample(batch_size), -2.0,
                                         2.0)
    # action_probs. each row of shape action_shape, needs to be reduced by multiplication
    action_probs = prob_dist.prob(actions)
    action_probs = tf.reduce_prod(action_probs, axis=1)
    return predicted_actions, action_probs
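A small numeric sketch of what the reduce_prod at the end computes (plain numpy; the 2-D action and the parameter values are made up for illustration): with a diagonal Normal, the per-dimension densities multiply into the joint density of the sampled action.

import numpy as np

mu = np.array([0.1, -0.3])
std = np.array([0.5, 0.2])
action = np.array([0.0, -0.2])
per_dim = np.exp(-0.5 * ((action - mu) / std) ** 2) / (std * np.sqrt(2 * np.pi))
joint = np.prod(per_dim)   # same role as tf.reduce_prod(prob_dist.prob(actions), axis=1)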
Example #14
def DiscriminatorNetwork(test_in, hSeq=(32, 256, 64), gamma_reg=gamma_reg):
    '''NB: Generic Encapsulated Discriminator NN model: unconditional'''
    # with tf.variable_scope(scope, reuse=reuseFlag):
    regularizer = slim.l2_regularizer(gamma_reg)
    hidden = slim.fully_connected(slim.flatten(test_in),
                                  num_outputs=hSeq[0],
                                  activation_fn=tf.nn.relu,
                                  weights_regularizer=regularizer,
                                  weights_initializer=wgts_init,
                                  biases_initializer=bias_init)
    if (len(hSeq) > 1):
        hidden = slim.stack(hidden,
                            slim.fully_connected,
                            list(hSeq[1:]),
                            activation_fn=tf.nn.sigmoid,
                            weights_regularizer=regularizer,
                            weights_initializer=wgts_init,
                            biases_initializer=bias_init)

    disc = slim.fully_connected(hidden,
                                1,
                                activation_fn=tf.nn.sigmoid,
                                weights_initializer=wgts_init,
                                biases_initializer=bias_init)
    return disc
Example #15
def discriminator(d_i,
                  width,
                  height,
                  channels=1,
                  latent_dim=50,
                  is_training=True):
    with tf.variable_scope('discriminator'):
        d_i = tf.reshape(d_i, [-1, height, width, channels])

        with slim.arg_scope([slim.conv2d],
                            normalizer_fn=slim.batch_norm,
                            normalizer_params={
                                'is_training': is_training,
                                'scale': True
                            },
                            activation_fn=tf.nn.elu):
            output = slim.conv2d(d_i, 32, 5, stride=1, scope='conv1')
            output = slim.stack(output,
                                slim.conv2d, [128, 256, 256],
                                kernel_size=5,
                                stride=2,
                                scope='conv2')
            output = slim.flatten(output)

        lth_layer = slim.fully_connected(output,
                                         1024,
                                         activation_fn=tf.nn.elu,
                                         scope='fc_lth')
        discrimination = slim.fully_connected(lth_layer,
                                              1,
                                              activation_fn=tf.nn.sigmoid,
                                              scope='fc_discrimination')
    return discrimination, lth_layer
Example #16
def DiscriminatorNetwork_Cond(class_in,
                              gen_d_in,
                              hSeq=(32, 256, 64),
                              gamma_reg=gamma_reg):
    '''NB: Custom Class-Conditional Encapsulated Discriminator NN model'''
    # with tf.variable_scope(scope, reuse=reuseFlag):
    regularizer = slim.l2_regularizer(gamma_reg)
    hidden = slim.fully_connected(slim.flatten(
        tf.concat([class_in, gen_d_in], axis=1)),
                                  num_outputs=hSeq[0],
                                  activation_fn=tf.nn.relu,
                                  weights_regularizer=regularizer,
                                  weights_initializer=wgts_init,
                                  biases_initializer=bias_init)
    hidden = slim.dropout(hidden, keep_prob=__kp__, is_training=True)
    hidden = slim.stack(hidden,
                        slim.fully_connected,
                        list(hSeq[1:]),
                        activation_fn=tf.nn.sigmoid,
                        weights_regularizer=regularizer,
                        weights_initializer=wgts_init,
                        biases_initializer=bias_init)
    disc = slim.fully_connected(hidden,
                                1,
                                activation_fn=tf.nn.sigmoid,
                                weights_initializer=wgts_init,
                                biases_initializer=bias_init)
    return disc
    def get_output(self, shapes):
        """
        Gets discriminator's predictions for shapes.

        Args:
            shapes (Nx10).

        Returns:
            Predictions (Nx1).
        """
        data_format = 'NHWC'
        with tf.variable_scope('D_shape', reuse=self.reuse) as scope:
            with slim.arg_scope(
                    [slim.conv2d, slim.fully_connected],
                    weights_regularizer=slim.l2_regularizer(self.wd)):
                with slim.arg_scope([slim.conv2d], data_format=data_format):
                    shapes = slim.stack(
                        inputs=shapes,
                        layer=slim.fully_connected,
                        stack_args=[10, 5],
                        scope='shape_fc1'
                    )
                    shape_out = slim.fully_connected(
                        inputs=shapes,
                        num_outputs=1,
                        activation_fn=None,
                        reuse=self.reuse,
                        scope='shape_final'
                    )
                    out = tf.concat([shape_out], 1)
            if not self.reuse:
                self.update(tf.contrib.framework.get_variables(scope))
            return out
Example #18
def GeneratorNetwork(noise, out_dim, hSeq=(32, 256, 64), gamma_reg=gamma_reg):
    '''NB: Generic Encapsulated Generator NN model: unconditional'''
    # with tf.variable_scope(scope):
    regularizer = slim.l2_regularizer(gamma_reg)

    hidden = slim.fully_connected(slim.flatten(noise),
                                  num_outputs=hSeq[0],
                                  activation_fn=tf.nn.relu,
                                  weights_regularizer=regularizer,
                                  weights_initializer=wgts_init,
                                  biases_initializer=bias_init)
    if (len(hSeq) > 1):
        hidden = slim.stack(hidden,
                            slim.fully_connected,
                            list(hSeq[1:]),
                            activation_fn=tf.nn.tanh,
                            weights_regularizer=regularizer,
                            weights_initializer=wgts_init,
                            biases_initializer=bias_init)

    gen = slim.fully_connected(hidden,
                               num_outputs=out_dim,
                               activation_fn=tf.nn.sigmoid,
                               weights_initializer=wgts_init,
                               biases_initializer=bias_init)
    return gen
def LeNet(images):
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        activation_fn=tf.nn.relu,
                        weights_initializer=tf.truncated_normal_initializer(
                            0.0, 0.1),
                        weights_regularizer=slim.l2_regularizer(0.0005)):
        # layer 1
        net = slim.conv2d(images,
                          6, [5, 5],
                          stride=1,
                          padding='VALID',
                          scope='conv1')
        net = slim.max_pool2d(net, [2, 2],
                              stride=2,
                              padding='VALID',
                              scope='pool1')
        # layer 2
        net = slim.conv2d(net,
                          16, [5, 5],
                          stride=1,
                          padding='VALID',
                          scope='conv2')
        net = slim.max_pool2d(net, [2, 2],
                              stride=2,
                              padding='VALID',
                              scope='pool2')
        net = slim.flatten(net, scope='flatten')
        print(net.shape)
        # layer 3, 4
        net = slim.stack(net, slim.fully_connected, [120, 84], scope='fc')
        # layer 5
        net = slim.fully_connected(net, 10, activation_fn=None, scope='fc_3')
    return net
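For reference, here is the spatial arithmetic behind the LeNet above, under the assumption of the classic 32x32 grayscale input (an assumption; the actual input size is not shown in this snippet, and the print(net.shape) call reports the flattened size for whatever is really fed in):

# 32x32x1 --conv1 5x5 VALID--> 28x28x6 --pool1 2x2/2--> 14x14x6
#         --conv2 5x5 VALID--> 10x10x16 --pool2 2x2/2--> 5x5x16
#         --flatten--> 400 --fc--> 120 --fc--> 84 --fc_3--> 10 logits
import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 32, 32, 1], name='images')
logits = LeNet(images)   # [None, 10]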
def build_model(x, layer_sizes, is_training, output_dim, is_reuse):
    with tf.variable_scope("model"):
        winit = tf.contrib.layers.xavier_initializer()
        binit = tf.constant_initializer(0)
        activ_fn = tf.nn.sigmoid
        normalizer_fn = slim.batch_norm
        normalizer_params = {'is_training': is_training,
                             'decay': 0.999, 'center': True,
                             'scale': True, 'updates_collections': None}
        #keep_prob=0.5
        #is_training=is_training


        with slim.arg_scope([slim.fully_connected],
                                activation_fn=activ_fn,
                                weights_initializer=winit,
                                biases_initializer=binit,
                                weights_regularizer=slim.l2_regularizer(0.05),
                                reuse=is_reuse,
                                normalizer_fn=normalizer_fn,
                                normalizer_params=normalizer_params):
        
            # build the bulk of the layers
            layers = slim.stack(x, slim.fully_connected, layer_sizes, scope="layer")

            # final layer has NO activation, NO BN
            return slim.fully_connected(layers, output_dim,
                                    activation_fn=None,
                                    normalizer_fn=None,
                                    weights_initializer=winit,
                                    biases_initializer=binit,
                                    weights_regularizer=slim.l2_regularizer(0.05),
                                    scope='prediction')
Example #21
def encoder(x, width, height, channels=1, latent_dim=50, is_training=True):
    with tf.variable_scope('encoder'):
        with slim.arg_scope([slim.conv2d],
                            normalizer_fn=slim.batch_norm,
                            normalizer_params={
                                'is_training': is_training,
                                'scale': True
                            },
                            activation_fn=tf.nn.elu):
            x = tf.reshape(x, [-1, height, width, channels])
            output = slim.stack(x,
                                slim.conv2d, [64, 128, 256],
                                kernel_size=5,
                                stride=2,
                                scope='conv')
            output = slim.flatten(output)

        z_mean = slim.fully_connected(output,
                                      latent_dim,
                                      activation_fn=None,
                                      scope='z_mean')
        z_log_sigma_sq = slim.fully_connected(output,
                                              latent_dim,
                                              activation_fn=None,
                                              scope='z_log_sigma_sq')
    return z_mean, z_log_sigma_sq
Example #22
def deep_net(x):
    with slim.arg_scope([slim.fully_connected],
                        activation_fn=tf.nn.relu,
                        weights_initializer=tf.orthogonal_initializer(),
                        biases_initializer=tf.constant_initializer(0.0)):
        h = slim.stack(x, slim.fully_connected, [200] * 17)
    return slim.fully_connected(h, 10, activation_fn=None)
Example #23
    def _build_cnn(self, x, log=no_op):
        log('- build-cnn -')
        with tf.name_scope('build_cnn', [x]):
            with tf.name_scope('format_in'):
                log('cnn-input', x.shape)
                dim_t = tf_shape(x)[1]
                x = axial_reshape(x, [(0,1), 2, 3, 4]) # (merge "time" with batch)
                log('cnn-format', x.shape)
            with tf.name_scope('cnn'):
                with slim.arg_scope(self._arg_scope()):
                    x = slim.conv2d(x, 64, 7, 2, scope='conv', padding='SAME')
                    x = slim.stack(x,
                            slim.separable_conv2d,
                            [(128,3,1,2),(256,3,1,2),(196,1,1,1),(384,3,1,2),(256,1,1,1),(512,3,1,2),(512,1,1,1)],
                            scope='sconv',
                            padding='SAME',
                            )
                    log('post-sconv', x.shape) #NTx4x5
                    #x = tf.reduce_mean(x, axis=[1,2])

                    #x = axial_reshape(x, [0,(1,2,3)])
                    x = slim.separable_conv2d(x, 512, (6,8), 1, 1, scope='reduction', padding='VALID')
                    #x = tf.expand_dims(x, 1)
                    #x = tf.expand_dims(x, 1)
                    #x = slim.separable_conv2d(x, 1024, 1, 1, 1, scope='reduction')
                    x = slim.dropout(x, keep_prob=cfg.DROPOUT, is_training=self.train_, scope='dropout')
                    x = tf.squeeze(x, [1,2])
                    log('post-cnn', x.shape)
            with tf.name_scope('format_out'):
                x = split_reshape(x, 0, dim_t) # ==> [N,T,...]
                log('cnn-output', x.shape)
        log('-------------')
        return x
Example #24
    def encoder(self, x):
        """ Convolutional variational encoder to encode image into a low-dimensional latent code
        If config.conv == False it is a MLP VAE. If config.use_vae == False, it is a normal encoder
        :param x: sequence of images
        :return: a, a_mu, a_var
        """
        with tf.variable_scope('vae/encoder'):
            if self.config.conv:
                x_flat_conv = tf.reshape(x, (-1, self.d1, self.d2, 1))
                enc_hidden = slim.stack(x_flat_conv,
                                        slim.conv2d,
                                        self.num_filters,
                                        kernel_size=self.config.filter_size,
                                        stride=2,
                                        activation_fn=self.activation_fn,
                                        padding='SAME')
                enc_flat = slim.flatten(enc_hidden)
                self.enc_shape = enc_hidden.get_shape().as_list()[1:]

            else:
                x_flat = tf.reshape(x, (-1, self.d1 * self.d2))
                enc_flat = slim.repeat(x_flat, self.config.num_layers, slim.fully_connected,
                                       self.config.vae_num_units, self.activation_fn)

            a_mu = slim.fully_connected(enc_flat, self.config.dim_a, activation_fn=None)
            if self.config.use_vae:
                a_var = slim.fully_connected(enc_flat, self.config.dim_a, activation_fn=tf.nn.sigmoid)
                a_var = self.config.noise_emission * a_var
                a = simple_sample(a_mu, a_var)
            else:
                a_var = tf.constant(1., dtype=tf.float32, shape=())
                a = a_mu
            a_seq = tf.reshape(a, tf.stack((-1, self.ph_steps, self.config.dim_a)))
        return a_seq, a_mu, a_var
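simple_sample is not defined in this snippet. Assuming it implements the standard reparameterization trick for a diagonal Gaussian (explicitly an assumption about that helper, not its actual code), a plain-numpy stand-in would look like this:

import numpy as np

def simple_sample_np(a_mu, a_var):
    # Hypothetical stand-in for the simple_sample helper used above:
    # draw a ~ N(a_mu, a_var) elementwise via eps ~ N(0, 1).
    eps = np.random.randn(*np.shape(a_mu))
    return a_mu + np.sqrt(a_var) * eps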
Example #25
def decoder(z, width, height, channels=1, latent_dim=50, is_training=True):
    # Compute downsampled dimensions based on input width/height.
    d_width = int(np.ceil(width / 8.0))
    d_height = int(np.ceil(height / 8.0))

    with tf.variable_scope('decoder'):
        output = slim.fully_connected(z,
                                      d_width * d_height * 256,
                                      activation_fn=tf.nn.elu,
                                      scope='fc')

        with slim.arg_scope([slim.conv2d_transpose],
                            normalizer_fn=slim.batch_norm,
                            normalizer_params={
                                'is_training': is_training,
                                'scale': True
                            },
                            activation_fn=tf.nn.elu):
            output = tf.reshape(output, [-1, d_width, d_height, 256])
            output = slim.stack(output,
                                slim.conv2d_transpose, [256, 128, 32],
                                kernel_size=5,
                                stride=2,
                                scope='deconv1')
            output = slim.conv2d_transpose(output,
                                           channels,
                                           1,
                                           activation_fn=tf.nn.sigmoid,
                                           scope='deconv2')
            output = slim.flatten(output)
    return output
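The ceil(width / 8), ceil(height / 8) grid at the top matches the three stride-2 transposed convolutions in 'deconv1', which upsample by 2**3 = 8 (assuming SAME padding). A quick arithmetic sketch with an illustrative 28-pixel input:

import numpy as np

width = height = 28                              # illustrative, not from the original
d = int(np.ceil(width / 8.0))                    # 4: the grid the fully connected layer produces
upsampled = [d * 2 ** k for k in range(1, 4)]    # [8, 16, 32] after each stride-2 deconv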
Example #26
def critic_network(states, actions):
  with tf.variable_scope('critic'):
    # state_net = tflearn.fully_connected(states, 300, activation='relu', scope='full_state')
    # action_net = tflearn.fully_connected(actions, 300, activation='relu', scope='full_action')
    state_net = slim.stack(states, slim.fully_connected, [400], activation_fn=tf.nn.relu, scope='stack_state')
    # action_net = slim.stack(actions, slim.fully_connected, [300], activation_fn=tf.nn.relu, scope='stack_action')
    # net = tf.contrib.layers.fully_connected(states, 400, scope='full_state')
    # net = tflearn.fully_connected(states, 400)
    # net = tflearn.layers.normalization.batch_normalization(net)
    # net = tflearn.activations.relu(net)
    net = tf.concat([state_net, actions], 1)
    # net = tf.contrib.layers.fully_connected(net, 400)
    net = slim.fully_connected(net, 300, activation_fn=tf.nn.relu, scope='full')
    # w1 = tf.get_variable('w1', shape=[400, 300], dtype=tf.float32)
    # w2 = tf.get_variable('w2', shape=[1, 300], dtype=tf.float32)
    # b = tf.get_variable('b', shape=[300], dtype=tf.float32)
    # t1 = tflearn.fully_connected(net, 300)
    # t2 = tflearn.fully_connected(actions, 300)
    # print t1.W, t2.W
    # net = tflearn.activation(
    #     tf.matmul(net, t1.W) + tf.matmul(actions, t2.W) + t2.b, activation='relu')

    # net = tf.matmul(net, w1) + tf.matmul(actions, w2) + b
    # net = tf.nn.relu(net)
    # net = slim.stack(net, slim.fully_connected, [5], activation_fn=tf.nn.relu, scope='stack')
    # net = slim.fully_connected(net, 1, activation_fn=tf.nn.relu, scope='full')
    # net = tf.contrib.layers.fully_connected(net, 1, scope='last')
    # w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
    # net = slim.stack(net, slim.fully_connected, [24, 1], scope='final', biases_initializer=tf.zeros_initializer())
    # net = tf.layers.dense(net, 1, activation=tf.nn.relu, use_bias=True, name='last')
    # net = tflearn.fully_connected(net, 1)
    net = slim.fully_connected(net, 1, activation_fn=None, scope='last', weights_initializer=tf.random_uniform_initializer(-3e-4, 3e-4))
    net = tf.squeeze(net, axis=[1])
    return net
def actor_network(states):
  with tf.variable_scope('actor', reuse=tf.AUTO_REUSE):
    net = slim.stack(states, slim.fully_connected, [400, 300], activation_fn=tf.nn.relu, scope='stack')
    net = slim.fully_connected(net, action_shape, activation_fn=tf.nn.tanh, scope='full', weights_initializer=tf.random_uniform_initializer(-3e-4, 3e-4))
    # mult with action bounds
    net = ACTION_SCALE_MAX * net
    return net
Example #28
def fc_net(inp,
           layers,
           out_layers,
           scope,
           lamba=1e-3,
           activation=tf.nn.relu,
           reuse=None,
           weights_initializer=initializers.xavier_initializer(uniform=False)):
    with slim.arg_scope([slim.fully_connected],
                        activation_fn=activation,
                        normalizer_fn=None,
                        weights_initializer=weights_initializer,
                        reuse=reuse,
                        weights_regularizer=slim.l2_regularizer(lamba)):

        if layers:
            h = slim.stack(inp, slim.fully_connected, layers, scope=scope)
            if not out_layers:
                return h
        else:
            h = inp
        outputs = []
        for i, (outdim, activation) in enumerate(out_layers):
            o1 = slim.fully_connected(h,
                                      outdim,
                                      activation_fn=activation,
                                      scope=scope + '_{}'.format(i + 1))
            outputs.append(o1)
        return outputs if len(outputs) > 1 else outputs[0]
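A hedged usage sketch for fc_net (assumes TF 1.x with tf.contrib.slim available; the 25-dim input, the layer sizes, and the two heads are made-up values, not from the original code): a shared two-layer trunk feeding one linear head and one sigmoid head via out_layers.

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 25], name='x')
mu_head, prob_head = fc_net(x,
                            layers=[200, 200],
                            out_layers=[(1, None), (1, tf.nn.sigmoid)],
                            scope='example_heads')
# With out_layers empty (and layers non-empty), fc_net instead returns the
# trunk activations themselves.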
Example #29
def GeneratorNetwork(noise,
                     out_dim,
                     hSeq=(32, 256, 64),
                     gamma_reg=gamma_reg,
                     scope="generator"):
    #with tf.variable_scope(scope):
    regularizer = slim.l2_regularizer(gamma_reg)

    hidden = slim.fully_connected(
        slim.flatten(noise),
        num_outputs=hSeq[0],
        activation_fn=tf.nn.relu,
        weights_initializer=tf.random_normal_initializer(0, 0.1),
        weights_regularizer=regularizer)
    if (len(hSeq) > 1):
        hidden = slim.stack(hidden,
                            slim.fully_connected,
                            list(hSeq[1:]),
                            activation_fn=tf.nn.tanh,
                            weights_initializer=tf.random_normal_initializer(
                                0, 0.1),
                            weights_regularizer=regularizer)

    gen = slim.fully_connected(
        hidden,
        num_outputs=out_dim,
        activation_fn=tf.nn.sigmoid,
        weights_initializer=tf.random_normal_initializer(0, 0.1),
        biases_initializer=None)
    return gen
Example #30
def DiscriminatorNetwork(test_in,
                         hSeq=(32, 256, 64),
                         gamma_reg=gamma_reg,
                         scope="discriminator",
                         reuseFlag=None):
    #with tf.variable_scope(scope, reuse=reuseFlag):
    regularizer = slim.l2_regularizer(gamma_reg)

    hidden = slim.fully_connected(
        slim.flatten(test_in),
        num_outputs=hSeq[0],
        activation_fn=tf.nn.relu,
        weights_initializer=tf.random_normal_initializer(0, 0.1),
        weights_regularizer=regularizer)
    if (len(hSeq) > 1):
        hidden = slim.stack(hidden,
                            slim.fully_connected,
                            list(hSeq[1:]),
                            activation_fn=tf.nn.sigmoid,
                            weights_initializer=tf.random_normal_initializer(
                                0, 0.1),
                            weights_regularizer=regularizer)

    disc = slim.fully_connected(
        hidden,
        1,
        activation_fn=tf.nn.sigmoid,
        weights_initializer=tf.random_normal_initializer(0, 0.1),
        biases_initializer=None)
    return disc
Example #31
def Discriminator_separable_rotations(
        poses,
        shapes,
        weight_decay,
):
    """
    23 Discriminators on each joint + 1 for all joints + 1 for shape.
    To share the params on rotations, this treats the 23 rotation matrices
    as a "vertical image":
    Do 1x1 conv, then send off to 23 independent classifiers.

    Input:
    - poses: N x 23 x 1 x 9, NHWC ALWAYS!!
    - shapes: N x 10
    - weight_decay: float

    Outputs:
    - prediction: N x (1+23) or N x (1+23+1) if do_joint is on.
    - variables: tf variables
    """
    data_format = "NHWC"
    with tf.name_scope("Discriminator_sep_rotations", [poses, shapes]):
        with tf.variable_scope("D") as scope:
            with slim.arg_scope(
                [slim.conv2d, slim.fully_connected],
                    weights_regularizer=slim.l2_regularizer(weight_decay)):
                with slim.arg_scope([slim.conv2d], data_format=data_format):
                    poses = slim.conv2d(poses, 32, [1, 1], scope='D_conv1')
                    poses = slim.conv2d(poses, 32, [1, 1], scope='D_conv2')
                    theta_out = []
                    for i in range(0, 23):
                        theta_out.append(
                            slim.fully_connected(
                                poses[:, i, :, :],
                                1,
                                activation_fn=None,
                                scope="pose_out_j%d" % i))
                    theta_out_all = tf.squeeze(tf.stack(theta_out, axis=1))

                    # Do shape on its own:
                    shapes = slim.stack(
                        shapes,
                        slim.fully_connected, [10, 5],
                        scope="shape_fc1")
                    shape_out = slim.fully_connected(
                        shapes, 1, activation_fn=None, scope="shape_final")
                    """ Compute joint correlation prior!"""
                    nz_feat = 1024
                    poses_all = slim.flatten(poses, scope='vectorize')
                    poses_all = slim.fully_connected(
                        poses_all, nz_feat, scope="D_alljoints_fc1")
                    poses_all = slim.fully_connected(
                        poses_all, nz_feat, scope="D_alljoints_fc2")
                    poses_all_out = slim.fully_connected(
                        poses_all,
                        1,
                        activation_fn=None,
                        scope="D_alljoints_out")
                    out = tf.concat([theta_out_all,
                                     poses_all_out, shape_out], 1)

            variables = tf.contrib.framework.get_variables(scope)
            return out, variables
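Tying the docstring above to concrete tensors, a hedged usage sketch (TF 1.x assumed; the placeholder shapes come from the docstring, while the 1e-4 weight decay is just illustrative):

import tensorflow as tf

poses = tf.placeholder(tf.float32, [None, 23, 1, 9], name='poses')    # N x 23 x 1 x 9
shapes = tf.placeholder(tf.float32, [None, 10], name='shapes')        # N x 10
pred, d_vars = Discriminator_separable_rotations(poses, shapes, weight_decay=1e-4)
# pred: N x 25 = 23 per-joint scores + 1 all-joints score + 1 shape score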