Example #1
    def forward_MLP(self,
                    name,
                    all_params,
                    input_tensor=None,
                    task_idx=None,
                    noise=None,
                    input_shape=None,
                    n_hidden=-1,
                    hidden_nonlinearity=tf.identity,
                    output_nonlinearity=tf.identity,
                    batch_normalization=False,
                    reuse=True,
                    is_training=False):
        # is_training and reuse are for batch norm, irrelevant if batch_norm set to False
        # set reuse to False if the first time this func is called.
        with tf.variable_scope(name):
            if input_tensor is None:
                assert input_shape is not None
                l_in = make_input(shape=(None, ) + input_shape,
                                  input_var=None,
                                  name='input')
                l_tasks = tf.placeholder(tf.int32,
                                         shape=(None, ),
                                         name="task_idxs")
                l_noise = make_input(shape=(None, self.latent_dim),
                                     input_var=None,
                                     name='noise')
            else:
                l_in = input_tensor
                l_tasks = task_idx
                l_noise = noise

            chosen_latent_means = tf.gather(all_params['latent_means'],
                                            l_tasks)
            chosen_latent_stds = tf.gather(all_params['latent_stds'], l_tasks)
            zs = chosen_latent_means + l_noise * tf.exp(chosen_latent_stds)
            # l_hid = l_in
            l_hid = tf.concat([l_in, zs], axis=1)

            for idx in range(n_hidden):
                l_hid = forward_dense_layer(l_hid,
                                            all_params['W' + str(idx)],
                                            all_params['b' + str(idx)],
                                            batch_norm=batch_normalization,
                                            nonlinearity=hidden_nonlinearity,
                                            scope=str(idx),
                                            reuse=reuse,
                                            is_training=is_training)
            output = forward_dense_layer(
                l_hid,
                all_params['W' + str(n_hidden)],
                all_params['b' + str(n_hidden)],
                batch_norm=False,
                nonlinearity=output_nonlinearity,
            )
            return l_in, l_tasks, l_noise, zs, output
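
The latent-conditioned variant above gathers a per-task mean and log-std row with tf.gather and applies the reparameterization trick to the supplied noise. A minimal, self-contained sketch of just that step (the dimensions and variable names are illustrative, not taken from the library):

import tensorflow as tf

n_tasks, latent_dim = 10, 4
latent_means = tf.get_variable('latent_means', shape=(n_tasks, latent_dim))
latent_stds = tf.get_variable('latent_stds', shape=(n_tasks, latent_dim))  # treated as log-stds

task_idx = tf.placeholder(tf.int32, shape=(None,), name='task_idxs')
noise = tf.placeholder(tf.float32, shape=(None, latent_dim), name='noise')

# z = mu[task] + eps * exp(log_sigma[task]); one latent sample per batch row
zs = tf.gather(latent_means, task_idx) + noise * tf.exp(tf.gather(latent_stds, task_idx))
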
Example #2
    def forward_MLP(self, name, all_params, input_tensor=None,
                    batch_normalization=False, reuse=True, is_training=False):
        # is_training and reuse are for batch norm, irrelevant if batch_norm set to False
        # set reuse to False if the first time this func is called.
        with tf.variable_scope(name):
            if input_tensor is None:
                l_in = make_input(shape=self.input_shape, input_var=None, name='input')
            else:
                l_in = input_tensor

            l_hid = l_in

            for idx in range(self.n_hidden):
                l_hid = forward_dense_layer(l_hid, all_params['W'+str(idx)], all_params['b'+str(idx)],
                                            batch_norm=batch_normalization,
                                            nonlinearity=self.hidden_nonlinearity,
                                            scope=str(idx), reuse=reuse,
                                            is_training=is_training
                                            )
            output = forward_dense_layer(l_hid, all_params['W'+str(self.n_hidden)], all_params['b'+str(self.n_hidden)],
                                         batch_norm=False, nonlinearity=self.output_nonlinearity,
                                         )
            return l_in, output
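
forward_dense_layer itself is not shown on this page; judging from the calls above, it applies one fully connected layer with externally supplied parameters (plus optional batch norm). A hedged sketch of the non-batch-norm case, under that assumption:

import tensorflow as tf

def dense_forward(x, W, b, nonlinearity=tf.identity):
    # y = f(xW + b), with the weights passed in explicitly, mirroring the
    # all_params['W...'] / all_params['b...'] dictionaries used above.
    return nonlinearity(tf.matmul(x, W) + b)

# Hypothetical usage with such a parameter dictionary:
# h = dense_forward(l_in, all_params['W0'], all_params['b0'], tf.nn.relu)
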
Example #3
    def forward_MLP(self, name, all_params, input_tensor=None,
                    batch_normalization=False, reuse=True, is_training=False):
        # is_training and reuse are for batch norm, irrelevant if batch_norm set to False
        # set reuse to False if the first time this func is called.
        with tf.variable_scope(name):
            if input_tensor is None:
                l_in = make_input(shape=(None, self.obs_dim,), input_var=None, name='input')
            else:
                l_in = input_tensor

            
            bs = tf.shape(l_in)[0]
            conc_bias = tf.tile(all_params['bias_transformation'][None, :], (bs,1))
            l_hid = tf.concat([l_in, conc_bias], axis=1)
            
            for idx in range(self.n_hidden):
                l_hid = forward_dense_layer(l_hid, all_params['W'+str(idx)], all_params['b'+str(idx)],
                                            batch_norm=batch_normalization,
                                            nonlinearity=self.hidden_nonlinearity,
                                            scope=str(idx), reuse=reuse,
                                            is_training=is_training
                                            )
            output = forward_dense_layer(l_hid, all_params['W'+str(self.n_hidden)], all_params['b'+str(self.n_hidden)],
                                         batch_norm=False, nonlinearity=self.output_nonlinearity,
                                         )
            return l_in, output
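
Example #3 adds a learned "bias transformation": a 1-D variable is tiled across the batch dimension and concatenated to every observation, giving the network an extra adaptable input. That trick in isolation, with hypothetical sizes:

import tensorflow as tf

obs_dim, bias_dim = 8, 4
obs = tf.placeholder(tf.float32, shape=(None, obs_dim), name='input')
bias_transformation = tf.get_variable('bias_transformation', shape=(bias_dim,),
                                      initializer=tf.zeros_initializer())

bs = tf.shape(obs)[0]
# Broadcast the learned vector to every row of the batch, then append it to the input.
conc_bias = tf.tile(bias_transformation[None, :], (bs, 1))
net_in = tf.concat([obs, conc_bias], axis=1)  # shape (batch, obs_dim + bias_dim)
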
Example #4
    def forward_MLP(self,
                    name,
                    all_params,
                    input_tensor=None,
                    batch_normalization=False,
                    reuse=True,
                    is_training=False):
        # is_training and reuse are for batch norm, irrelevant if batch_norm set to False
        # set reuse to False if the first time this func is called.
        with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
            if input_tensor is None:
                l_in = make_input(shape=self.input_shape,
                                  input_var=None,
                                  name='input')
            else:
                l_in = input_tensor

            image_input = tf.reshape(
                l_in[:, :3 * (self.im_x_dim * self.im_y_dim)],
                (-1, self.im_x_dim, self.im_y_dim, 3))
            robot_config = l_in[:, 3 * (self.im_x_dim * self.im_y_dim):]
            conc_bias = tf.tile(all_params['bias_transform'][None, :],
                                (tf.shape(l_in)[0], 1))

            conv_layer = image_input

            for i in range(self.n_conv_layers):

                conv_layer = norm(conv2d(img=conv_layer,
                                         w=all_params['conv_w%d' % (i + 1)],
                                         b=all_params['conv_b%d' % (i + 1)],
                                         strides=(1,) + self.strides[i] + (1,)),
                                  norm_type=self.norm_type,
                                  is_training=is_training,
                                  activation_fn=self.hidden_nonlinearity)

            #feature_points = tf.contrib.layers.spatial_softmax( conv_layer )
            feature_points = spatial_softmax(conv_layer)
            feature_points = tf.concat(
                [feature_points, robot_config, conc_bias], axis=1)

            fc_layer = feature_points

            for idx in range(self.n_fc_hidden_layers):
                fc_layer = forward_dense_layer(
                    fc_layer,
                    all_params['fc_w' + str(idx)],
                    all_params['fc_b' + str(idx)],
                    batch_norm=(self.norm_type == 'batch_norm'),
                    nonlinearity=self.hidden_nonlinearity,
                    reuse=tf.AUTO_REUSE,
                    is_training=is_training)
            output = forward_dense_layer(
                fc_layer,
                all_params['fc_w' + str(self.n_fc_hidden_layers)],
                all_params['fc_b' + str(self.n_fc_hidden_layers)],
                batch_norm=False,
                nonlinearity=self.output_nonlinearity,
            )

            return l_in, output
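
The spatial_softmax call above converts each convolutional feature map into an expected (x, y) location ("feature points"). Assuming the usual spatial soft-argmax definition, the operation could be sketched as follows; this is an illustration of the idea, not the helper actually imported here, and it requires statically known feature-map shapes:

import tensorflow as tf

def spatial_soft_argmax(features):
    # features: (batch, H, W, C) with static H, W, C. Per-channel softmax over the
    # H*W locations, then expected (x, y) coordinates in [-1, 1]: 2 numbers per channel.
    h, w, c = features.get_shape().as_list()[1:]
    logits = tf.reshape(tf.transpose(features, [0, 3, 1, 2]), [-1, h * w])
    probs = tf.nn.softmax(logits)                                     # (batch*C, H*W)
    x_grid, y_grid = tf.meshgrid(tf.linspace(-1., 1., w), tf.linspace(-1., 1., h))
    x_coords = tf.reshape(x_grid, [h * w])
    y_coords = tf.reshape(y_grid, [h * w])
    expected_xy = tf.stack([tf.reduce_sum(probs * x_coords, axis=1),
                            tf.reduce_sum(probs * y_coords, axis=1)], axis=1)
    return tf.reshape(expected_xy, [-1, 2 * c])                       # (batch, 2*C)
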
Example #5
 def forward_MLP(self,
                 name,
                 all_params,
                 input_tensor=None,
                 input_shape=None,
                 n_hidden=-1,
                 hidden_nonlinearity=tf.identity,
                 output_nonlinearity=tf.identity,
                 batch_normalization=False,
                 reuse=True,
                 is_training=False):
     # is_training and reuse are for batch norm, irrelevant if batch_norm set to False
     # set reuse to False if the first time this func is called.
     with tf.variable_scope(name):
         if input_tensor is None:
             assert input_shape is not None
             l_in = make_input(shape=(None, ) + input_shape,
                               input_var=None,
                               name='input')
         else:
             l_in = input_tensor
         l_hid = l_in
         for idx in range(n_hidden):
             print('idx', idx, flush=True)
             l_hid = forward_dense_layer(l_hid,
                                         all_params['W' + str(idx)],
                                         all_params['b' + str(idx)],
                                         batch_norm=batch_normalization,
                                         nonlinearity=hidden_nonlinearity,
                                         scope=str(idx),
                                         reuse=reuse,
                                         is_training=is_training)
         output = forward_dense_layer(
             l_hid,
             all_params['W' + str(n_hidden)],
             all_params['b' + str(n_hidden)],
             batch_norm=False,
             nonlinearity=output_nonlinearity,
         )
         return l_in, output
Example #6
 def forward_MLP(self, name, all_params, input_tensor=None, input_shape=None, n_hidden=-1,
                 hidden_nonlinearity=tf.identity, output_nonlinearity=tf.identity,
                 batch_normalization=False, reuse=True, is_training=False):
     # is_training and reuse are for batch norm, irrelevant if batch_norm set to False
     # set reuse to False if the first time this func is called.
     with tf.variable_scope(name):
         if input_tensor is None:
             assert input_shape is not None
             l_in = make_input(shape=(None,)+input_shape, input_var=None, name='input')
         else:
             l_in = input_tensor
         l_hid = l_in
         for idx in range(n_hidden):
             l_hid = forward_dense_layer(l_hid, all_params['W'+str(idx)], all_params['b'+str(idx)],
                                         batch_norm=batch_normalization,
                                         nonlinearity=hidden_nonlinearity,
                                         scope=str(idx), reuse=reuse,
                                         is_training=is_training
                                         )
         output = forward_dense_layer(l_hid, all_params['W'+str(n_hidden)], all_params['b'+str(n_hidden)],
                                      batch_norm=False, nonlinearity=output_nonlinearity,
                                      )
         return l_in, output
Example #7
    def forward_CNN_MLP(
            self,
            name,
            all_params,
            conv_filters,
            conv_filter_sizes,
            conv_strides,
            conv_pads,
            conv_output_dim,
            conv_hidden_sizes,  # new
            input_tensor=None,
            batch_normalization=False,
            reuse=True,
            is_training=False):
        # is_training and reuse are for batch norm, irrelevant if batch_norm set to False
        # set reuse to False if the first time this func is called.
        with tf.variable_scope(name):
            if input_tensor is None:
                l_in = make_input(shape=self.input_total_shape,
                                  input_var=None,
                                  name='input')
            else:
                l_in = input_tensor

            # l_img_in = l_in[:][:np.prod(self.input_img_shape)]
            l_img_in = tf.slice(l_in, [0, 0],
                                [-1, np.prod(self.input_img_shape)])
            # l_state_in = l_in[:][np.prod(self.input_img_shape):]
            l_state_in = tf.slice(l_in, [0, np.prod(self.input_img_shape)],
                                  [-1, -1])
            # print("Debug212",l_img_in, l_state_in)
            l_normalized_img_in = tf.cast(l_img_in, tf.float32) / 255
            # if self.cnn is None:
            self.cnn = ConvNetwork(
                name=name + "cnn",
                input_shape=self.input_img_shape,
                output_dim=conv_output_dim,
                conv_filters=conv_filters,
                conv_filter_sizes=conv_filter_sizes,
                conv_strides=conv_strides,
                conv_pads=conv_pads,
                hidden_sizes=conv_hidden_sizes,
                hidden_nonlinearity=tf.nn.relu,
                output_nonlinearity=L.spatial_expected_softmax,
                input_var=l_normalized_img_in)
            #     cnn = self.cnn
            # else:
            #     self.cnn2 = ConvNetwork(name=name+"cnn2",input_shape=self.input_img_shape,output_dim=conv_output_dim,
            #               conv_filters=conv_filters,conv_filter_sizes=conv_filter_sizes,conv_strides=conv_strides,
            #               conv_pads=conv_pads, hidden_sizes=conv_hidden_sizes,hidden_nonlinearity=tf.nn.relu,output_nonlinearity=L.spatial_expected_softmax, input_var=l_normalized_img_in)
            #     cnn = self.cnn2

            # print("debug234, cnn output layer", L.get_output(self.cnn._l_out))
            l_hid = tf.concat(
                [l_state_in, L.get_output(self.cnn.output_layer)], -1,
                'post_conv_input')
            # l_hid = tf.concat([l_state_in,l_normalized_img_in],-1,'post_conv_input')

            # l_hid=l_in
            for idx in range(self.n_hidden):
                l_hid = forward_dense_layer(
                    l_hid,
                    all_params['W' + str(idx)],
                    all_params['b' + str(idx)],
                    batch_norm=batch_normalization,
                    nonlinearity=self.hidden_nonlinearity,
                    scope=str(idx),
                    reuse=reuse,
                    is_training=is_training)
            output = forward_dense_layer(
                l_hid,
                all_params['W' + str(self.n_hidden)],
                all_params['b' + str(self.n_hidden)],
                batch_norm=False,
                nonlinearity=self.output_nonlinearity,
            )
            return l_in, output
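
Example #7 packs image pixels and a low-dimensional state into one flat input, routes only the pixel block through the ConvNetwork, and rescales it to [0, 1] first. The slicing and normalization step in isolation, with made-up shapes:

import numpy as np
import tensorflow as tf

input_img_shape = (64, 64, 3)   # hypothetical image shape
state_dim = 7                   # hypothetical robot-state size
img_flat = int(np.prod(input_img_shape))

l_in = tf.placeholder(tf.float32, shape=(None, img_flat + state_dim), name='input')
# Image pixels occupy the first img_flat columns; the remaining columns are the state.
l_img_in = tf.slice(l_in, [0, 0], [-1, img_flat])
l_state_in = tf.slice(l_in, [0, img_flat], [-1, -1])
l_normalized_img_in = tf.cast(l_img_in, tf.float32) / 255
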