Example #1
import tensorflow as tf
from keras import backend as K


def sampling(args):
    """Draw z = z_mean + sigma * epsilon.

    Note: the second argument is treated as log(sigma), hence the bare
    exp(); Example #2 below passes log-variance and multiplies by 0.5.
    """
    z_mean, z_log_sigma = args
    batch = K.shape(z_mean)[0]    # dynamic batch size
    dim = K.int_shape(z_mean)[1]  # static latent dimension
    # (the original wrapped this draw in a redundant reshape to the same shape)
    epsilon = K.random_normal(shape=(batch, dim), dtype=tf.float32)
    return z_mean + K.exp(z_log_sigma) * epsilon
Example #2
from keras import backend as K


def sampling(args):
    """Reparameterization trick by sampling from an isotropic unit Gaussian.
    # Arguments
        args (tensor): mean and log of variance of Q(z|X)
    # Returns
        z (tensor): sampled latent vector
    """

    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon
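A minimal sketch of how a sampling function like the one above is usually wired into a VAE encoder through a Lambda layer; the layer sizes below are assumptions for illustration, not taken from the examples:

from keras import backend as K
from keras.layers import Input, Dense, Lambda
from keras.models import Model

original_dim, latent_dim = 784, 2        # hypothetical sizes
inputs = Input(shape=(original_dim, ))
h = Dense(64, activation="relu")(inputs)
z_mean = Dense(latent_dim)(h)
z_log_var = Dense(latent_dim)(h)
# Lambda defers the random draw to graph execution, one draw per forward pass
z = Lambda(sampling, output_shape=(latent_dim, ))([z_mean, z_log_var])
encoder = Model(inputs, [z_mean, z_log_var, z])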
Example #3
    def buildNetwork(self):
        """ Assemble shared layers
        """
        self.input_env = Input(self.env_dim, name="env_state")
        self.input_pos = Input((2, ), name="env_pos")
        self.input_knowledge = Input((256, ), name="prev_knowledge")
        self.input_action = Input(shape=(self.act_dim, ), name="action")

        # encode the current observation and agent position into a knowledge update
        knowledge_update = self.build_tower(self.input_env, self.input_pos)

        self.update_knowledge_layer = UpdateKnowledgeLayer()
        self.knowledge = self.update_knowledge_layer(
            [self.input_knowledge, knowledge_update])

        # learned per-output noise scale; the bias starts at 1e3, so the
        # value outputs are strongly perturbed early in training
        stddev = Dense(4,
                       activation="relu",
                       kernel_initializer="zeros",
                       bias_initializer=Constant(1e3))(self.knowledge)

        # draw unit Gaussian noise and scale it elementwise by the learned stddev
        stddev = Lambda(lambda x: K.random_normal(K.shape(x)) * x)(stddev)

        # four linear value outputs, perturbed by the scaled noise
        values = Dense(4, activation="linear")(self.knowledge)
        values = Add()([values, stddev])

        # inference model: returns the noisy values and the updated knowledge
        self.prediction_model = Model(
            [self.input_env, self.input_pos, self.input_knowledge],
            [values, self.knowledge])

        # pick out the value of the taken action (assumed one-hot) via a dot product
        predicted_for_action = Dot(axes=-1)([values, self.input_action])

        self.train_model = Model(inputs=[
            self.input_env, self.input_pos, self.input_knowledge,
            self.input_action
        ],
                                 outputs=[predicted_for_action])

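The noise pattern in Example #3 (a Dense head that outputs standard deviations, multiplied by unit Gaussian noise from K.random_normal) can be exercised on its own. A minimal sketch, assuming small placeholder sizes and a modest bias initializer; none of these names or sizes come from the example itself:

import numpy as np
from keras import backend as K
from keras.layers import Input, Dense, Lambda, Add
from keras.models import Model
from keras.initializers import Constant

x_in = Input((8, ))                      # placeholder feature input
stddev = Dense(4, activation="relu",
               kernel_initializer="zeros",
               bias_initializer=Constant(0.1))(x_in)
# unit Gaussian noise, scaled elementwise by the learned stddev
noise = Lambda(lambda s: K.random_normal(K.shape(s)) * s)(stddev)
values = Dense(4, activation="linear")(x_in)
model = Model(x_in, Add()([values, noise]))
print(model.predict(np.ones((2, 8))))    # two stochastic 4-vectors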
Example #4
def sample_z(args):
    # `batch_size` and `self.latent_shape` come from the enclosing scope
    z_m, z_l_s = args
    eps = K.random_normal(shape=(batch_size, self.latent_shape),
                          mean=0.,
                          stddev=1.)  # the Keras 2 keyword is `stddev`, not `std`
    return z_m + K.exp(z_l_s / 2) * eps


def sampling(args):
    # `latent_dim` and `epsilon_std` come from the enclosing scope; the
    # (latent_dim,) noise broadcasts over the batch dimension
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(latent_dim, ), mean=0., stddev=epsilon_std)
    return z_mean + K.exp(z_log_var / 2) * epsilon
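In Keras 2, K.random_normal takes mean and stddev keyword arguments. A quick standalone check that a call like the ones above draws the expected shape:

from keras import backend as K

eps = K.random_normal(shape=(3, 4), mean=0., stddev=1.)
print(K.eval(eps).shape)   # (3, 4)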