# Example 1
 def update_memory(self, x):
     """Run a training-mode pass through every block, collecting update ops.

     Each block returns a memory tensor and a list of updates; the
     flattened memory becomes the input of the next block.
     """
     pending = []
     for block in self.blocks:
         memory, block_updates = block(x, is_training=True)
         # Chain: the next block consumes the flattened memory.
         x = flatten(memory)
         pending.extend(block_updates)
     return pending
# Example 2
    def __encode(self, x):
        """Push x through the four conv stages, flatten, then normalize.

        Uses self.is_training to toggle training behavior in every stage.
        """
        for stage in (self.c1, self.c2, self.c3, self.c4):
            x = stage(x, is_training=self.is_training)
        return self.nm(flatten(x), is_training=self.is_training)
# Example 3
    def __call__(self, x, h, is_training=False):
        """Process input x and hidden state h through the paired blocks.

        Returns (x, h, m) at inference time; during training additionally
        returns the concatenated update ops from all three blocks:
        (x, h, m, (*xu, *hu, *mu)).
        """
        x, xu = self.blockx(x, is_training=is_training)
        h, hu = self.blockh(h, is_training=is_training)

        # Optional spatial downsampling before flattening.
        # NOTE(review): 'polling' is presumably a typo for 'pooling' — kept
        # as-is because the attribute name is part of the class interface.
        if self.polling is not None:
            x = maxpool2d(x, self.polling)
            h = maxpool2d(h, self.polling)

        x = flatten(x)
        h = flatten(h)
        xh = tf.concat([x, h], axis=1)

        m, mu = self.blockm(xh, is_training=is_training)

        # Removed leftover debug print(m) that wrote to stdout on every call.
        if is_training:
            return x, h, m, (*xu, *hu, *mu)
        return x, h, m
# Example 4
 def __call__(self, x):
     """Inference pass: chain the blocks, concatenating memories on axis 3.

     Each block's flattened memory feeds the next block; the raw (unflattened)
     memories are accumulated into a single tensor and returned.
     """
     collected = None
     for block in self.blocks:
         memory = block(x, is_training=False)
         flat_memory = flatten(memory)
         # Accumulate the raw memory of every block.
         if collected is None:
             collected = memory
         else:
             collected = tf.concat([collected, memory], axis=3)
         x = flat_memory
     return collected
# Example 5
    def _build_encoder(self, x, is_training=False, summary=False):
        """Encode x and return the VAE posterior parameters.

        Returns a 4-tuple: (z_mu, z_log_sigma_sq, dynamic shape of the
        encoded tensor, static feature width). The `summary` flag is
        accepted for interface compatibility but unused here.
        """
        encoded = x
        for layer in self.encoder:
            encoded = layer(encoded, is_training=is_training)

        features = flatten(encoded)

        # Mean branch of the posterior.
        mu = self.ef11(self.ef1(features, is_training, 1024), is_training)
        # Log-variance branch of the posterior.
        log_sigma_sq = self.ef21(self.ef2(features, is_training, 1024),
                                 is_training)

        return mu, log_sigma_sq, tf.shape(encoded), features.shape[1]
# Example 6
    def _build_encoder(self, x, is_training=False, summary=False):
        """Self-attention + conv encoder producing VAE posterior parameters.

        Returns (attn_encoded, attn_vars, z_mu, z_log_sigma_sq,
        dynamic shape of the last conv output, static feature width).
        When `summary` is True, emits TensorBoard image/histogram summaries.
        """
        # NOTE(review): attention is always invoked with True (training mode)
        # regardless of is_training — confirm this is intentional.
        attn_encoded, attn_vars = self.attn(x, True, summary, 2)
        attn_encoded = ln(x + attn_encoded)  # residual connection + layer norm

        encoded1 = attn_encoded
        encoded2 = self.ec1(encoded1, is_training=is_training)
        encoded3 = self.ec2(encoded2, is_training=is_training)
        encoded4 = self.ec3(encoded3, is_training=is_training)
        encoded5 = self.ec4(encoded4, is_training=is_training)

        features = flatten(encoded5)

        z_mu = self.ef1(features, is_training)
        z_mu = self.ef11(z_mu, is_training, self.size)

        z_log_sigma_sq = self.ef2(features, is_training)
        z_log_sigma_sq = self.ef21(z_log_sigma_sq, is_training, self.size)

        if summary:
            tf.summary.image(family='vae_values',
                             name='encoder_input',
                             tensor=attn_encoded,
                             max_outputs=1)

            # BUG FIX: encoded3 was previously logged under the duplicate
            # name "e_l4", colliding with encoded4's histogram; it is now
            # correctly tagged "e_l3".
            for tag, tensor in (("e_l1", encoded1),
                                ("e_l2", encoded2),
                                ("e_l3", encoded3),
                                ("e_l4", encoded4),
                                ("e_l5", encoded5),
                                ("mu", z_mu),
                                ("log_sigma", z_log_sigma_sq)):
                tf.summary.histogram(family='vae_layers',
                                     name=tag,
                                     values=tensor)

        return attn_encoded, attn_vars, z_mu, z_log_sigma_sq, tf.shape(
            encoded5), features.shape[1]
# Example 7
    def update_memory(self, x, h):
        """Training pass through all blocks; returns the accumulated updates.

        Each block yields transformed (x, h), a memory tensor, and update
        ops; the flattened memory is concatenated onto both x and h for
        the next block.
        """
        updates = []
        for block in self.blocks:
            # Retrieve this block's memory information.
            xi, hi, mi, block_updates = block(x, h, is_training=True)
            flat_memory = flatten(mi)

            # Build inputs for the next layer: output + flattened memory.
            x = tf.concat([xi, flat_memory], axis=1)
            h = tf.concat([hi, flat_memory], axis=1)

            updates.extend(block_updates)

        return updates
# Example 8
    def __call__(self, x, h):
        """Inference pass through all blocks; returns concatenated memories.

        Mirrors update_memory but runs with is_training=False and, instead
        of collecting update ops, accumulates each block's raw memory
        tensor along axis 3.
        """
        collected = None
        for block in self.blocks:
            # Retrieve this block's memory information.
            xi, hi, mi = block(x, h, is_training=False)
            flat_memory = flatten(mi)

            # Accumulate the raw memory of every block.
            collected = mi if collected is None else tf.concat(
                [collected, mi], axis=3)

            # Build inputs for the next layer: output + flattened memory.
            x = tf.concat([xi, flat_memory], axis=1)
            h = tf.concat([hi, flat_memory], axis=1)

        return collected