Example #1
 def meanf(m):
     # one mean-field sweep: each hidden layer is refreshed from the
     # layer below (ml, through wl) and the layer above (mr, through
     # the transpose of wr)
     mf, ml = [], self.v_
     for wl, wr, mr in zip(self.w, self.w[1:], m[1:]):
         mf.append(tf.sigmoid(tf.matmul(ml, wl) + tf.matmul(mr, wr, transpose_b= True)))
         ml = mf[-1]
     # the top layer conditions only on the layer below
     mf.append(tf.sigmoid(tf.matmul(ml, self.w[-1])))
     return tuple(mf)
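This `meanf` is the inner mean-field routine of the Dbm class shown in Example #9. A minimal NumPy sketch of the same sweep iterated towards a fixed point, with hypothetical layer sizes, may make the zip bookkeeping clearer:

import numpy as np

def sigmoid(x): return 1.0 / (1.0 + np.exp(-x))

# hypothetical sizes: 4 visible units, hidden layers of 3 and 2
rng = np.random.default_rng(0)
w0, w1 = rng.normal(size=(4, 3)), rng.normal(size=(3, 2))
v = rng.integers(0, 2, size=(5, 4)).astype(float)    # batch of 5
m1, m2 = np.full((5, 3), 0.5), np.full((5, 2), 0.5)  # initial means

for _ in range(10):  # iterate the sweep towards a fixed point
    m1 = sigmoid(v @ w0 + m2 @ w1.T)  # conditions on below and above
    m2 = sigmoid(m1 @ w1)             # top layer: below only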
Example #2
File: rbm.py Project: ysmiraak/lgm
    def __init__(self, dim_v, dim_h, samples
                 , init_w= tf.random_uniform_initializer(minval= -0.01, maxval= 0.01)
                 , ftype= tf.float32
                 , scope= 'rbm'):
        self.dim_v, self.dim_h, self.ftype, self.scope = dim_v, dim_h, ftype, scope
        with tf.variable_scope(scope):
            # todo add bias
            self.w = tf.get_variable(name= 'w', shape= (self.dim_v, self.dim_h), initializer= init_w)
            # positive stage: inference
            self.v_ = tf.placeholder(name= 'v_', dtype= self.ftype, shape= (None, self.dim_v))
            with tf.name_scope('hgv'):
                self.hgv = tf.sigmoid(tf.matmul(self.v_, self.w))
            # self.act_h = binary(self.hgv, transform= False, threshold= None)
            # self.h_ = tf.placeholder(name= 'h_', dtype= self.ftype, shape= (None, self.dim_h))
            # self.vgh = tf.matmul(self.h_, self.w, transpose_b= True)
            # self.act_v = binary(self.vgh, transform= False, threshold= None)

            with tf.name_scope('pos'):
                self.pos = tf.matmul(self.v_, self.hgv, transpose_a= True)
                self.pos /= tf.cast(tf.shape(self.v_)[0], dtype= self.ftype)
            # negative stage: stochastic approximation
            self.v = binary_variable(name= 'v', shape= (samples, self.dim_v), dtype= self.ftype)
            self.h = binary_variable(name= 'h', shape= (samples, self.dim_h), dtype= self.ftype)
            self.k_ = tf.placeholder(name= 'k_', dtype= tf.int32, shape= ())

            def gibbs(v, _h):
                h = binary(tf.matmul(v, self.w))
                v = binary(tf.matmul(h, self.w, transpose_b= True))
                # todo real valued v
                # v = tf.sigmoid(tf.matmul(h, self.w, transpose_b= True))
                return v, h

            with tf.name_scope('gibbs'):
                vh = self.v, self.h
                v, h = self.gibbs = tuple(
                    tf.assign(x, x2, validate_shape= False) for x, x2 in zip(
                        vh, tf.while_loop(
                            loop_vars= (self.k_, vh)
                            , cond= lambda k, vh: (0 < k)
                            , body= lambda k, vh: (k - 1, gibbs(*vh)))[1]))

            with tf.name_scope('neg'):
                # todo update with real probabilities instead of binaries
                h = tf.sigmoid(tf.matmul(v, self.w))
                v = tf.sigmoid(tf.matmul(h, self.w, transpose_b= True))
                self.neg = tf.matmul(v, h, transpose_a= True)
                self.neg /= tf.cast(tf.shape(self.v)[0], dtype= self.ftype)
            self.lr_ = tf.placeholder(name= 'lr_', dtype= self.ftype, shape= ())
            with tf.name_scope('up'):
                self.up = self.w.assign_add((self.pos - self.neg) * self.lr_).op
            self.step = 0
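Running the `up` op pulls in the whole persistent contrastive divergence step: `pos` needs a data batch fed through `v_`, and `neg` advances the persistent chains by `k_` Gibbs steps via the `gibbs` assign ops. A minimal sketch of one training step, assuming a hypothetical `batch` array of shape (batch_size, dim_v) and initialized variables:

rbm = Rbm(dim_v=784, dim_h=256, samples=100)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(rbm.up, feed_dict={
        rbm.v_: batch,   # positive phase: data statistics
        rbm.k_: 1,       # Gibbs steps on the persistent chains
        rbm.lr_: 0.01})  # learning rate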
Example #3
File: dbn.py Project: ysmiraak/lgm
 def __init__(self, dim, samples
              , init_w= tf.random_uniform_initializer(minval= -0.01, maxval= 0.01)
              , ftype= tf.float32, scope= 'dbn'):
     self.dim, self.ftype = dim, ftype
     with tf.variable_scope(scope):
         self.rbm = tuple(
             Rbm(scope= "rbm{}".format(i)
                 , dim_v= dim_v
                 , dim_h= dim_h
                 , samples= samples
                 , init_w= init_w
                 , ftype= self.ftype)
             for i, (dim_v, dim_h) in enumerate(zip(dim, dim[1:]), 1))
         self.w = tuple(rbm.w for rbm in self.rbm[::-1])
         self.wg = tuple(tf.transpose(w) for w in self.w)
         self.wr = tuple(
             tf.get_variable(name= "wr{}".format(i), shape= (dim_d, dim_a), initializer= init_w)
             for i, (dim_d, dim_a) in enumerate(zip(self.dim, self.dim[1:]), 1))
         self.lr_ = tf.placeholder(name= 'lr_', dtype= self.ftype, shape= ())
         # wake
         self.v_ = self.rbm[0].v_
         with tf.name_scope('wake'):
             recogn = [self.v_]
             for w in self.wr: recogn.append(binary(tf.matmul(recogn[-1], w)))
             self.recogn = tuple(recogn)
             recogn = recogn[::-1]
             eps = self.lr_ / tf.cast(tf.shape(self.v_)[0], dtype= self.ftype)
             self.wake = tuple(
                 w.assign_add(tf.matmul((sj - pj), sk, transpose_a= True) * eps).op
                 for w, sk, sj, pj in zip(
                         self.w, recogn, recogn[1:]
                         , (tf.sigmoid(tf.matmul(s, w))
                            for w, s in zip(self.wg, recogn))))
         # sleep
         top = self.rbm[-1]
         self.k_, (self.v, self.a) = top.k_, top.gibbs
         with tf.name_scope('sleep'):
             recons = [self.a, self.v]
              for w in self.wg[1:]: recons.append(binary(tf.matmul(recons[-1], w)))
             self.recons = tuple(recons)
             recons = recons[::-1]
             eps = self.lr_ / tf.cast(tf.shape(self.a)[0], dtype= self.ftype)
             self.sleep = tuple(
                 w.assign_add(tf.matmul(sj, (sk - qk), transpose_a= True) * eps).op
                 for w, sj, sk, qk in zip(
                         self.wr, recons, recons[1:]
                         , (tf.sigmoid(tf.matmul(s, w))
                            for w, s in zip(self.wr, recons))))
         # the waking world is the amnesia of dream.
         self.v = self.recons[-1]
         with tf.name_scope('ances'):
             self.a = self.rbm[-1].h
             ances = [self.a]
             for w in self.wg: ances.append(binary(tf.matmul(ances[-1], w)))
             self.ances = ances[-1]
         self.step = 0
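`wake` and `sleep` together make one wake-sleep fine-tuning step: the wake phase needs a data batch through `v_`, and the sleep phase drives the top RBM's persistent Gibbs chain through `k_`. A sketch, assuming the class is named `Dbn` and `batch` is a hypothetical (batch_size, dim[0]) array:

dbn = Dbn(dim=(784, 256, 64), samples=100)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # wake: adjust generative weights from a recognition pass
    sess.run(dbn.wake, feed_dict={dbn.v_: batch, dbn.lr_: 0.01})
    # sleep: adjust recognition weights from a generated dream
    sess.run(dbn.sleep, feed_dict={dbn.k_: 4, dbn.lr_: 0.01})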
Example #4
def LSTM_Sentiment(input_tensor):

    # Reference Paper: https://www.bioinf.jku.at/publications/older/2604.pdf

    # run the sequence through a single 1024-unit LSTM layer
    lstmCell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell(1024)
    output_rnn, _ = tf.compat.v1.nn.dynamic_rnn(lstmCell,
                                                input_tensor,
                                                dtype=tf.float32)

    # fully connected layer projecting onto the two sentiment classes
    W_fc = tf.Variable(tf.random.truncated_normal([1024, 2]))
    b_fc = tf.Variable(tf.constant(0.1, shape=[2]))

    # switch to time-major and keep only the last time step's output
    # (the gather requires a statically known sequence length)
    output_transposed = tf.transpose(output_rnn, perm=[1, 0, 2])
    output = tf.gather(output_transposed,
                       int(output_transposed.get_shape()[0]) - 1)

    return tf.identity(tf.matmul(output, W_fc) + b_fc, name="output")
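A sketch of how this graph might be assembled; the sequence length and embedding size below are assumptions, and the time dimension must be statically known for the final gather to work:

# hypothetical input: 24 time steps of 50-dimensional word vectors
input_ph = tf.compat.v1.placeholder(tf.float32, [None, 24, 50])
logits = LSTM_Sentiment(input_ph)   # shape (None, 2), named "output"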
Example #5
 def gibbs(x):
     x = list(x)
     # update odd layers
     for i, (xl, xr, wl, wr) in enumerate(zip(x[::2], x[2::2], self.w[::2], self.w[1::2])):
         x[1+(2*i)] = binary(tf.matmul(xl, wl) + tf.matmul(xr, wr, transpose_b= True))
     # update first layer
     x[0] = binary(tf.matmul(x[1], self.w[0], transpose_b= True))
     # update even layers
     for i, (xl, xr, wl, wr) in enumerate(zip(x[1::2], x[3::2], self.w[1::2], self.w[2::2])):
         x[2+(2*i)] = binary(tf.matmul(xl, wl) + tf.matmul(xr, wr, transpose_b= True))
     # update last layer
     x[-1] = binary(tf.matmul(x[-2], self.w[-1]))
     return tuple(x)
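`binary` itself is not among these snippets; from its use here (raw logits in, binary states out) it plausibly squashes its argument through a sigmoid and samples a Bernoulli. A hypothetical reconstruction, not the project's actual helper:

def binary(x, transform=True, threshold=None):
    # assumed semantics: `transform` applies the sigmoid to logits;
    # `threshold=None` samples {0, 1} states from the probabilities,
    # anything else returns the probabilities unchanged
    if transform:
        x = tf.sigmoid(x)
    if threshold is None:
        return tf.cast(tf.less(tf.random_uniform(tf.shape(x)), x), x.dtype)
    return x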
Example #6
File: dbn.py Project: ysmiraak/lgm
 def pre(self, sess, wtr, batchit, k= 4, lr= 0.01, steps= 0, step_plot= 0, sleep= 0):
     h2v = lambda x: x
     for rbm in self.rbm:
         # plot function from this rbm down to the bottom
         rbm.plot = plot_fn(rbm.scope)
         plot = lambda sess, wtr, v, step= None, rbm= rbm: rbm.plot(
             sess, wtr, step= rbm.step if step is None else step
             , v= h2v(v))
         # train this rbm
         rbm.pcd(sess, wtr, batchit, k= k, lr= lr, steps= steps, step_plot= step_plot, plot= plot)
         # downward closure of this rbm, to be used by the next plot function
         rbm.h2v = binary(tf.matmul(rbm.h, rbm.w, transpose_b= True))
         h2v = lambda h, rbm= rbm, h2v= h2v: h2v(sess.run(rbm.h2v, feed_dict= {rbm.h: h}))
         # # generate hidden states from this rbm
         # batchit = rbm.gen(sess, k= k, ret_v= False, ret_h= True)
         # upward closure of this rbm, translating visibles to hiddens
         rbm.v2h = binary(rbm.hgv, transform= False, threshold= False)
         v2h = lambda v, rbm= rbm: sess.run(rbm.v2h, feed_dict= {rbm.v_: v})
         batchit = map(v2h, batchit)
     for _ in range(sleep): sess.run(self.sleep, feed_dict= {self.k_: k, self.lr_: lr})
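A sketch of a driver for this greedy layer-wise pretraining; the endless MNIST-style batch iterator `batches()` and the log directory are assumptions:

dbn = Dbn(dim=(784, 256, 64), samples=100)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
wtr = tf.summary.FileWriter('/tmp/lgm', sess.graph)
dbn.pre(sess, wtr, batches(), k=4, lr=0.01,
        steps=6000, step_plot=600, sleep=600)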
Example #7
File: sbn.py Project: ysmiraak/lgm
 def __init__(self, dim, samples
              , init_w= tf.random_uniform_initializer(minval= -0.01, maxval= 0.01)
              , ftype= tf.float32, scope= 'sbn'):
     self.dim, self.ftype, self.scope = dim, ftype, scope
     with tf.variable_scope(scope):
         self.wr = tuple(
             tf.get_variable(name= "wr{}".format(i), shape= (dim_d, dim_a), initializer= init_w)
             for i, (dim_d, dim_a) in enumerate(zip(self.dim, self.dim[1:]), 1))
         self.wg = tuple(
             tf.get_variable(name= "wg{}".format(i), shape= (dim_a, dim_d), initializer= init_w)
             for i, (dim_d, dim_a) in enumerate(zip(self.dim, self.dim[1:]), 1))[::-1]
         self.lr_ = tf.placeholder(name= 'lr_', dtype= self.ftype, shape= ())
         # wake
         self.v_ = tf.placeholder(name= 'v_', dtype= self.ftype, shape= (None, self.dim[0]))
         with tf.name_scope('wake'):
             recogn = [self.v_]
             for w in self.wr: recogn.append(binary(tf.matmul(recogn[-1], w)))
             self.recogn = tuple(recogn)
             recogn = recogn[::-1]
             eps = self.lr_ / tf.cast(tf.shape(self.v_)[0], dtype= self.ftype)
             self.wake = tuple(
                 w.assign_add(tf.matmul(sk, (sj - pj), transpose_a= True) * eps).op
                 for w, sk, sj, pj in zip(
                         self.wg, recogn, recogn[1:]
                         , (tf.sigmoid(tf.matmul(s, w))
                            for w, s in zip(self.wg, recogn))))
         # sleep
         with tf.name_scope('a'):
             self.a = tf.round(tf.random_uniform(shape= (samples, self.dim[-1])))
         with tf.name_scope('sleep'):
             recons = [self.a]
             for w in self.wg: recons.append(binary(tf.matmul(recons[-1], w)))
             self.recons = tuple(recons)
             recons = recons[::-1]
             eps = self.lr_ / tf.cast(tf.shape(self.a)[0], dtype= self.ftype)
             self.sleep = tuple(
                 w.assign_add(tf.matmul(sj, (sk - qk), transpose_a= True) * eps).op
                 for w, sj, sk, qk in zip(
                         self.wr, recons, recons[1:]
                         , (tf.sigmoid(tf.matmul(s, w))
                            for w, s in zip(self.wr, recons))))
         # the waking world is the amnesia of dream.
         self.v = self.recons[-1]
         self.step = 0
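Unlike the Dbn, the sleep phase here needs no Gibbs chain: the top-layer states `a` are freshly drawn at random on every run. One wake-sleep step, sketched under the assumption that the class is named `Sbn` and `batch` is a (batch_size, dim[0]) array:

sbn = Sbn(dim=(784, 256, 64), samples=100)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(sbn.wake, feed_dict={sbn.v_: batch, sbn.lr_: 0.01})
    sess.run(sbn.sleep, feed_dict={sbn.lr_: 0.01})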
Example #8
File: rbm.py Project: ysmiraak/lgm
 def gibbs(v, _h):
     h = binary(tf.matmul(v, self.w))
     v = binary(tf.matmul(h, self.w, transpose_b= True))
     # todo real valued v
     # v = tf.sigmoid(tf.matmul(h, self.w, transpose_b= True))
     return v, h
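The persistent chain states `v` and `h` that this transition operator reads and writes are created by `binary_variable`, which is also not shown. A plausible reconstruction, an assumption rather than the project's code:

def binary_variable(name, shape, dtype=tf.float32):
    # assumed: a non-trainable variable holding random {0, 1} states,
    # serving as a persistent Gibbs chain
    return tf.get_variable(
        name, dtype=dtype, trainable=False,
        initializer=tf.round(tf.random_uniform(shape, dtype=dtype)))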
Example #9
    def __init__(self, dim, samples
                 , init_w= tf.random_uniform_initializer(minval= -0.01, maxval= 0.01)
                 , ftype= tf.float32, scope= 'dbm'):
        self.dim, self.ftype = dim, ftype
        # todo pretraining
        with tf.variable_scope(scope):
            self.rbm = tuple(
                Rbm(scope= "rbm{}".format(i)
                    , dim_v= dim_v
                    , dim_h= dim_h
                    , samples= samples
                    , init_w= init_w
                    , ftype= self.ftype)
                for i, (dim_v, dim_h) in enumerate(zip(dim, dim[1:]), 1))
            self.w = tuple(rbm.w for rbm in self.rbm)
            # positive stage: variational inference
            self.m = tuple(rbm.h for rbm in self.rbm)
            self.v_ = self.rbm[0].v_
            self.k_meanf_ = tf.placeholder(name= 'k_meanf_', dtype= tf.int32, shape= ())

            def meanf(m):
                mf, ml = [], self.v_
                for wl, wr, mr in zip(self.w, self.w[1:], m[1:]):
                    mf.append(tf.sigmoid(tf.matmul(ml, wl) + tf.matmul(mr, wr, transpose_b= True)))
                    ml = mf[-1]
                mf.append(tf.sigmoid(tf.matmul(ml, self.w[-1])))
                return tuple(mf)

            with tf.name_scope('meanf'):
                self.meanf = tuple(
                    tf.assign(m, mf, validate_shape= False) for m, mf in zip(
                        self.m, tf.while_loop(
                            loop_vars= (self.k_meanf_, self.m)
                            , cond= lambda k, _: (0 < k)
                            , body= lambda k, m: (k - 1, meanf(m)))[1]))

            with tf.name_scope('pos'):
                bs = tf.cast(tf.shape(self.v_)[0], dtype= self.ftype)
                vm = (self.v_,) + self.meanf
                self.pos = tuple((tf.matmul(ml, mr, transpose_a= True) / bs) for ml, mr in zip(vm, vm[1:]))
            # negative stage: stochastic approximation
            self.x = tuple(rbm.v for rbm in self.rbm)
            self.x += (binary_variable(name= 'x', shape= (samples, self.dim[-1]), dtype= self.ftype),)
            self.v = self.x[0]
            self.k_gibbs_ = tf.placeholder(name= 'k_gibbs_', dtype= tf.int32, shape= ())

            def gibbs(x):
                x = list(x)
                # update odd layers
                for i, (xl, xr, wl, wr) in enumerate(zip(x[::2], x[2::2], self.w[::2], self.w[1::2])):
                    x[1+(2*i)] = binary(tf.matmul(xl, wl) + tf.matmul(xr, wr, transpose_b= True))
                # update first layer
                x[0] = binary(tf.matmul(x[1], self.w[0], transpose_b= True))
                # update even layers
                for i, (xl, xr, wl, wr) in enumerate(zip(x[1::2], x[3::2], self.w[1::2], self.w[2::2])):
                    x[2+(2*i)] = binary(tf.matmul(xl, wl) + tf.matmul(xr, wr, transpose_b= True))
                # update last layer
                x[-1] = binary(tf.matmul(x[-2], self.w[-1]))
                return tuple(x)

            with tf.name_scope('gibbs'):
                x = self.gibbs = tuple(
                    tf.assign(x, xg, validate_shape= False) for x, xg in zip(
                        self.x, tf.while_loop(
                            loop_vars= (self.k_gibbs_, self.x)
                            , cond= lambda k, x: (0 < k)
                            , body= lambda k, x: (k - 1, gibbs(x)))[1]))

            with tf.name_scope('neg'):
                bs = tf.cast(tf.shape(self.v)[0], dtype= self.ftype)
                self.neg = tuple((tf.matmul(xl, xr, transpose_a= True) / bs) for xl, xr in zip(x, x[1:]))
            # parameter update
            self.lr_ = tf.placeholder(name= 'lr_', dtype= self.ftype, shape= ())
            with tf.name_scope('up'):
                self.up = tuple(
                    w.assign_add((pos - neg) * self.lr_).op
                    for w, pos, neg in zip(self.w, self.pos, self.neg))
            self.step = 0
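As with the Rbm, running `up` chains the whole update together: `pos` runs `k_meanf_` mean-field sweeps on the data batch and `neg` advances the persistent chains by `k_gibbs_` Gibbs sweeps. One training step, sketched under the assumption that the class is named `Dbm` and `batch` is a (batch_size, dim[0]) array:

dbm = Dbm(dim=(784, 256, 64), samples=100)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(dbm.up, feed_dict={
        dbm.v_: batch,     # data batch, a hypothetical array
        dbm.k_meanf_: 10,  # mean-field sweeps for the positive phase
        dbm.k_gibbs_: 5,   # Gibbs sweeps on the persistent chains
        dbm.lr_: 0.01})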