Example #1
    def __init__(self, sess, n_features, lr=0.01):
        self.sess = sess
        self.s = tf.placeholder(tf.float32, [1, n_features], "state")
        self.v_ = tf.placeholder(tf.float32, [1, 1], "v_next")
        self.r = tf.placeholder(tf.float32, None, 'r')

        with tf.variable_scope(
                'Critic'
        ):  # we use Value-function here, not Action-Value-function
            n = InputLayer(self.s, name='in')
            n = DenseLayer(n,
                           n_units=30,
                           act=tf.nn.relu6,
                           W_init=tf.random_uniform_initializer(0, 0.01),
                           name='hidden')
            # n = DenseLayer(n, n_units=5, act=tf.nn.relu, W_init=tf.random_uniform_initializer(0, 0.01), name='hidden2')
            n = DenseLayer(n, n_units=1, act=tf.identity, name='V')
            self.v = n.outputs

        with tf.variable_scope('squared_TD_error'):
            # TD_error = r + lambd * V(newS) - V(S)
            self.td_error = self.r + LAMBDA * self.v_ - self.v
            self.loss = tf.square(self.td_error)

        with tf.variable_scope('train'):
            self.train_op = tf.train.AdamOptimizer(lr).minimize(self.loss)
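
A minimal usage sketch (not part of the scraped snippet): assuming this __init__ belongs to a Critic class and LAMBDA is the module-level discount factor referenced above, one TD(0) update can be driven like this.

import numpy as np

def critic_learn(critic, s, r, s_):
    """One TD(0) step: bootstrap V(s'), then minimize (r + LAMBDA * V(s') - V(s))^2."""
    s, s_ = s[np.newaxis, :], s_[np.newaxis, :]
    v_next = critic.sess.run(critic.v, {critic.s: s_})            # shape (1, 1)
    td_error, _ = critic.sess.run([critic.td_error, critic.train_op],
                                  {critic.s: s, critic.v_: v_next, critic.r: r})
    return td_error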
Example #2
    def build_classifier(self, im, inf_norm, reuse=False):
        with tf.variable_scope("C", reuse=reuse) as vs:
            x = tf.reshape(im, [-1, 64, 64, 3])
            xmin = tf.clip_by_value(x - inf_norm, 0., 1.)
            xmax = tf.clip_by_value(x + inf_norm, 0., 1.)
            x = tf.random_uniform(tf.shape(x), xmin, xmax, dtype=tf.float32)

            #x = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), x)

            net = InputLayer(x)
            n_filters = 3
            for i in range(2):
                net = Conv2dLayer(net, \
                        act=tf.nn.relu, \
                        shape=[5,5,n_filters,64], \
                        name="conv_" + str(i))
                net = MaxPool2d(net, \
                        filter_size=(3,3), \
                        strides=(2,2), \
                        name="mpool_" + str(i))
                net = LocalResponseNormLayer(net, \
                        depth_radius=4, \
                        bias=1.0, \
                        alpha=0.001 / 9.0, \
                        beta=0.75, \
                        name="lrn_" + str(i))
                n_filters = 64
            net = FlattenLayer(net)
            net = DenseLayer(net, n_units=384, act=tf.nn.relu, name="d1")
            net = DenseLayer(net, n_units=192, act=tf.nn.relu, name="d2")
            net = DenseLayer(net, n_units=2, act=tf.identity, name="final")
            cla_vars = tf.contrib.framework.get_variables(vs)
        return net.outputs, cla_vars
Example #3
def model(x, is_train):
    with tf.variable_scope("model", reuse=tf.AUTO_REUSE):
        net = InputLayer(x, name='input')
        net = Conv2d(net,
                     64, (5, 5), (1, 1),
                     padding='SAME',
                     b_init=None,
                     name='cnn1')
        net = BatchNormLayer(net,
                             decay=0.99,
                             is_train=is_train,
                             act=tf.nn.relu,
                             name='batch1')
        net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool1')

        net = Conv2d(net,
                     64, (5, 5), (1, 1),
                     padding='SAME',
                     b_init=None,
                     name='cnn2')
        net = BatchNormLayer(net,
                             decay=0.99,
                             is_train=is_train,
                             act=tf.nn.relu,
                             name='batch2')
        net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool2')

        net = FlattenLayer(net, name='flatten')
        net = DenseLayer(net, 384, act=tf.nn.relu, name='d1relu')
        net = DenseLayer(net, 192, act=tf.nn.relu, name='d2relu')
        net = DenseLayer(net, 10, act=None, name='output')
    return net
Example #4
def celebA_classifier(ims, reuse):
    with tf.variable_scope("C", reuse=reuse) as vs:
        net = InputLayer(ims)
        n_filters = 3
        for i in range(2):
            net = Conv2dLayer(net, \
                    act=tf.nn.relu, \
                    shape=[5,5,n_filters,64], \
                    name="conv_" + str(i))
            net = MaxPool2d(net, \
                    filter_size=(3,3), \
                    strides=(2,2), \
                    name="mpool_" + str(i))
            net = LocalResponseNormLayer(net, \
                    depth_radius=4, \
                    bias=1.0, \
                    alpha=0.001 / 9.0, \
                    beta=0.75, \
                    name="lrn_" + str(i))
            n_filters = 64
        net = FlattenLayer(net)
        net = DenseLayer(net, n_units=384, act=tf.nn.relu, name="d1")
        net = DenseLayer(net, n_units=192, act=tf.nn.relu, name="d2")
        net = DenseLayer(net, n_units=2, act=tf.identity, name="final")
        cla_vars = tf.contrib.framework.get_variables(vs)
        if not reuse:
            return net.outputs, tf.argmax(net.outputs, axis=1), cla_vars
    return net.outputs, tf.argmax(net.outputs, axis=1)
Example #5
def model(x, is_train, reuse):
    with tf.variable_scope("STN", reuse=reuse):
        nin = InputLayer(x, name='in')
        ## 1. Localisation network
        # use MLP as the localisation net
        nt = FlattenLayer(nin, name='flatten')
        nt = DenseLayer(nt, n_units=20, act=tf.nn.tanh, name='dense1')
        nt = DropoutLayer(nt, 0.8, True, is_train, name='drop1')
        # you can also use a CNN instead of an MLP as the localisation net
        # nt = Conv2d(nin, 16, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME', name='tc1')
        # nt = Conv2d(nt, 8, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME', name='tc2')
        ## 2. Spatial transformer module (sampler)
        n = SpatialTransformer2dAffineLayer(nin,
                                            nt,
                                            out_size=[40, 40],
                                            name='spatial')
        s = n
        ## 3. Classifier
        n = Conv2d(n,
                   16, (3, 3), (2, 2),
                   act=tf.nn.relu,
                   padding='SAME',
                   name='conv1')
        n = Conv2d(n,
                   16, (3, 3), (2, 2),
                   act=tf.nn.relu,
                   padding='SAME',
                   name='conv2')
        n = FlattenLayer(n, name='flatten2')
        n = DenseLayer(n, n_units=1024, act=tf.nn.relu, name='out1')
        n = DenseLayer(n, n_units=10, act=tf.identity, name='out2')
    return n, s
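
A minimal wiring sketch for the STN model above; the 40x40 grayscale input (as in the TensorLayer spatial-transformer tutorial) and the optimizer settings are assumptions.

import tensorflow as tf
import tensorlayer as tl

x = tf.placeholder(tf.float32, [None, 40, 40, 1], name='x')   # assumed padded-MNIST input
y_ = tf.placeholder(tf.int64, [None], name='y_')

net_train, _ = model(x, is_train=True, reuse=False)
net_test, net_trans = model(x, is_train=False, reuse=True)     # net_trans holds the transformed patches

cost = tl.cost.cross_entropy(net_train.outputs, y_, name='cost')
train_op = tf.train.AdamOptimizer(1e-4).minimize(cost)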
Example #6
def model_batch_norm(x_crop, y_, is_train, reuse):
    W_init = tf.truncated_normal_initializer(stddev=5e-2)
    W_init2 = tf.truncated_normal_initializer(stddev=0.04)
    b_init2 = tf.constant_initializer(value=0.1)
    with tf.variable_scope("model", reuse=reuse):
        net = InputLayer(x_crop, name='input')
        net = Conv2d(net, 64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='cnn1')
        net = BatchNormLayer(net, decay=0.99, is_train=is_train, act=tf.nn.relu, name='batch1')
        net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool1')

        net = Conv2d(net, 64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='cnn2')
        net = BatchNormLayer(net, decay=0.99, is_train=is_train, act=tf.nn.relu, name='batch2')
        net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool2')

        net = FlattenLayer(net, name='flatten')
        net = DenseLayer(net, 384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu')
        net = DenseLayer(net, 192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu')
        net = DenseLayer(net, n_units=10, act=None, W_init=W_init2, name='output')
        y = net.outputs

        ce = tl.cost.cross_entropy(y, y_, name='cost')
        # L2 regularization for the MLP; without it, accuracy drops by about 15%.
        L2 = 0
        for p in tl.layers.get_variables_with_name('relu/W', True, True):
            L2 += tf.contrib.layers.l2_regularizer(0.004)(p)
        cost = ce + L2

        correct_prediction = tf.equal(tf.cast(tf.argmax(y, 1), tf.int32), y_)
        acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        return net, cost, acc
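
A minimal training sketch for model_batch_norm; the 24x24 crop size, placeholder dtypes, and learning rate are assumptions based on the standard CIFAR-10 setup.

import tensorflow as tf

x_crop = tf.placeholder(tf.float32, [None, 24, 24, 3], name='x_crop')
y_ = tf.placeholder(tf.int32, [None], name='y_')

net, cost, acc = model_batch_norm(x_crop, y_, is_train=True, reuse=False)
_, cost_test, acc_test = model_batch_norm(x_crop, y_, is_train=False, reuse=True)

train_op = tf.train.AdamOptimizer(1e-4).minimize(cost)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # per mini-batch: sess.run([train_op, cost, acc], {x_crop: X_batch, y_: y_batch})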
Example #7
    def __init__(self, sess, n_features, n_actions, lr=0.001):
        self.sess = sess
        self.s = tf.placeholder(tf.float32, [1, n_features], "state")
        self.a = tf.placeholder(tf.int32, [None], "act")
        self.td_error = tf.placeholder(tf.float32, [None],
                                       "td_error")  # TD_error

        with tf.variable_scope('Actor'):  # Policy network
            n = InputLayer(self.s, name='in')
            n = DenseLayer(n,
                           n_units=30,
                           act=tf.nn.relu6,
                           W_init=tf.random_uniform_initializer(0, 0.01),
                           name='hidden')
            # n = DenseLayer(n, n_units=10, act=tf.nn.relu6, W_init=tf.random_uniform_initializer(0, 0.01), name='hidden2')
            n = DenseLayer(n, n_units=n_actions, name='Pi')
            self.acts_logits = n.outputs
            self.acts_prob = tf.nn.softmax(self.acts_logits)

        # Hao Dong
        with tf.variable_scope('loss'):
            self.exp_v = tl.rein.cross_entropy_reward_loss(
                logits=self.acts_logits,
                actions=self.a,
                rewards=self.td_error,
                name='actor_weighted_loss')

        with tf.variable_scope('train'):
            self.train_op = tf.train.AdamOptimizer(lr).minimize(self.exp_v)
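
A minimal usage sketch (assumptions: this __init__ belongs to an Actor class and td is the TD error produced by the Critic in Example #1).

import numpy as np

def actor_choose_action(actor, s):
    probs = actor.sess.run(actor.acts_prob, {actor.s: s[np.newaxis, :]})   # (1, n_actions)
    return np.random.choice(np.arange(probs.shape[1]), p=probs.ravel())

def actor_learn(actor, s, a, td):
    # td (the Critic's TD error) weights the log-likelihood of the chosen action
    feed = {actor.s: s[np.newaxis, :], actor.a: [a], actor.td_error: np.ravel(td)}
    _, exp_v = actor.sess.run([actor.train_op, actor.exp_v], feed)
    return exp_v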
Example #8
def model(x, y_, reuse, is_train=False):
    W_init = tf.truncated_normal_initializer(stddev=5e-2)
    W_init2 = tf.truncated_normal_initializer(stddev=0.04)
    b_init2 = tf.constant_initializer(value=0.1)
    with tf.variable_scope("model", reuse=reuse):
        tl.layers.set_name_reuse(reuse)
        net = InputLayer(x, name='input')
        net = Conv2d(net,
                     32, (3, 3), (1, 1),
                     act=tf.nn.relu,
                     padding='SAME',
                     W_init=W_init,
                     name='cnn1')
        net = Conv2d(net,
                     32, (3, 3), (1, 1),
                     act=tf.nn.relu,
                     W_init=W_init,
                     name='cnn2',
                     padding="VALID")
        net = MaxPool2d(net, name='pool1', padding="VALID")
        net = DropoutLayer(net, keep=0.75, is_train=is_train, name='drop1')

        net = Conv2d(net,
                     64, (3, 3), (1, 1),
                     act=tf.nn.relu,
                     padding='SAME',
                     W_init=W_init,
                     name='cnn3')
        net = Conv2d(net,
                     64, (3, 3), (1, 1),
                     act=tf.nn.relu,
                     W_init=W_init,
                     name='cnn4',
                     padding="VALID")
        net = MaxPool2d(net, name='pool2', padding="VALID")
        net = DropoutLayer(net, keep=0.75, is_train=is_train, name='drop2')

        net = FlattenLayer(net, name='flatten')
        net = DenseLayer(net,
                         n_units=512,
                         act=tf.nn.relu,
                         W_init=W_init2,
                         b_init=b_init2,
                         name='d1relu')
        net = DenseLayer(net,
                         n_units=10,
                         act=tf.identity,
                         W_init=tf.truncated_normal_initializer(stddev=1 /
                                                                192.0),
                         name='output')  # output: (batch_size, 10)
        y = net.outputs

        loss = tl.cost.cross_entropy(y, y_, name='cost')

        correct_prediction = tf.equal(tf.argmax(y, 1), y_)
        acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        return net, loss, acc
Example #9
def discriminator(inputs, is_train=True):
    with tf.variable_scope("discriminator", reuse=tf.AUTO_REUSE):
        net_in = InputLayer(inputs, name='din')

        #Conv2d is tf.nn.conv2d + tf.nn.relu
        dnet_c0 = Conv2d(net_in,
                         64, (8, 8), (2, 2),
                         act=tf.nn.relu,
                         padding='SAME',
                         name='dnet_c0')

        #Conv2d is tf.nn.conv2d
        #BatchNormLayer is tf.nn.batch_normalization + tf.nn.relu
        dnet_c1 = Conv2d(dnet_c0,
                         128, (8, 8), (2, 2),
                         act=None,
                         padding='SAME',
                         name='dnet_c1')
        dnet_b1 = BatchNormLayer(dnet_c1,
                                 decay=0.9,
                                 act=tf.nn.relu,
                                 is_train=is_train,
                                 name='dnet_b1')

        #    dnet_p1 = MaxPool2d(dnet_b1, (2, 2), name='pool2')   #Don't use pool layer, it is not good. But you can try.

        dnet_c2 = Conv2d(dnet_b1,
                         256, (8, 8), (2, 2),
                         act=None,
                         padding='SAME',
                         name='dnet_c2')
        dnet_b2 = BatchNormLayer(dnet_c2,
                                 decay=0.9,
                                 act=tf.nn.relu,
                                 is_train=is_train,
                                 name='dnet_b2')

        dnet_c3 = Conv2d(dnet_b2,
                         512, (8, 8), (2, 2),
                         act=None,
                         padding='SAME',
                         name='dnet_c3')
        dnet_b3 = BatchNormLayer(dnet_c3,
                                 decay=0.9,
                                 act=tf.nn.relu,
                                 is_train=is_train,
                                 name='dnet_b3')

        #FlattenLayer is tf.reshape
        dnet_f1 = FlattenLayer(dnet_b3, name='dnet_f1')
        #DenseLayer is tf.layers.dense, the full-connected
        dnet_d1 = DenseLayer(dnet_f1,
                             n_units=1,
                             act=tf.identity,
                             name='dnet_h4')
        logits = dnet_d1.outputs
        dnet_d1.outputs = tf.nn.sigmoid(dnet_d1.outputs)
    return dnet_d1, logits
Example #10
    def build_classifier(self, im, inf_norm, reuse=False):
        with tf.variable_scope('C', reuse=reuse) as vs:
            tensorlayer.layers.set_name_reuse(reuse)
            
            x = tf.reshape(im, [-1, 3, 32, 32])
            x = tf.transpose(x, [0, 2, 3, 1])
            
            xmin = tf.clip_by_value(x - inf_norm, 0., 1.)
            xmax = tf.clip_by_value(x + inf_norm, 0., 1.)
            x = tf.random_uniform(tf.shape(x), xmin, xmax, dtype=tf.float32)
            
            # Crop the central [height, width] of the image.
            # x = tf.image.resize_image_with_crop_or_pad(x, 24, 24)
            x = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), x)
            
            net = InputLayer(x)
            net = Conv2dLayer(net, \
                    act=tf.nn.relu, \
                    shape=[5,5,3,64], \
                    name="conv1")
            net = MaxPool2d(net, \
                    filter_size=(3,3), \
                    strides=(2,2), \
                    name="pool1")
            net = LocalResponseNormLayer(net, \
                    depth_radius=4, \
                    bias=1.0, \
                    alpha = 0.001/9.0, \
                    beta = 0.75, \
                    name="norm1")
            net = Conv2dLayer(net, \
                    act=tf.nn.relu, \
                    shape=[5,5,64,64], \
                    name="conv2")
            net = LocalResponseNormLayer(net, \
                    depth_radius=4, \
                    bias=1.0, \
                    alpha=0.001/9.0, \
                    beta = 0.75, \
                    name="norm2")
            net = MaxPool2d(net, \
                    filter_size=(3,3), \
                    strides=(2,2), \
                    name="pool2")
            net = FlattenLayer(net, name="flatten_1")
            net = DenseLayer(net, n_units=384, name="local3", act=tf.nn.relu)

            net = DenseLayer(net, n_units=192, name="local4", act=tf.nn.relu)
            net = DenseLayer(net, n_units=10, name="softmax_linear", act=tf.identity)

            cla_vars = tf.contrib.framework.get_variables(vs)
            def name_fixer(var):
                return var.op.name.replace("W", "weights") \
                                    .replace("b", "biases") \
                                    .replace("weights_conv2d", "weights") \
                                    .replace("biases_conv2d", "biases")
            cla_vars = {name_fixer(var): var for var in cla_vars}
            return net.outputs, cla_vars
Example #11
def discriminator1(inputs, is_train=True, reuse=False):
    df_dim = 32   # Dimension of discrim filters in first conv layer. [64]
    w_init = tf.glorot_normal_initializer()
    gamma_init = tf.random_normal_initializer(1., 0.02)
    lrelu = lambda x : tf.nn.leaky_relu(x, 0.2)

    with tf.name_scope("DISCRIMINATOR1"):
        with tf.variable_scope("discriminator", reuse=reuse):
        
            with tf.name_scope("net_in"):
                net_in = InputLayer(inputs, name='d/in')

            with tf.name_scope("layer0"):
                net_h0 = Conv2d(net_in, df_dim, (3, 3), (3, 3), act=lrelu,
                    padding='SAME', W_init=w_init, name='d/h0/conv2d')

            with tf.name_scope("layer1"):
                net_h1 = Conv2d(net_h0, df_dim*2, (3, 3), (3, 3), act=None,
                    padding='SAME', W_init=w_init, name='d/h1/conv2d')
                net_h1 = BatchNormLayer(net_h1, decay=0.9, act=lrelu,
                    is_train=is_train, gamma_init=gamma_init, name='d/h1/batch_norm')

            with tf.name_scope("layer2"):
                net_h2 = Conv2d(net_h1, df_dim*4, (3, 3), (3, 3), act=None,
                    padding='SAME', W_init=w_init, name='d/h2/conv2d')
                net_h2 = BatchNormLayer(net_h2, decay=0.9, act=lrelu,
                    is_train=is_train, gamma_init=gamma_init, name='d/h2/batch_norm')

            with tf.name_scope("layer3"):
                net_h3 = Conv2d(net_h2, df_dim*8, (3, 3), (3, 3), act=None,
                    padding='SAME', W_init=w_init, name='d/h3/conv2d')
                net_h3 = BatchNormLayer(net_h3, decay=0.9, act=lrelu,
                    is_train=is_train, gamma_init=gamma_init, name='d/h3/batch_norm')

            with tf.name_scope("layer4"):
                net_h4 = FlattenLayer(net_h3, name='d/h4/flatten')
                net_h4 = DenseLayer(net_h4, n_units=df_dim*8, act=tf.identity,
                    W_init = w_init, name='d/h4/lin_sigmoid')

            with tf.name_scope("layer5"):
                net_h5 = FlattenLayer(net_h4, name='d/h5/flatten')
                net_h5 = DenseLayer(net_h5, n_units=df_dim*8, act=tf.identity,
                    W_init = w_init, name='d/h5/lin_sigmoid')

        #net_h6 = FlattenLayer(net_h5, name='d/h6/flatten')
            with tf.name_scope("layer6"):
                net_h6= DenseLayer(net_h5, n_units=2, act=tf.identity,
                    W_init = w_init, name='d/h6/lin_sigmoid')
                logits1 = net_h6.outputs
                net_h6.outputs = tf.nn.softplus(net_h6.outputs)
        return net_h6, logits1
Example #12
def cifar10_classifier(im, reuse):
    with tf.variable_scope('C', reuse=reuse) as vs:
        net = InputLayer(im)
        net = Conv2dLayer(net, \
                act=tf.nn.relu, \
                shape=[5,5,3,64], \
                name="conv1")
        net = MaxPool2d(net, \
                filter_size=(3,3), \
                strides=(2,2), \
                name="pool1")
        net = LocalResponseNormLayer(net, \
                depth_radius=4, \
                bias=1.0, \
                alpha = 0.001/9.0, \
                beta = 0.75, \
                name="norm1")
        net = Conv2dLayer(net, \
                act=tf.nn.relu, \
                shape=[5,5,64,64], \
                name="conv2")
        net = LocalResponseNormLayer(net, \
                depth_radius=4, \
                bias=1.0, \
                alpha=0.001/9.0, \
                beta = 0.75, \
                name="norm2")
        net = MaxPool2d(net, \
                filter_size=(3,3), \
                strides=(2,2), \
                name="pool2")
        net = FlattenLayer(net, name="flatten_1")
        net = DenseLayer(net, n_units=384, name="local3", act=tf.nn.relu)
        net = DenseLayer(net, n_units=192, name="local4", act=tf.nn.relu)
        net = DenseLayer(net,
                         n_units=10,
                         name="softmax_linear",
                         act=tf.identity)
        cla_vars = tf.contrib.framework.get_variables(vs)

        def name_fixer(var):
            return var.op.name.replace("W", "weights") \
                                .replace("b", "biases") \
                                .replace("weights_conv2d", "weights") \
                                .replace("biases_conv2d", "biases")

        cla_vars = {name_fixer(var): var for var in cla_vars}
        if not reuse:
            return net.outputs, tf.argmax(net.outputs, axis=1), cla_vars
        return net.outputs, tf.argmax(net.outputs, axis=1)
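
The name_fixer above renames the TensorLayer variables to match the TensorFlow CIFAR-10 tutorial checkpoint; a hedged restore sketch follows (the input shape and checkpoint path are assumptions).

import tensorflow as tf

im = tf.placeholder(tf.float32, [None, 24, 24, 3], name='im')
logits, preds, cla_vars = cifar10_classifier(im, reuse=False)

saver = tf.train.Saver(var_list=cla_vars)          # keys are checkpoint names, values are TL variables
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, "cifar10_train/model.ckpt-100000")   # assumed checkpoint path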
Example #13
def model(encode_seqs, decode_seqs, is_train=True, reuse=False):
    with tf.variable_scope("model", reuse=reuse):
        with tf.variable_scope("embedding") as vs:
            net_encode = EmbeddingInputlayer(
                inputs=encode_seqs,
                vocabulary_size=xvocab_size,
                embedding_size=embedding_dimension,
                name='seq_embedding')
            vs.reuse_variables()
            net_decode = EmbeddingInputlayer(
                inputs=decode_seqs,
                vocabulary_size=xvocab_size,
                embedding_size=embedding_dimension,
                name='seq_embedding')
        net_rnn = Seq2Seq(
            net_encode,
            net_decode,
            cell_fn=tf.contrib.rnn.BasicLSTMCell,
            n_hidden=embedding_dimension,
            initializer=tf.random_uniform_initializer(-0.1, 0.1),
            encode_sequence_length=retrieve_seq_length_op2(encode_seqs),
            decode_sequence_length=retrieve_seq_length_op2(decode_seqs),
            initial_state_encode=None,
            n_layer=3,
            return_seq_2d=True,
            name='seq2seq')
        net_out = DenseLayer(net_rnn,
                             n_units=xvocab_size,
                             act=tf.identity,
                             name='output')
    return net_out, net_rnn
Example #14
    def inference(self, inputs, n_classes):
        if (get_name_scope() != 'train') and (get_name_scope() != 'test'):
            raise Exception('name_scope is not train or test')

        reuse = self.get_resue()
        inputs = tf.identity(inputs, 'placeholder_inputs')

        op_ = self.op_set[get_name_scope()]
        with tf.variable_scope(self.model_name, reuse=reuse):
            tl.layers.set_name_reuse(reuse)
            network = self.network_func(inputs, get_name_scope() == 'train')
            op_['step'] = tf.get_variable(name='train_step',
                                          shape=(),
                                          dtype=tf.int32)
            op_['update_step'] = tf.assign(op_['step'],
                                           tf.add(op_['step'],
                                                  tf.constant(1),
                                                  name='train_step_add_one'),
                                           name='update_train_step')
            op_['network'] = DenseLayer(network,
                                        n_units=n_classes,
                                        act=tl.activation.identity,
                                        name='logits')

        op_['logits'] = op_['network'].outputs
        op_['softmax_logits'] = tf.nn.softmax(op_['logits'])
        op_['result'] = tf.argmax(op_['logits'], 1)
        return self
Example #15
def create_model(encode_seqs, decode_seqs, src_vocab_size, emb_dim, is_train=True, reuse=False):
    with tf.variable_scope("model", reuse=reuse):
        # for chatbot, you can use the same embedding layer,
        # for translation, you may want to use 2 separated embedding layers
        with tf.variable_scope("embedding") as vs:
            net_encode = EmbeddingInputlayer(
                inputs = encode_seqs,
                vocabulary_size = src_vocab_size,
                embedding_size = emb_dim,
                name = 'seq_embedding')
            vs.reuse_variables()
            net_decode = EmbeddingInputlayer(
                inputs = decode_seqs,
                vocabulary_size = src_vocab_size,
                embedding_size = emb_dim,
                name = 'seq_embedding')
            
        net_rnn = Seq2Seq(net_encode, net_decode,
                cell_fn = tf.nn.rnn_cell.LSTMCell,
                n_hidden = emb_dim,
                initializer = tf.random_uniform_initializer(-0.1, 0.1),
                encode_sequence_length = retrieve_seq_length_op2(encode_seqs),
                decode_sequence_length = retrieve_seq_length_op2(decode_seqs),
                initial_state_encode = None,
                dropout = (0.5 if is_train else None),
                n_layer = 1,
                return_seq_2d = True,
                name = 'seq2seq')

        net_out = DenseLayer(net_rnn, n_units=src_vocab_size, act=tf.identity, name='output')
    return net_out, net_rnn
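
A minimal sketch of wiring create_model into the usual masked sequence loss; batch size, vocabulary size, and placeholder names are assumptions.

import tensorflow as tf
import tensorlayer as tl

batch_size, src_vocab_size, emb_dim = 32, 8000, 1024
encode_seqs = tf.placeholder(tf.int64, [batch_size, None], name='encode_seqs')
decode_seqs = tf.placeholder(tf.int64, [batch_size, None], name='decode_seqs')
target_seqs = tf.placeholder(tf.int64, [batch_size, None], name='target_seqs')
target_mask = tf.placeholder(tf.int64, [batch_size, None], name='target_mask')

net_out, net_rnn = create_model(encode_seqs, decode_seqs, src_vocab_size, emb_dim,
                                is_train=True, reuse=False)
loss = tl.cost.cross_entropy_seq_with_mask(logits=net_out.outputs, target_seqs=target_seqs,
                                           input_mask=target_mask, return_details=False, name='cost')
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)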
Example #16
    def __get_network_autoencoder__(self, model_name, encode_seqs, decode_seqs, reuse=False, is_train=True):
        with tf.variable_scope(model_name, reuse=reuse):
            tl.layers.set_name_reuse(reuse)

            net_encode = InputLayer(
                inputs=encode_seqs,
                name="in_word_embed_encode"
            )
            net_decode = InputLayer(
                inputs=decode_seqs,
                name="in_word_embed_decode"
            )

            net_seq2seq = Seq2Seq(
                net_encode, net_decode,
                cell_fn = tf.contrib.rnn.BasicLSTMCell,
                n_hidden = 512,
                initializer = tf.random_uniform_initializer(-0.1, 0.1),
                encode_sequence_length = retrieve_seq_length_op(encode_seqs),
                decode_sequence_length = retrieve_seq_length_op(decode_seqs),
                initial_state_encode = None,
                n_layer = 1,
                return_seq_2d = True,
                name = 'seq2seq'
            )
            net_out = DenseLayer(net_seq2seq, n_units=self.vocab_size, act=tf.identity, name='output')

        return net_out, net_seq2seq
Example #17
File: model.py  Project: zyw1218/OUCML
def discriminator(inputs, is_train=True, reuse=False):
    df_dim = 64  # Dimension of discrim filters in first conv layer. [64]
    w_init = tf.glorot_normal_initializer()
    gamma_init = tf.random_normal_initializer(1., 0.02)

    with tf.variable_scope("discriminator", reuse=reuse):

        net_in = InputLayer(inputs, name='d/in')
        net_h0 = Conv2d(net_in,
                        df_dim, (5, 5), (2, 2),
                        act=tf.nn.leaky_relu,
                        padding='SAME',
                        W_init=w_init,
                        name='d/h0/conv2d')

        net_h1 = Conv2d(net_h0,
                        df_dim * 2, (5, 5), (2, 2),
                        act=None,
                        padding='SAME',
                        W_init=w_init,
                        name='d/h1/conv2d')
        net_h1 = BatchNormLayer(net_h1,
                                act=tf.nn.leaky_relu,
                                is_train=is_train,
                                gamma_init=gamma_init,
                                name='d/h1/batch_norm')

        net_h2 = Conv2d(net_h1,
                        df_dim * 4, (5, 5), (2, 2),
                        act=None,
                        padding='SAME',
                        W_init=w_init,
                        name='d/h2/conv2d')
        net_h2 = BatchNormLayer(net_h2,
                                act=tf.nn.leaky_relu,
                                is_train=is_train,
                                gamma_init=gamma_init,
                                name='d/h2/batch_norm')

        net_h3 = Conv2d(net_h2,
                        df_dim * 8, (5, 5), (2, 2),
                        act=None,
                        padding='SAME',
                        W_init=w_init,
                        name='d/h3/conv2d')
        net_h3 = BatchNormLayer(net_h3,
                                act=tf.nn.leaky_relu,
                                is_train=is_train,
                                gamma_init=gamma_init,
                                name='d/h3/batch_norm')

        net_h4 = FlattenLayer(net_h3, name='d/h4/flatten')
        net_h4 = DenseLayer(net_h4,
                            n_units=1,
                            act=tf.identity,
                            W_init=w_init,
                            name='d/h4/lin_sigmoid')
        logits = net_h4.outputs
        net_h4.outputs = tf.nn.sigmoid(net_h4.outputs)
    return net_h4, logits
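
A minimal sketch of the DCGAN losses usually built from the (network, logits) pair returned above; the 64x64x3 image size and the existence of a paired generator are assumptions.

import tensorflow as tf
import tensorlayer as tl

real_images = tf.placeholder(tf.float32, [None, 64, 64, 3], name='real_images')
fake_images = tf.placeholder(tf.float32, [None, 64, 64, 3], name='fake_images')
# in a full setup, fake_images would be the generator's output tensor so that G receives gradients

_, d_logits_real = discriminator(real_images, is_train=True, reuse=False)
_, d_logits_fake = discriminator(fake_images, is_train=True, reuse=True)

d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=d_logits_real, labels=tf.ones_like(d_logits_real)))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=d_logits_fake, labels=tf.zeros_like(d_logits_fake)))
d_loss = d_loss_real + d_loss_fake
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=d_logits_fake, labels=tf.ones_like(d_logits_fake)))

d_vars = tl.layers.get_variables_with_name('discriminator', train_only=True, printable=False)
d_optim = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(d_loss, var_list=d_vars)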
Example #18
    def __init__(self,
                 layer=None,
                 x_recon=None,
                 name='recon_layer',
                 n_units=100,
                 visible_unit_type='bin',
                 weight_cost=0.0001,
                 momentum=0.5,
                 learning_rate=0.001,
                 gibbs_sampling_steps=1,
                 batch_size=100,
                 act=tf.nn.sigmoid,
                 b_init=tf.constant_initializer(value=0.1)):
        DenseLayer.__init__(self,
                            layer=layer,
                            n_units=n_units,
                            act=act,
                            name=name,
                            b_init=b_init)
        print("     [TL] %s is a ReconLayer" % self.name)

        self.train_params = self.all_params[-4:]
        self.layer = layer
        self.x = x_recon
        self.name = name
        self.n_units = n_units
        self.visible_unit_type = visible_unit_type
        self.weight_cost = weight_cost
        self.momentum = momentum
        self.learning_rate = learning_rate
        self.gibbs_sampling_steps = gibbs_sampling_steps
        self.batch_size = batch_size

        self.w_update = tf.Variable(tf.constant(0.0,
                                                shape=[n_units,
                                                       layer.n_units]),
                                    name='weights_update')
        self.bh_update = tf.Variable(tf.constant(0.0, shape=[layer.n_units]),
                                     name='bh_update')
        self.bv_update = tf.Variable(tf.constant(0.0, shape=[n_units]),
                                     name='bv_update')
        self.hrand = tf.placeholder(tf.float32, [None, layer.n_units],
                                    name='hrand')
        self.vrand = tf.placeholder(tf.float32, [None, n_units], name='vrand')
        self.outputs = self.sample_hidden_from_visible(self.x)[0]

        self._init_graph()
Example #19
def discriminator(inputs, is_train=True, reuse=False):
    dfs = 64
    gamma_init = tf.random_normal_initializer(1., 0.02)
    W_init = tf.random_normal_initializer(stddev=0.02)

    with tf.variable_scope('discriminator', reuse=reuse):
        tl.layers.set_name_reuse(reuse)

        d = InputLayer(inputs, name='d/inputs')
        d = Conv2d(d,
                   dfs, (5, 5), (2, 2),
                   W_init=W_init,
                   act=lambda x: tl.act.lrelu(x, 0.2),
                   name='d/conv1')

        d = Conv2d(d,
                   dfs * 2, (5, 5), (2, 2),
                   W_init=W_init,
                   act=None,
                   name='d/conv2')
        d = BatchNormLayer(d,
                           act=lambda x: tl.act.lrelu(x, 0.2),
                           is_train=is_train,
                           gamma_init=gamma_init,
                           name='d/bn3')

        d = Conv2d(d,
                   dfs * 4, (5, 5), (2, 2),
                   W_init=W_init,
                   act=None,
                   name='d/conv4')
        d = BatchNormLayer(d,
                           act=lambda x: tl.act.lrelu(x, 0.2),
                           is_train=is_train,
                           gamma_init=gamma_init,
                           name='d/bn5')

        d = Conv2d(d,
                   dfs * 8, (5, 5), (2, 2),
                   W_init=W_init,
                   act=None,
                   name='d/conv6')
        d = BatchNormLayer(d,
                           act=lambda x: tl.act.lrelu(x, 0.2),
                           is_train=is_train,
                           gamma_init=gamma_init,
                           name='d/bn7')

        d = FlattenLayer(d, name='d/flt8')
        d = DenseLayer(d,
                       1,
                       act=tl.act.identity,
                       W_init=W_init,
                       name='d/output')

        logits = d.outputs
        d.outputs = tf.nn.sigmoid(d.outputs)
        return d, logits
Example #20
File: demo.py  Project: KCSoftDL/--APP
    def vgg16_net(net_in,alpha1,alpha2):
        with tf.name_scope('preprocess') as scope:
            # subtract the global mean (normalization)
            net_in.outputs = net_in.outputs * 255.0
            mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32, shape=[1, 1, 1, 3], name='img_mean')
            net_in.outputs = net_in.outputs - mean
        """conv1"""
        network= Conv2d(net_in, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv1_1')
        network= Conv2d(network, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv1_2')
        network= MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool1')
        """conv2"""
        network = Conv2d(network, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',name='conv2_1')
        network_1 = Conv2d(network, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',name='conv2_2')
        network = MaxPool2d(network_1, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool2')
        """conv3"""
        network = Conv2d(network, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',name='conv3_1')
        network = Conv2d(network, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',name='conv3_2')
        network = Conv2d(network, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',name='conv3_3')
        """特征谱融合模块区——底层特征融合"""
        network_1=tf.nn.dilation2d(network_1,filter=256,strides=[1,1,1,1],rates=[1,3,3,1],padding='SAME',name='dilation1')
        network_1=MaxPool2d(network_1,filter_size=(2,2),strides=(2,2),padding='SAME',name='Pool6_1')
        # stands in for the Scale layer of the Caffe framework
        network_1=alpha1*tf.divide(network_1,tf.norm(network_1,ord='euclidean'))
        # main-branch features plus the processed low-level feature maps feed the next layer
        network=tf.add(network,network_1,name='Eltwise1')
        network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool3')
        """conv4"""
        network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',name='conv4_1')
        network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',name='conv4_2')
        network_1 = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',name='conv4_3')
        network = MaxPool2d(network_1, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool4')
        """conv5"""
        network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',name='conv5_1')
        network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',name='conv5_2')
        network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',name='conv5_3')
        """特征谱融合模块区——高层特征融合"""
        network_1= tf.nn.dilation2d(network_1, filter=512, strides=[1, 1, 1, 1], rates=[1, 3, 3, 1], padding='SAME',name='dilation2')

        network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool5')
        """fc_layer"""
        network=FlattenLayer(network,name='flatten')
        network=DenseLayer(network,n_units=4096,act=tf.nn.relu,name='fc1_relu')
        network = DenseLayer(network, n_units=4096, act=tf.nn.relu, name='fc2_relu')
        network = DenseLayer(network, n_units=1000, act=tf.identity, name='fc3_relu')
        return network
Example #21
    def __get_network__(self,
                        encode_seq,
                        decode_seq,
                        query_decode_seq,
                        is_train=True,
                        reuse=False):

        w_init = tf.random_normal_initializer(stddev=0.02)
        g_init = tf.random_normal_initializer(1., 0.02)

        with tf.variable_scope(self.model_name, reuse=reuse) as vs:
            tl.layers.set_name_reuse(reuse)
            net_encode_traffic = InputLayer(encode_seq, name='in_root_net')
            net_encode_query = InputLayer(self.query_x, name="in_query_net")
            net_encode = ConcatLayer([net_encode_traffic, net_encode_query],
                                     concat_dim=-1,
                                     name="encode")

            net_decode_traffic = InputLayer(decode_seq, name="decode_root")
            net_decode_query = InputLayer(query_decode_seq,
                                          name="decode_query_net")
            net_decode = ConcatLayer([net_decode_traffic, net_decode_query],
                                     concat_dim=-1,
                                     name="decode")

            net_rnn = Seq2Seq(
                net_encode,
                net_decode,
                cell_fn=tf.contrib.rnn.BasicLSTMCell,
                n_hidden=config.dim_hidden,
                initializer=tf.random_uniform_initializer(-0.1, 0.1),
                encode_sequence_length=tl.layers.retrieve_seq_length_op(
                    net_encode.outputs),
                decode_sequence_length=tl.layers.retrieve_seq_length_op(
                    net_decode.outputs),
                initial_state_encode=None,
                # dropout=(0.8 if is_train else None),
                dropout=None,
                n_layer=1,
                return_seq_2d=True,
                name='seq2seq')
            # net_out = DenseLayer(net_rnn, n_units=64, act=tf.identity, name='dense1')
            net_out = DenseLayer(net_rnn,
                                 n_units=1,
                                 act=tf.identity,
                                 name='dense2')
            if is_train:
                net_out = ReshapeLayer(
                    net_out, (config.batch_size, config.out_seq_length + 1, 1),
                    name="reshape_out")
            else:
                net_out = ReshapeLayer(net_out, (config.batch_size, 1, 1),
                                       name="reshape_out")

            self.net_rnn = net_rnn

            return net_out
Example #22
def generator(inputs, is_train=True, reuse=False):
    image_size = 128
   
    gf_dim = 64    # Dimension of gen filters in first conv layer. [64]
    c_dim = 1    # n_color 1
    w_init = tf.glorot_normal_initializer()
    gamma_init = tf.random_normal_initializer(1., 0.02)

    with tf.name_scope("GENERATOR"):

        with tf.variable_scope("generator", reuse=reuse):


            with tf.name_scope("net_in"):
                net_in = InputLayer(inputs, name='g/in')
        #############################################################################
            with tf.name_scope("layer0"):
                net_h0 = DenseLayer(net_in, n_units=(gf_dim * 32 * 4 * 4), W_init=w_init,
                act = tf.identity, name='g/h0/lin')
                net_h0 = ReshapeLayer(net_h0, shape=[-1, 4, 4, gf_dim * 32], name='g/h0/reshape')
                net_h0 = BatchNormLayer(net_h0, decay=0.9, act=tf.nn.relu, is_train=is_train,
                gamma_init=gamma_init, name='g/h0/batch_norm')

            with tf.name_scope("layer1"):
                net_h1 = DeConv2d(net_h0, gf_dim * 8, (5, 5), strides=(2, 2),
                padding='SAME', act=None, W_init=w_init, name='g/h1/decon2d')
                net_h1 = BatchNormLayer(net_h1, decay=0.9, act=tf.nn.relu, is_train=is_train,
                gamma_init=gamma_init, name='g/h1/batch_norm')

            with tf.name_scope("layer2"):
                net_h2 = DeConv2d(net_h1, gf_dim * 4, (5, 5), strides=(2, 2),
                padding='SAME', act=None, W_init=w_init, name='g/h2/decon2d')
                net_h2 = BatchNormLayer(net_h2, decay=0.9, act=tf.nn.relu, is_train=is_train,
                gamma_init=gamma_init, name='g/h2/batch_norm')

            with tf.name_scope("layer3"):
                net_h3 = DeConv2d(net_h2, gf_dim*2, (5, 5), strides=(2, 2),
                padding='SAME', act=None, W_init=w_init, name='g/h3/decon2d')
                net_h3 = BatchNormLayer(net_h3, decay=0.9, act=tf.nn.relu, is_train=is_train,
                gamma_init=gamma_init, name='g/h3/batch_norm')


            with tf.name_scope("layer4"):
                net_h4 = DeConv2d(net_h3, gf_dim, (5, 5), strides=(2, 2),
                padding='SAME', act=None, W_init=w_init, name='g/h4/decon2d')
                net_h4 = BatchNormLayer(net_h4, decay=0.9, act=tf.nn.relu, is_train=is_train,
                gamma_init=gamma_init, name='g/h4/batch_norm')

            with tf.name_scope("layer5"):
                net_h5 = DeConv2d(net_h4, c_dim, (5, 5), strides=(2, 2),
                padding='SAME', act=None, W_init=w_init, name='g/h5/decon2d')
        #net_h5.outputs = tf.nn.tanh(net_h5.outputs)
                net_h5.outputs = tf.nn.tanh(net_h5.outputs)

        return net_h5
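
A minimal sampling sketch for the generator above; the 100-dimensional latent vector is an assumption, and the tanh output has shape (batch, 128, 128, 1).

import numpy as np
import tensorflow as tf

z_dim = 100
z = tf.placeholder(tf.float32, [None, z_dim], name='z')
net_g = generator(z, is_train=False, reuse=False)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    imgs = sess.run(net_g.outputs, {z: np.random.normal(size=(4, z_dim)).astype(np.float32)})
    # imgs: (4, 128, 128, 1), values in (-1, 1)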
Example #23
def create_model_pretrained(encode_seqs,
                            decode_seqs,
                            src_vocab_size,
                            emb_dim,
                            hidden_size,
                            pretrainedModelName,
                            is_train=True,
                            reuse=False):
    with tf.variable_scope("model", reuse=reuse):
        # for chatbot, you can use the same embedding layer,
        # for translation, you may want to use 2 separated embedding layers
        word2idx, weights, vocab_size, embedding_dim = loadGloveModel(
            pretrainedModelName)  #'glove.6B.100d.txt')

        with tf.variable_scope("embedding") as vs:
            glove_weights_initializer = tf.constant_initializer(weights)
            embedding_encode = EmbeddingInputlayer(
                inputs=encode_seqs,
                vocabulary_size=vocab_size,
                embedding_size=embedding_dim,
                E_init=glove_weights_initializer,
                name='seq_embedding')
            vs.reuse_variables()
            embedding_decode = EmbeddingInputlayer(
                inputs=decode_seqs,
                vocabulary_size=vocab_size,
                embedding_size=embedding_dim,
                E_init=glove_weights_initializer,
                name='seq_embedding')

        net_rnn = Seq2Seq(
            embedding_encode,
            embedding_decode,
            cell_fn=tf.nn.rnn_cell.LSTMCell,
            n_hidden=hidden_size,
            initializer=tf.random_uniform_initializer(-0.1, 0.1),
            encode_sequence_length=retrieve_seq_length_op2(encode_seqs),
            decode_sequence_length=retrieve_seq_length_op2(decode_seqs),
            initial_state_encode=None,
            dropout=(0.5 if is_train else None),
            n_layer=3,
            return_seq_2d=True,
            name='seq2seq')

        net_out = DenseLayer(net_rnn,
                             n_units=src_vocab_size,
                             act=tf.identity,
                             name='output')
    return net_out, net_rnn
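
loadGloveModel is not shown in this snippet; judging from how its return values are used above (a word-to-index dict, an embedding matrix, the vocabulary size, and the embedding dimension), it presumably looks roughly like the hedged reconstruction below.

import numpy as np

def loadGloveModel(path):
    word2idx, vectors = {}, []
    with open(path, encoding='utf-8') as f:
        for i, line in enumerate(f):
            parts = line.rstrip().split(' ')
            word2idx[parts[0]] = i
            vectors.append(np.asarray(parts[1:], dtype=np.float32))
    weights = np.stack(vectors)                      # (vocab_size, embedding_dim)
    return word2idx, weights, weights.shape[0], weights.shape[1]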
Example #24
    def my_net(net_in,y_,reuse,is_train):

        x1 = tf.image.central_crop(net_in, 0.5)
        x2 = net_in

        # x2 = imresize(x2, (448, 448))
        # x2 = tf.cast(net_in, tf.uint8)
        # x2 = tf.reshape(x2,[448,448,3])
        # x2 = tf.cast(x2, tf.float32)

        network1 = Model_base.partnetwork(x1)
        network2 = Model_base.partnetwork(x2)

        network = tf.add(network1,network2,name='Eltwise3')
        """fc_layer"""
        network = FlattenLayer(network,name='flatten')
        network = DenseLayer(network,n_units=4096,act=tf.nn.relu,name='fc1_relu')
        network = DenseLayer(network, n_units=4096, act=tf.nn.relu, name='fc2_relu')
        network = DenseLayer(network, n_units=5, act=tf.identity, name='fc3_relu')

        # network.partnet1=network1
        # network.partnet2=network2
        # network.all_layers=list(network.all_layers)+list(network.partnet1.all_layers)

        y=network.outputs

        ce = tl.cost.cross_entropy(y, y_, name='cost')
        L2 = 0
        for p in tl.layers.get_variables_with_name('relu/W', True, True):
            L2 += Model_base.l2_regularizer(0.004)(p)
        cost = ce + L2

        correct = tf.equal(tf.cast(tf.argmax(y, 1), tf.int32), y_)
        acc = tf.reduce_mean(tf.cast(correct, tf.float32))

        return network,cost,acc
Example #25
    def ShuffleNetV1(self, inputlayer, name):
        inputlayer = InputLayer(inputlayer, name='input')#32*32*2
        #print(inputlayer.outputs.get_shape())
        x = Conv2d(inputlayer, 24, (3, 3), strides=(2, 2), padding='SAME', act=tf.nn.relu, name=name+'_Con2d')###24
        x = MaxPool2d(x, filter_size=(3, 3), strides=(2, 2), padding='SAME', name=name+'_MaxPool')
        x = self.stage(x, n_filter=384, filter_size=(3, 3), groups=8, repeat=4, stage=2, name=name+'_stage1')
        #print("stage1 finished!!!!!!!!!!!!!!!!")
        x = self.stage(x, n_filter=768, filter_size=(3, 3), groups=8, repeat=8, stage=3, name=name+'_stage2')
        #print("stage2 finished!!!!!!!!!!!!!!!!")
        x = self.stage(x, n_filter=1536, filter_size=(3, 3), groups=8, repeat=4, stage=4, name=name+'_stage3')
        #print("stage3 finished!!!!!!!!!!!!!!!!")
        print("stage3", x.outputs.get_shape())
        print(x.count_params())
        #x = GlobalMaxPool2d(x, name=name+'_GlobalMaxPool')
        #print("GMP", x.outputs.get_shape())
        #print(x.count_params())
        x = GlobalMeanPool2d(x, name=name+'_GlobalMaxPool')
        print("GAP", x.outputs.get_shape())
        print(x.count_params())
        x = DenseLayer(x, name=name+'_Dense')
        print("DENSE", x.outputs.get_shape())
        print(x.count_params())

        return x
Example #26
    def _build_net(self, is_train=True, reuse=None):
        with tf.variable_scope(self.name, reuse=reuse):
            n = InputLayer(self.x / 255, name='in')

            n = Conv2d(n, 32, (3, 3), (1, 1), tf.nn.relu, "VALID", name='c1/1')
            n = Conv2d(n, 32, (3, 3), (1, 1), tf.nn.relu, "VALID", name='c1/2')
            n = MaxPool2d(n, (2, 2), (2, 2), 'VALID', name='max1')

            n = DropoutLayer(n,
                             0.75,
                             is_fix=True,
                             is_train=is_train,
                             name='drop1')

            n = Conv2d(n, 64, (3, 3), (1, 1), tf.nn.relu, "VALID", name='c2/1')
            n = Conv2d(n, 64, (3, 3), (1, 1), tf.nn.relu, "VALID", name='c2/2')
            n = MaxPool2d(n, (2, 2), (2, 2), 'VALID', name='max2')
            n = DropoutLayer(n,
                             0.75,
                             is_fix=True,
                             is_train=is_train,
                             name='drop2')

            n = FlattenLayer(n, name='f')
            n = DenseLayer(n, 512, tf.nn.relu, name='dense1')
            n = DropoutLayer(n,
                             0.5,
                             is_fix=True,
                             is_train=is_train,
                             name='drop3')
            n = DenseLayer(n, n_action, tf.nn.tanh, name='o')

        if is_train:
            self.n_train = n
        else:
            self.n_test = n
Example #27
    def __get_network__(self,
                        model_name,
                        encode_seqs,
                        reuse=False,
                        is_train=True):
        # the architecture of networks
        with tf.variable_scope(model_name, reuse=reuse):
            # tl.layers.set_name_reuse(reuse)
            net_in = InputLayer(inputs=encode_seqs, name="in_word_embed")

            filter_length = [3, 4, 5]
            n_filter = 200
            net_cnn_list = list()
            for fsz in filter_length:
                net_cnn = Conv1d(net_in,
                                 n_filter=n_filter,
                                 filter_size=fsz,
                                 stride=1,
                                 act=tf.nn.relu,
                                 name="cnn%d" % fsz)
                net_cnn.outputs = tf.reduce_max(net_cnn.outputs,
                                                axis=1,
                                                name="global_maxpool%d" % fsz)
                net_cnn_list.append(net_cnn)

            net_cnn = ConcatLayer(net_cnn_list, concat_dim=-1)
            net_fc = DenseLayer(net_cnn,
                                n_units=300,
                                act=tf.nn.relu,
                                name="fc_1")

            net_fc = DenseLayer(net_fc,
                                n_units=1,
                                act=tf.nn.sigmoid,
                                name="fc_2")
        return net_fc, net_cnn
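
A minimal wiring sketch for the convolutional text classifier above; the pre-embedded input shape, the binary label, and the owner instance are assumptions.

import tensorflow as tf
import tensorlayer as tl

def build_sentence_classifier(owner, seq_len=100, emb_dim=200):
    # `owner` is an instance of the class that defines __get_network__ above
    encode_seqs = tf.placeholder(tf.float32, [None, seq_len, emb_dim], name='encode_seqs')
    label = tf.placeholder(tf.float32, [None, 1], name='label')
    net_fc, net_cnn = owner.__get_network__("sent_clf", encode_seqs, reuse=False, is_train=True)
    loss = tl.cost.binary_cross_entropy(net_fc.outputs, label, name='bce')
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)
    return encode_seqs, label, net_fc, loss, train_op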
Example #28
    def _build_net(self):
        w_init = tf.contrib.layers.xavier_initializer()
        with tf.variable_scope('actor'):  # Policy network
            nn = InputLayer(self.s, name='in')
            nn = DenseLayer(nn, n_units=500, act=tf.nn.relu6, W_init=w_init, name='la')
            nn = DenseLayer(nn, n_units=300, act=tf.nn.relu6, W_init=w_init, name='la2')
            mu = DenseLayer(nn, n_units=N_A, act=tf.nn.tanh, W_init=w_init, name='mu')
            sigma = DenseLayer(nn, n_units=N_A, act=tf.nn.softplus, W_init=w_init, name='sigma')
            self.mu = mu.outputs
            self.sigma = sigma.outputs

        with tf.variable_scope('critic'):  # we use the Value function here, not the Q function.
            nn = InputLayer(self.s, name='in')
            nn = DenseLayer(nn, n_units=500, act=tf.nn.relu6, W_init=w_init, name='lc')
            nn = DenseLayer(nn, n_units=200, act=tf.nn.relu6, W_init=w_init, name='lc2')
            v = DenseLayer(nn, n_units=1, W_init=w_init, name='v')
            self.v = v.outputs
Example #29
    def generator(self, z, label_class, is_train=True, reuse=False):
        # NOTE: concatenating z & label might be wrong, needs testing
        labels_one_hot = tf.one_hot(label_class, self.class_num)
        z_labels = tf.concat([z, labels_one_hot], 1)
        image_size = self.images_size
        s16 = image_size // 16
        gf_dim = 64    # Dimension of gen filters in first conv layer. [64]
        c_dim = self.channel    # n_color 3
        w_init = tf.glorot_normal_initializer()
        gamma_init = tf.random_normal_initializer(1., 0.02)

        with tf.variable_scope("generator", reuse=reuse):
            net_in = InputLayer(z_labels, name='g/in')
            net_h0 = DenseLayer(net_in, n_units=(gf_dim * 8 * s16 * s16), W_init=w_init,
                    act = tf.identity, name='g/h0/lin')
            net_h0 = ReshapeLayer(net_h0, shape=[-1, s16, s16, gf_dim*8], name='g/h0/reshape')
            net_h0 = BatchNormLayer(net_h0, decay=0.9, act=tf.nn.relu, is_train=is_train,
                    gamma_init=gamma_init, name='g/h0/batch_norm')

            net_h1 = DeConv2d(net_h0, gf_dim * 4, (5, 5), strides=(2, 2),
                    padding='SAME', act=None, W_init=w_init, name='g/h1/decon2d')
            net_h1 = BatchNormLayer(net_h1, decay=0.9, act=tf.nn.relu, is_train=is_train,
                    gamma_init=gamma_init, name='g/h1/batch_norm')

            net_h2 = DeConv2d(net_h1, gf_dim * 2, (5, 5), strides=(2, 2),
                    padding='SAME', act=None, W_init=w_init, name='g/h2/decon2d')
            net_h2 = BatchNormLayer(net_h2, decay=0.9, act=tf.nn.relu, is_train=is_train,
                    gamma_init=gamma_init, name='g/h2/batch_norm')

            net_h3 = DeConv2d(net_h2, gf_dim, (5, 5), strides=(2, 2),
                    padding='SAME', act=None, W_init=w_init, name='g/h3/decon2d')
            net_h3 = BatchNormLayer(net_h3, decay=0.9, act=tf.nn.relu, is_train=is_train,
                    gamma_init=gamma_init, name='g/h3/batch_norm')

            net_h4 = DeConv2d(net_h3, c_dim, (5, 5), strides=(2, 2),
                    padding='SAME', act=None, W_init=w_init, name='g/h4/decon2d')
            net_h4.outputs = tf.nn.tanh(net_h4.outputs)
        return net_h4
Example #30
def _model(encode_seqs, decode_seqs, hypes, metadata, mode):
    # We add two here for start, end ids as well as unknown and pad.
    xvocab_size = len(metadata['idx2w']) + 2

    reuse = (mode != ModeKeys.TRAIN)
    with tf.variable_scope("model", reuse=tf.AUTO_REUSE):
        # for chatbot, you can use the same embedding layer
        with tf.variable_scope("embedding") as vs:
            net_encode = EmbeddingInputlayer(inputs=encode_seqs,
                                             vocabulary_size=xvocab_size,
                                             embedding_size=hypes['emb_dim'],
                                             name='seq_embedding')
            vs.reuse_variables()
            # tl.layers.set_name_reuse(True) # remove if TL version == 1.8.0+
            net_decode = EmbeddingInputlayer(inputs=decode_seqs,
                                             vocabulary_size=xvocab_size,
                                             embedding_size=hypes['emb_dim'],
                                             name='seq_embedding')
        cell_fn = tf.contrib.rnn.GRUCell if hypes[
            'cell_fn'] == 'GRU' else tf.contrib.rnn.BasicLSTMCell
        net_rnn = Seq2Seq(
            net_encode,
            net_decode,
            cell_fn=cell_fn,
            n_hidden=hypes['emb_dim'],
            initializer=tf.random_uniform_initializer(-0.1, 0.1),
            encode_sequence_length=retrieve_seq_length_op2(encode_seqs),
            decode_sequence_length=retrieve_seq_length_op2(decode_seqs),
            initial_state_encode=None,
            dropout=(hypes['dropout'] if mode == ModeKeys.TRAIN else None),
            n_layer=hypes['seq2seq']['n_layer'],
            return_seq_2d=True,
            name='seq2seq')
        net_out = DenseLayer(net_rnn,
                             n_units=xvocab_size,
                             act=tf.identity,
                             name='output')
    return net_out, net_rnn
Example #31
        # tl.layers.set_name_reuse(True)
        net_decode = EmbeddingInputlayer(inputs=decode_seqs, vocabulary_size=10000, embedding_size=200, name='seq_embed')
    net = Seq2Seq(
        net_encode,
        net_decode,
        cell_fn=tf.contrib.rnn.BasicLSTMCell,
        n_hidden=200,
        initializer=tf.random_uniform_initializer(-0.1, 0.1),
        encode_sequence_length=retrieve_seq_length_op2(encode_seqs),
        decode_sequence_length=retrieve_seq_length_op2(decode_seqs),
        initial_state_encode=None,
        dropout=None,
        n_layer=2,
        return_seq_2d=True,
        name='Seq2seq')
net = DenseLayer(net, n_units=10000, act=tf.identity, name='oo')
e_loss = tl.cost.cross_entropy_seq_with_mask(logits=net.outputs, target_seqs=target_seqs, input_mask=target_mask, return_details=False, name='cost')
y = tf.nn.softmax(net.outputs)

net.print_layers()
net.print_params(False)

shape = net.outputs.get_shape().as_list()
if shape[-1] != 10000:
    raise Exception("shape dont match")

if len(net.all_layers) != 5:
    raise Exception("layers dont match")

if len(net.all_params) != 11:
    raise Exception("params dont match")