Example #1
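All snippets below are TF1-era tflearn code. They appear to assume roughly the following imports; the alias `tfi` for `tflearn.initializations` is inferred from how `tfi.variance_scaling()` and `tfi.zeros()` are used (Example #9 additionally uses names such as `conv_2d` and `Momentum` imported directly from tflearn):

import numpy as np
import tensorflow as tf
import tflearn
import tflearn.initializations as tfi
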
 def get_global_energy(self,
                       xinput=None,
                       yinput=None,
                       embedding=None,
                       reuse=False):
     with tf.variable_scope(self.config.en_variable_scope) as scope:
         j = 0
         net = yinput
         for (sz, a) in self.config.en_layer_info:
             net = tflearn.fully_connected(
                 net,
                 sz,
                 weight_decay=self.config.weight_decay,
                 weights_init=tfi.variance_scaling(),
                 bias_init=tfi.zeros(),
                 reuse=reuse,
                 regularizer='L2',
                 # activation=a,
                 scope=("en.h" + str(j)))
             net = tf.log(tf.exp(net) + 1.0)  # softplus, written out by hand
             j = j + 1
         global_e = tflearn.fully_connected(
             net,
             1,
             activation='linear',
             weight_decay=self.config.weight_decay,
             weights_init=tfi.zeros(),
             bias=False,
             reuse=reuse,
             regularizer='L2',
             scope=("en.g"))
         return tf.squeeze(global_e)
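
The `tf.log(tf.exp(net) + 1.0)` line above is the softplus activation written out by hand; `tf.nn.softplus` computes the same function without overflowing `tf.exp` for large inputs. A minimal check, assuming a TF1 session:

x = tf.constant([-2.0, 0.0, 3.0])
manual = tf.log(tf.exp(x) + 1.0)   # overflows for large x
stable = tf.nn.softplus(x)         # log(exp(x) + 1), computed stably
with tf.Session() as sess:
    print(sess.run([manual, stable]))  # both approx [0.127, 0.693, 3.049]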
Example #2
    def softmax_prediction_network2(self, xinput=None, reuse=False):
        net = xinput
        print("xinput", xinput)
        with tf.variable_scope("pred") as scope:
            net = tflearn.fully_connected(
                net,
                1000,
                regularizer='L2',
                weight_decay=self.config.weight_decay,
                weights_init=tfi.variance_scaling(),
                bias_init=tfi.zeros(),
                reuse=reuse,
                scope=("ph.0"))
            net = tf.nn.relu(net)
            net = tflearn.layers.dropout(net, 1 - self.config.dropout)

            net = tflearn.fully_connected(
                net,
                self.config.output_num * self.config.dimension,
                activation='linear',
                weight_decay=self.config.weight_decay,
                weights_init=tfi.variance_scaling(),
                bias_init=tfi.zeros(),
                reuse=reuse,
                regularizer='L2',
                scope=("ph.1"))

        cat_output = tf.reshape(
            net, (-1, self.config.output_num, self.config.dimension))

        return tf.nn.softmax(cat_output, dim=2)
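
The reshape-then-softmax pattern above turns a flat `(batch, output_num * dimension)` tensor into `(batch, output_num, dimension)` and normalizes each `dimension`-sized slice independently. A shape walkthrough under assumed sizes (`output_num=3`, `dimension=4`):

batch, output_num, dimension = 2, 3, 4
flat = tf.random_normal((batch, output_num * dimension))  # (2, 12)
cat = tf.reshape(flat, (-1, output_num, dimension))       # (2, 3, 4)
probs = tf.nn.softmax(cat, dim=2)                         # each length-4 slice sums to 1
with tf.Session() as sess:
    p = sess.run(probs)
    print(p.shape, p.sum(axis=2))  # (2, 3, 4) and all ones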
Example #3
    def softmax_prediction_network(self, hidden_vars, reuse=False):
        net = hidden_vars
        with tf.variable_scope(self.config.spen_variable_scope):
            with tf.variable_scope("pred") as scope:
                net = tflearn.fully_connected(
                    net,
                    100,
                    activation='relu',
                    weight_decay=self.config.weight_decay,
                    weights_init=tfi.variance_scaling(),
                    bias_init=tfi.zeros(),
                    reuse=reuse,
                    scope=("ph.0"))

                net = tflearn.fully_connected(
                    net,
                    self.config.output_num * self.config.dimension,
                    activation='softplus',
                    weight_decay=self.config.weight_decay,
                    weights_init=tfi.variance_scaling(),
                    bias_init=tfi.zeros(),
                    reuse=reuse,
                    scope=("ph.1"))

        cat_output = tf.reshape(
            net, (-1, self.config.output_num, self.config.dimension))

        return tf.nn.softmax(cat_output, dim=2)
Example #4
    def cnn_prediction_network(self, xinput=None, reuse=False):

        # input = tf.concat((xinput, yinput), axis=1)
        net = xinput
        j = 0
        with tf.variable_scope("pred"):
            net = tf.reshape(net,
                             shape=(-1, self.config.image_width,
                                    self.config.image_height, 1))

            for (nf, fs, st) in self.config.cnn_layer_info:
                net = tflearn.conv_2d(net,
                                      nb_filter=nf,
                                      filter_size=fs,
                                      strides=st,
                                      padding="same",
                                      scope=("conv" + str(j)),
                                      activation=tf.nn.relu,
                                      reuse=reuse)
                # net = tflearn.max_pool_2d(net, kernel_size=[2,2], strides=2)
                # net = tflearn.batch_normalization(net, scope=("bn"+ str(j)), reuse=reuse)
                j = j + 1

            j = 0

            for (sz, a) in self.config.pred_layer_info:
                net = tflearn.fully_connected(
                    net,
                    sz,
                    regularizer='L2',
                    weight_decay=self.config.weight_decay,
                    weights_init=tfi.variance_scaling(),
                    bias_init=tfi.zeros(),
                    reuse=reuse,
                    scope=("ph." + str(j)))
                net = tf.nn.relu(net)
                net = tflearn.layers.dropout(net, 1 - self.config.dropout)
                j = j + 1

            net = tflearn.fully_connected(
                net,
                self.config.output_num * self.config.dimension,
                activation='linear',
                weight_decay=self.config.weight_decay,
                weights_init=tfi.variance_scaling(),
                bias_init=tfi.zeros(),
                reuse=reuse,
                regularizer='L2',
                scope=("ph.fc"))
            if self.config.dimension == 1:
                return tf.nn.sigmoid(net)
            else:
                cat_output = tf.reshape(
                    net, (-1, self.config.output_num, self.config.dimension))
                return tf.nn.softmax(cat_output, dim=2)
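
The final branch chooses the output head: with `dimension == 1` each of the `output_num` outputs is an independent binary label (sigmoid), while with `dimension > 1` each output is a categorical variable over `dimension` classes (softmax). A small illustration with made-up logits:

logits = tf.constant([[0.5, -1.0, 2.0]])      # (batch=1, output_num=3), dimension == 1
binary_probs = tf.nn.sigmoid(logits)          # independent per-label probabilities

cat_logits = tf.constant([[1.0, 0.0, 0.0, 2.0]])           # output_num=2, dimension=2
cat_probs = tf.nn.softmax(tf.reshape(cat_logits, (-1, 2, 2)), dim=2)
with tf.Session() as sess:
    print(sess.run([binary_probs, cat_probs]))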
Example #5
    def get_energy_mlp(self,
                       xinput=None,
                       yinput=None,
                       embedding=None,
                       reuse=False):
        output_size = yinput.get_shape().as_list()[-1]
        with tf.variable_scope(self.config.spen_variable_scope):
            with tf.variable_scope(self.config.fx_variable_scope) as scope:
                logits = self.get_feature_net_mlp(xinput,
                                                  output_size,
                                                  reuse=reuse)
                mult = tf.multiply(logits, yinput)
                local_e = tflearn.fully_connected(
                    mult,
                    1,
                    activation='linear',
                    regularizer='L2',
                    weight_decay=self.config.weight_decay,
                    weights_init=tfi.variance_scaling(),
                    bias=False,
                    bias_init=tfi.zeros(),
                    reuse=reuse,
                    scope=("en.l"))
            with tf.variable_scope(self.config.en_variable_scope) as scope:
                j = 0
                net = yinput
                for (sz, a) in self.config.en_layer_info:
                    net = tflearn.fully_connected(
                        net,
                        sz,
                        weight_decay=self.config.weight_decay,
                        weights_init=tfi.variance_scaling(),
                        bias_init=tfi.zeros(),
                        reuse=reuse,
                        regularizer='L2',
                        scope=("en.h" + str(j)))
                    # net = tflearn.dropout(net, 1.0 - self.config.dropout)
                    # net = tflearn.layers.normalization.batch_normalization(net, reuse=reuse, scope=("bn.f" + str(j)))
                    # net = tflearn.activations.softplus(net)
                    net = tf.log(tf.exp(net) + 1.0)  # softplus, written out by hand
                    j = j + 1
                global_e = tflearn.fully_connected(
                    net,
                    1,
                    activation='linear',
                    weight_decay=self.config.weight_decay,
                    weights_init=tfi.zeros(),
                    bias=False,
                    reuse=reuse,
                    regularizer='L2',
                    scope=("en.g"))

        return tf.squeeze(local_e + global_e)
Example #6
    def get_feature_net_mlp(self, xinput, output_num, reuse=False):
        print(output_num)

        net = xinput
        j = 0
        for (sz, a) in self.config.layer_info:
            print(sz, a)
            net = tflearn.fully_connected(
                net,
                sz,
                weight_decay=self.config.weight_decay,
            # weights_init=tfi.variance_scaling(),
                bias_init=tfi.zeros(),
                regularizer='L2',
                reuse=reuse,
                scope=("fx.h" + str(j)))
            net = tflearn.activations.relu(net)
            # net = tflearn.dropout(net, 1.0 - self.config.dropout)
            j = j + 1
        logits = tflearn.fully_connected(net,
                                         output_num,
                                         activation='linear',
                                         regularizer='L2',
                                         weight_decay=self.config.weight_decay,
                                         weights_init=tfi.variance_scaling(),
                                         bias=False,
                                         reuse=reuse,
                                         scope=("fx.h" + str(j)))
        return logits
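
The `reuse` flag threaded through every layer is what lets the same `fx.h*` weights be applied to more than one input tensor (for example, once when building the training graph and again during inference). A minimal sketch of the underlying TF1 variable-sharing mechanism, using a hypothetical `dense` helper:

def dense(x, reuse):
    # the same scope name plus reuse=True returns the existing weights
    with tf.variable_scope("shared", reuse=reuse):
        w = tf.get_variable("w", shape=[4, 2])
        return tf.matmul(x, w)

a = dense(tf.ones((1, 4)), reuse=False)  # creates shared/w
b = dense(tf.ones((1, 4)), reuse=True)   # reuses shared/w; no new variable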
Example #7
    def mlp_prediction_network(self, xinput=None, reuse=False):
        net = xinput
        j = 0
        with tf.variable_scope("pred") as scope:
            for (sz, a) in self.config.pred_layer_info:
                print(sz, a)
                net = tflearn.fully_connected(
                    net,
                    sz,
                    regularizer='L2',
                    activation=a,
                    weight_decay=self.config.weight_decay,
                    weights_init=tfi.variance_scaling(),
                    bias_init=tfi.zeros(),
                    reuse=reuse,
                    scope=("ph." + str(j)))
                net = tflearn.layers.dropout(net, 1 - self.config.dropout)
                j = j + 1

            logits = tflearn.fully_connected(
                net,
                self.config.output_num * self.config.dimension,
                activation='linear',
                weight_decay=self.config.weight_decay,
                weights_init=tfi.variance_scaling(),
                reuse=reuse,
                bias=False,
                regularizer='L2',
                scope=("ph.fc"))
        # if self.config.dimension == 1:
        #  return tf.nn.sigmoid(net)
        # else:
        #  cat_output = tf.reshape(net, (-1, self.config.output_num, self.config.dimension))
        #  return tf.nn.softmax(cat_output, dim=2)
        return logits
Example #8
 def get_feature_net_mlp(self,
                         xinput,
                         output_num,
                         embedding=None,
                         reuse=False):
     net = xinput
     j = 0
     for (sz, a) in self.config.layer_info:
         net = tflearn.fully_connected(
             net,
             sz,
             weight_decay=self.config.weight_decay,
             weights_init=tfi.variance_scaling(),
             bias_init=tfi.zeros(),
             regularizer='L2',
             reuse=reuse,
             scope=("fx.h" + str(j)))
         # net = tflearn.layers.normalization.batch_normalization(net, reuse=reuse, scope=("bn.f" + str(j)))
         net = tflearn.activations.relu(net)
         j = j + 1
     logits = tflearn.fully_connected(net,
                                      output_num,
                                      activation='linear',
                                      regularizer='L2',
                                      weight_decay=self.config.weight_decay,
                                      weights_init=tfi.variance_scaling(),
                                      bias=False,
                                      reuse=reuse,
                                      scope=("fx.h" + str(j)))
     return logits
Example #9
File: duck.py  Project: akshay0190/EJGo
def get_network():
    biases = zeros(shape=[9, 19, 1, 192])
    biases2 = zeros(shape=[19, 19, 1])
    network = input_data(shape=[None, 19, 19, 2], name='input')
    network = conv_2d(network, 192, 5, activation='elu', weights_init=truncated_normal(stddev=stddev5), bias=False) + biases[0]
    network = conv_2d(network, 192, 3, activation='elu', weights_init=truncated_normal(stddev=stddev3), bias=False) + biases[1]
    network = conv_2d(network, 192, 3, activation='elu', weights_init=truncated_normal(stddev=stddev3), bias=False) + biases[2]
    network = conv_2d(network, 192, 3, activation='elu', weights_init=truncated_normal(stddev=stddev3), bias=False) + biases[3]
    network = conv_2d(network, 192, 3, activation='elu', weights_init=truncated_normal(stddev=stddev3), bias=False) + biases[4]
    network = conv_2d(network, 192, 3, activation='elu', weights_init=truncated_normal(stddev=stddev3), bias=False) + biases[5]
    network = conv_2d(network, 192, 3, activation='elu', weights_init=truncated_normal(stddev=stddev3), bias=False) + biases[6]
    network = conv_2d(network, 192, 3, activation='elu', weights_init=truncated_normal(stddev=stddev3), bias=False) + biases[7]
    network = conv_2d(network, 192, 3, activation='elu', weights_init=truncated_normal(stddev=stddev3), bias=False) + biases[8]
    network = conv_2d(network, 1, 3, activation='elu', weights_init=truncated_normal(stddev=stddev3), bias=False) + biases2
    network = fully_connected(network, 19*19, activation='softmax')
    momentum = Momentum(learning_rate=0.002)
    network = regression(network, optimizer=momentum, loss='categorical_crossentropy', name='target')
    return network
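
Note that the `zeros(...)` tensors here are constants, so every `+ biases[k]` adds exactly zero and nothing is trained; they read like a leftover from a plan to use real bias variables (`stddev3` and `stddev5` are presumably defined elsewhere in duck.py). If trainable biases were intended, a sketch of the fix, assuming `zeros` resolves to `tf.zeros`:

biases = tf.Variable(tf.zeros(shape=[9, 19, 1, 192]))  # trainable, updated by the optimizer
biases2 = tf.Variable(tf.zeros(shape=[19, 19, 1]))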
Example #10
    def get_feature_net_cnn(self, input, output_num, reuse=False):
        net = input
        netw = tf.nn.embedding_lookup(self.W, net)
        net_expanded = tf.expand_dims(netw, -1)
        pooled_outputs = []
        for i, filter_size in enumerate(self.config.filter_sizes):
            with tf.variable_scope("max-pooling") as scope:
                if reuse:
                    scope.reuse_variables()
                filter_shape = [
                    filter_size, self.embedding_size, 1,
                    self.config.num_filters
                ]
                W = tf.get_variable(
                    initializer=tf.truncated_normal(filter_shape, stddev=0.1),
                    name=("W" + ("-conv-maxpool-%s" % filter_size)))
                b = tf.get_variable(
                    initializer=tf.constant(0.1,
                                            shape=[self.config.num_filters]),
                    name=("b" + ("-conv-maxpool-%s" % filter_size)))
                conv = tf.nn.conv2d(net_expanded,
                                    W,
                                    strides=[1, 1, 1, 1],
                                    padding="VALID",
                                    name=("conv" +
                                          ("-conv-maxpool-%s" % filter_size)))
                # Apply nonlinearity
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                # Max-pooling over the outputs
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, self.sequence_length - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    name="pool")
                pooled_outputs.append(pooled)

        num_filters_total = self.config.num_filters * len(
            self.config.filter_sizes)
        h_pool = tf.concat(pooled_outputs, 3)
        net = tf.reshape(h_pool, [-1, num_filters_total])
        net = tflearn.dropout(net, 1.0 - self.config.dropout)
        j = 0
        sz = self.config.output_num

        logits = tflearn.fully_connected(net,
                                         sz,
                                         activation='linear',
                                         weight_decay=self.config.weight_decay,
                                         weights_init=tfi.variance_scaling(),
                                         bias_init=tfi.zeros(),
                                         reuse=reuse,
                                         scope=("fx.h-cnn" + str(j)))
        return logits
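
This is the standard Kim-style text CNN: each filter spans the full embedding width and convolves only over time, max-pooling then collapses the time axis, and the pooled features from all filter sizes are concatenated. Shape bookkeeping for a single filter size, with assumed values:

seq_len, emb, n_filters, f = 20, 50, 8, 3
x = tf.random_normal((2, seq_len, emb, 1))                  # (batch, time, emb, 1)
W = tf.get_variable("demo_W", shape=[f, emb, 1, n_filters])
conv = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="VALID")  # (2, 18, 1, 8)
pooled = tf.nn.max_pool(conv, ksize=[1, seq_len - f + 1, 1, 1],
                        strides=[1, 1, 1, 1], padding="VALID")    # (2, 1, 1, 8)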
Example #11
  def construct_embedding(self, embedding_size, vocabulary_size):
    self.vocabulary_size = vocabulary_size
    self.embedding_size = embedding_size
    self.embedding_placeholder = tf.placeholder(tf.float32, [self.vocabulary_size, self.embedding_size])

    with tf.variable_scope(self.config.spen_variable_scope) as scope:
      self.embedding = tf.get_variable("emb", shape=[self.vocabulary_size, self.embedding_size], dtype=tf.float32,
                                       initializer=tfi.zeros(), trainable=True)
    self.embedding_init = self.embedding.assign(self.embedding_placeholder)

    return self
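
The placeholder/assign pair is the usual TF1 trick for loading pretrained word vectors without baking the matrix into the graph definition. Usage would look roughly like this, where `model` and the pretrained array are stand-ins:

pretrained = np.random.rand(10000, 300).astype(np.float32)  # e.g. GloVe vectors
model.construct_embedding(embedding_size=300, vocabulary_size=10000)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(model.embedding_init,
             feed_dict={model.embedding_placeholder: pretrained})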
Example #12
    def get_energy_rnn_emb(self, xinput, yinput, embedding=None, reuse=False):
        xinput = tf.cast(xinput, tf.int32)
        xinput = tf.nn.embedding_lookup(embedding, xinput)
        with tf.variable_scope(self.config.spen_variable_scope):
            with tf.variable_scope(self.config.fx_variable_scope) as scope:
                logits = self.get_feature_net_rnn(xinput, reuse=reuse)
                mult = tf.multiply(logits, yinput)

                local_e = tflearn.fully_connected(
                    mult,
                    1,
                    activation='linear',
                    weight_decay=self.config.weight_decay,
                    weights_init=tfi.variance_scaling(),
                    bias=None,
                    bias_init=tfi.zeros(),
                    reuse=reuse,
                    scope=("en.l"))

            #with tf.variable_scope(self.config.en_variable_scope) as scope:
            # net = yinput
            # j = 0

            # for (sz, a) in self.config.en_layer_info:
            #   net = tflearn.fully_connected(net, sz,activation=a,
            #                                 weight_decay=self.config.weight_decay,
            #                                 weights_init=tfi.variance_scaling(),
            #                                 bias_init=tfi.zeros(), reuse=reuse,
            #                                 scope=("en.h" + str(j)))
            #   j = j + 1

            # global_e = tflearn.fully_connected(net, 1, activation='linear', weight_decay=self.config.weight_decay,
            #                                    weights_init=tfi.zeros(),
            #                                    bias_init=tfi.zeros(), reuse=reuse,
            #                                    scope="en.g")

            #net = global_e + local_e
            net = local_e
            return tf.squeeze(net)
Example #13
 def cnn_javier(self, xinput=None, reuse=False):
     net = tf.reshape(xinput, (-1, 32, 64, 1))
     with tf.variable_scope("pred"):
         for j, (nFilter, kSz,
                 strides) in enumerate(self.config.cnn_layer_info):
             # Use this layer's own filter count and kernel size and honor
             # the `reuse` argument; the original passed the previous layer's
             # filter count, a fixed kernel size of 3, and reuse=False.
             if j == 0:
                 net = tflearn.conv_2d(net,
                                       nFilter,
                                       kSz,
                                       reuse=reuse,
                                       scope="baseline.h{}".format(j),
                                       bias=True)
             else:
                 net = tflearn.conv_2d(net,
                                       nFilter,
                                       kSz,
                                       strides=strides,
                                       reuse=reuse,
                                       scope="baseline.h{}".format(j),
                                       activation='relu',
                                       bias=True)
         sz = self.config.output_num
         probs = tflearn.fully_connected(
             net,
             sz,
             activation='sigmoid',
             weight_decay=self.config.weight_decay,
             weights_init=tfi.variance_scaling(),
             bias_init=tfi.zeros(),
             regularizer='L2',
             reuse=reuse,  # was reuse=False, which ignored the method's flag
             scope=("baseline.fc"))
     return probs
Example #14
    def horses_network(self, xinput=None, reuse=False):
        net = xinput
        j = 0
        with tf.variable_scope("pred"):
            net = tf.reshape(net,
                             shape=(-1, self.config.image_width,
                                    self.config.image_height, 3))
            net = tf.layers.conv2d(inputs=net,
                                   filters=64,
                                   kernel_size=[3, 3],
                                   strides=1,
                                   padding="same",
                                   name="conv0",
                                   activation=tf.nn.relu,
                                   reuse=reuse)

            net = tf.layers.conv2d(inputs=net,
                                   filters=64,
                                   strides=2,
                                   kernel_size=[3, 3],
                                   padding="same",
                                   name="conv1",
                                   activation=tf.nn.relu,
                                   reuse=reuse)
            #  net = tf.layers.batch_normalization()
            # net = tf.layers.max_pooling2d(inputs=net, pool_size=[2, 2], strides=2, name="pool1")
            # net = tflearn.local_response_normalization(net)
            # net = tflearn.dropout(net, 0.8)

            net = tf.layers.conv2d(inputs=net,
                                   filters=128,
                                   kernel_size=[3, 3],
                                   padding="same",
                                   name="conv2",
                                   strides=2,
                                   activation=tf.nn.relu,
                                   reuse=reuse)

            # net = tf.layers.max_pooling2d(inputs=net, pool_size=[2, 2], strides=2, name="pool2")
            # net = tflearn.local_response_normalization(net)
            # net = tflearn.dropout(net, 0.8)

            net = tf.layers.conv2d(inputs=net,
                                   filters=128,
                                   kernel_size=[3, 3],
                                   padding="same",
                                   name="conv3",
                                   strides=2,
                                   activation=tf.nn.relu,
                                   reuse=reuse)
            # net = tf.layers.max_pooling2d(inputs=net, pool_size=[2, 2], strides=2, name="pool3")
            # net = tflearn.local_response_normalization(net)
            # net = tflearn.dropout(net, 0.8)

            # net = tf.reshape(net, [-1, (self.config.image_width / 8) * (self.config.image_height / 8) * 128])

            net = tf.layers.flatten(net)
            # net = tf.layers.max_pooling2d(inputs=net, pool_size=[2, 2], strides=2, name="pool3")
            net = tflearn.dropout(net, 1.0 - self.config.dropout)
            j = 0
            for (sz, a) in self.config.layer_info:
                net = tflearn.fully_connected(
                    net,
                    sz,
                    weight_decay=self.config.weight_decay,
                    weights_init=tfi.variance_scaling(),
                    activation=a,
                    bias_init=tfi.zeros(),
                    regularizer='L2',
                    reuse=reuse,
                    scope=("fx.h" + str(j)))
                net = tflearn.dropout(net, 1.0 - self.config.dropout)

                # net = tflearn.layers.dropout(net, 1 - self.config.dropout)
                j = j + 1

            logits = tflearn.fully_connected(
                net,
                self.config.output_num * self.config.dimension,
                activation='linear',
                regularizer='L2',
                weight_decay=self.config.weight_decay,
                weights_init=tfi.variance_scaling(),
                bias=False,
                reuse=reuse,
                scope=("fx.fc"))
        return logits
Example #15
    def mlph_prediction_network(self, xinput=None, reuse=False):
        hpart = xinput[:, :self.config.hidden_num]
        xpart = xinput[:, self.config.hidden_num:]

        with tf.variable_scope("hpred") as scope:
            hnet = tflearn.fully_connected(
                hpart,
                200,
                regularizer='L2',
                activation='relu',
                weight_decay=self.config.weight_decay,
                weights_init=tfi.zeros(),
                bias_init=tfi.zeros(),
                reuse=reuse,
                scope=("f.h0"))

        with tf.variable_scope("xpred") as scope:
            xnet = tflearn.fully_connected(
                xpart,
                1024,
                regularizer='L2',
                activation='relu',
                weight_decay=self.config.weight_decay,
                weights_init=tfi.variance_scaling(),
                bias_init=tfi.zeros(),
                reuse=reuse,
                scope=("f.x0"))
        j = 0
        net = tf.concat((hnet, xnet), axis=1)
        with tf.variable_scope("pred") as scope:
            for (sz, a) in self.config.pred_layer_info:
                print(sz, a)
                net = tflearn.fully_connected(
                    net,
                    sz,
                    regularizer='L2',
                    weight_decay=self.config.weight_decay,
                    weights_init=tfi.variance_scaling(),
                    bias_init=tfi.zeros(),
                    reuse=reuse,
                    scope=("ph." + str(j)))
                net = tf.nn.relu(net)
                net = tflearn.layers.dropout(net, 1 - self.config.dropout)
                j = j + 1

            net = tflearn.fully_connected(
                net,
                self.config.output_num * self.config.dimension,
                activation='linear',
                weight_decay=self.config.weight_decay,
                weights_init=tfi.variance_scaling(),
                reuse=reuse,
                bias=False,
                regularizer='L2',
                scope=("ph.fc"))
        if self.config.dimension == 1:
            return tf.nn.sigmoid(net)
        else:
            cat_output = tf.reshape(
                net, (-1, self.config.output_num, self.config.dimension))
            return tf.nn.softmax(cat_output, dim=2)
Example #16
    def energy_cnn_image(self,
                         xinput=None,
                         yinput=None,
                         embedding=None,
                         reuse=False):
        image_size = tf.cast(tf.sqrt(tf.cast(tf.shape(xinput)[1], tf.float64)),
                             tf.int32)
        output_size = yinput.get_shape().as_list()[-1]
        with tf.variable_scope(self.config.spen_variable_scope):
            xinput = tf.reshape(xinput, shape=(-1, image_size, image_size))
            conv1 = tf.layers.conv2d(inputs=tf.expand_dims(xinput, axis=3),
                                     filters=8,
                                     kernel_size=[3, 3],
                                     padding="same",
                                     name="conv1",
                                     activation=tf.nn.relu,
                                     reuse=reuse)
            conv1 = tf.nn.dropout(conv1, 1.0 - self.config.dropout)
            pool1 = tf.layers.max_pooling2d(inputs=conv1,
                                            pool_size=[2, 2],
                                            strides=2,
                                            name="spen/pool1")
            conv2 = tf.layers.conv2d(inputs=pool1,
                                     filters=16,
                                     kernel_size=[3, 3],
                                     padding="same",
                                     name="conv2",
                                     activation=tf.nn.relu,
                                     reuse=reuse)
            conv2 = tf.nn.dropout(conv2, 1.0 - self.config.dropout)
            pool2 = tf.layers.max_pooling2d(inputs=conv2,
                                            pool_size=[2, 2],
                                            strides=2,
                                            name="spen/pool2")

            conv3 = tf.layers.conv2d(inputs=pool2,
                                     filters=32,
                                     kernel_size=[3, 3],
                                     padding="same",
                                     name="conv3",
                                     activation=tf.nn.relu,
                                     reuse=reuse)
            conv3 = tf.nn.dropout(conv3, 1.0 - self.config.dropout)
            pool3 = tf.layers.max_pooling2d(inputs=conv3,
                                            pool_size=[2, 2],
                                            strides=2,
                                            name="spen/pool3")
            self.state_dim = 3200
            self.encode_embeddings = tf.reshape(pool3, [-1, self.state_dim])

            with tf.variable_scope(self.config.fx_variable_scope):
                logits = self.get_feature_net_mlp(self.encode_embeddings,
                                                  output_size,
                                                  reuse=reuse)

                mult = tf.multiply(logits, yinput)

            local_e = tflearn.fully_connected(
                mult,
                1,
                activation='linear',
                weight_decay=self.config.weight_decay,
                weights_init=tfi.variance_scaling(),
                bias=None,
                bias_init=tfi.zeros(),
                reuse=reuse,
                scope="en.l")

            with tf.variable_scope(self.config.en_variable_scope) as scope:
                net = yinput
                j = 0

                for (sz, a) in self.config.en_layer_info:
                    net = tflearn.fully_connected(
                        net,
                        sz,
                        activation=a,
                        weight_decay=self.config.weight_decay,
                        weights_init=tfi.variance_scaling(),
                        bias_init=tfi.zeros(),
                        reuse=reuse,
                        scope=("en.h" + str(j)))
                    j = j + 1

                global_e = tflearn.fully_connected(
                    net,
                    1,
                    activation='linear',
                    weight_decay=self.config.weight_decay,
                    weights_init=tfi.zeros(),
                    bias_init=tfi.zeros(),
                    reuse=reuse,
                    scope=("en.h" + str(j)))

        net = local_e + global_e

        return tf.squeeze(net)
Example #17
    def get_energy_rnn_mlp_emb(self,
                               xinput,
                               yinput,
                               embedding=None,
                               reuse=False):
        xinput = tf.cast(xinput, tf.int32)
        xinput = tf.nn.embedding_lookup(embedding, xinput)
        output_size = yinput.get_shape().as_list()[-1]
        with tf.variable_scope(self.config.spen_variable_scope):
            with tf.variable_scope(self.config.fx_variable_scope) as scope:
                logits2 = self.get_feature_net_rnn(xinput, reuse=reuse)
                logits3 = self.get_feature_net_mlp(xinput,
                                                   output_size,
                                                   reuse=reuse)  #+ logits2

                #mult_ = tf.multiply(logits, yinput)
                mult2 = tf.multiply(logits2, yinput)
                mult3 = tf.multiply(logits3, yinput)

                local_e3 = tflearn.fully_connected(
                    mult3,
                    1,
                    activation='linear',
                    weight_decay=self.config.weight_decay,
                    weights_init=tfi.variance_scaling(),
                    bias=None,
                    bias_init=tfi.zeros(),
                    reuse=reuse,
                    scope=("fx3.b0"))

                local_e2 = tflearn.fully_connected(
                    mult2,
                    1,
                    activation='linear',
                    weight_decay=self.config.weight_decay,
                    weights_init=tfi.variance_scaling(),
                    bias=None,
                    bias_init=tfi.zeros(),
                    reuse=reuse,
                    scope=("fx2.b0"))

                #local_e = tflearn.fully_connected(mult_, 1, activation='linear', weight_decay=self.config.weight_decay,
                #                                  weights_init=tfi.variance_scaling(),
                #                                  bias=None,
                #                                  bias_init=tfi.zeros(), reuse=reuse, scope=("fx.b0"))

            with tf.variable_scope(self.config.en_variable_scope) as scope:
                net = yinput
                j = 0

                for (sz, a) in self.config.en_layer_info:
                    # std = np.sqrt(2.0) / np.sqrt(sz)
                    net = tflearn.fully_connected(
                        net,
                        sz,
                        activation=a,
                        weight_decay=self.config.weight_decay,
                        weights_init=tfi.variance_scaling(),
                        bias_init=tfi.zeros(),
                        reuse=reuse,
                        scope=("en.h" + str(j)))
                    #  net = tflearn.activations.relu(net)
                    #net = tflearn.layers.normalization.batch_normalization(net, reuse=reuse, scope=("bn.en" + str(j)))

                    j = j + 1

                global_e = tflearn.fully_connected(
                    net,
                    1,
                    activation='linear',
                    weight_decay=self.config.weight_decay,
                    weights_init=tfi.zeros(),
                    bias_init=tfi.zeros(),
                    reuse=reuse,
                    scope=("en.h" + str(j)))
                # en = tflearn.layers.normalization.batch_normalization(en, reuse=ru, scope=("en." + str(j)))

                if reuse:
                    # effectively a no-op at this point: every layer above
                    # already received `reuse`, and no variables are created
                    # after this line
                    scope.reuse_variables()

                net = global_e + local_e3 + local_e2  #+ local_e + local_e3

            return tf.squeeze(net)
Example #18
def get_energy_mlp(self,
                   xinput=None,
                   yinput=None,
                   embedding=None,
                   reuse=False):
    output_size = yinput.get_shape().as_list()[-1]
    with tf.variable_scope(self.config.spen_variable_scope):
        with tf.variable_scope(self.config.fx_variable_scope) as scope:
            net = xinput
            j = 0
            for (sz, a) in self.config.layer_info:
                net = tflearn.fully_connected(
                    net,
                    sz,
                    weight_decay=self.config.weight_decay,
                    activation=a,
                    weights_init=tfi.variance_scaling(),
                    bias_init=tfi.zeros(),
                    regularizer='L2',
                    reuse=reuse,
                    scope=("fx.h" + str(j)))
                net = tflearn.dropout(net, 1.0 - self.config.dropout)
                j = j + 1
            logits = tflearn.fully_connected(
                net,
                output_size,
                activation='linear',
                regularizer='L2',
                weight_decay=self.config.weight_decay,
                weights_init=tfi.variance_scaling(),
                bias_init=tfi.zeros(),
                reuse=reuse,
                scope="fx.fc")

            mult = logits * yinput
            local_e = tf.reduce_sum(mult, axis=1)
        with tf.variable_scope(self.config.en_variable_scope) as scope:
            j = 0
            net = yinput
            for (sz, a) in self.config.en_layer_info:
                net = tflearn.fully_connected(
                    net,
                    sz,
                    weight_decay=self.config.weight_decay,
                    weights_init=tfi.variance_scaling(),
                    activation=a,
                    bias=False,
                    reuse=reuse,
                    regularizer='L2',
                    scope=("en.h" + str(j)))

                j = j + 1
            global_e = tf.squeeze(
                tflearn.fully_connected(net,
                                        1,
                                        activation='linear',
                                        weight_decay=self.config.weight_decay,
                                        weights_init=tfi.variance_scaling(),
                                        bias=False,
                                        reuse=reuse,
                                        regularizer='L2',
                                        scope=("en.g")))

    return tf.squeeze(tf.add(local_e, global_e))
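
Across these examples the energy follows the usual SPEN decomposition: a local term that scores each label against the input features plus a global term that scores the label vector on its own, roughly E(x, y) = sum_i f_i(x) * y_i + g(y). A stripped-down sketch with plain TF ops and hypothetical weight shapes:

def spen_energy(logits, y, w_hidden, w_out):
    # local energy: feature/label agreement, one scalar per example
    local_e = tf.reduce_sum(logits * y, axis=1)
    # global energy: a small softplus network over the labels alone
    hidden = tf.nn.softplus(tf.matmul(y, w_hidden))
    global_e = tf.squeeze(tf.matmul(hidden, w_out), axis=1)
    return local_e + global_e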