Code Example #1
import tensorflow as tf
import tflearn


def model(dim_embedding, batch_s):
    # Build the model: the flat input vector packs text, citation, section,
    # and subsection features side by side, so slice it apart first.
    input_layer = tflearn.input_data(shape=[None, DIM_FEATURES], dtype=tf.float64, name='input')
    input_text = input_layer[:, 0:DIM_TEXT]
    input_citation = input_layer[:, DIM_TEXT:DIM_TEXT + DIM_CITATION]
    input_section = input_layer[:, DIM_TEXT + DIM_CITATION:DIM_TEXT + DIM_CITATION + NUM_SEC]
    input_subsection = input_layer[:, DIM_TEXT + DIM_CITATION + NUM_SEC:]
    textual_inf = tflearn.embedding(input_text, input_dim=142698, output_dim=dim_embedding, name='word_embedding')
    # Distinct names for the two group embeddings (the original reused
    # 'group_embedding' for both), sized to dim_embedding so they line up
    # with the fully connected layer below.
    # Alternative: learn them with tflearn.embedding(input_section, ...) /
    # tflearn.embedding(input_subsection, ...) instead of raw variables.
    section_embedding = tf.Variable(tf.random_normal([NUM_SEC, dim_embedding]), name='section_embedding')
    subsection_embedding = tf.Variable(tf.random_normal([NUM_SUBS, dim_embedding]), name='subsection_embedding')
    class_embedding = tf.Variable(tf.random_normal([NUM_CLASS, dim_embedding]), name='class_embedding')

    textual_embedding = tflearn.lstm(textual_inf, dim_embedding, dropout=0.8, name='lstm')
    network = tf.concat([textual_embedding, input_citation], 1)
    network = tflearn.fully_connected(network, dim_embedding, activation='elu')
    # _cat_weighted is a project helper defined elsewhere; it folds the
    # group embeddings into the representation (see the sketch after
    # example #2).
    network = _cat_weighted(network, section_embedding)
    network = _cat_weighted(network, subsection_embedding)
    network = tf.matmul(network, tf.transpose(class_embedding), name='class_weight')
    # Sigmoid rather than softmax: each class is scored independently,
    # which suits the multi-label objective.
    network = tflearn.sigmoid(network)
    # loss_multilabel() is defined elsewhere; tflearn's
    # objectives.binary_crossentropy(y_pred, y_true) computes sigmoid
    # cross-entropy between logits and labels.
    network = tflearn.regression(network, optimizer='adam', learning_rate=0.001, dtype=tf.float64, batch_size=batch_s,
                                 loss=loss_multilabel())
    return network
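
loss_multilabel() is not defined in this snippet. Judging from the comments above, it most likely wraps tflearn's sigmoid cross-entropy objective. A minimal sketch under that assumption (the real helper may differ):

# Hypothetical reconstruction of loss_multilabel (not shown in the original):
# returns a callable that tflearn's regression layer applies to
# (y_pred, y_true), using the binary cross-entropy hinted at in the comments.
def loss_multilabel():
    def loss(y_pred, y_true):
        # Each class is scored independently, the usual multi-label objective.
        return tflearn.objectives.binary_crossentropy(y_pred, y_true)
    return loss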
Code Example #2
    def model(self):
        # Build the model. The input tensors (self.input_text,
        # self.input_citation, ...) are created elsewhere in the class; the
        # commented-out slicing below shows how they map onto one flat
        # feature vector.
        # input_layer = tflearn.input_data(shape=[None, self.DIM_FEATURES], dtype=tf.float64, name='input')
        # input_text = input_layer[:, 0:self.DIM_TEXT]
        # input_citation = input_layer[:, self.DIM_TEXT:self.DIM_TEXT + self.DIM_CITATION]
        # self.input_section = input_layer[:,
        #                      self.DIM_TEXT + self.DIM_CITATION:self.DIM_TEXT + self.DIM_CITATION + self.NUM_SEC]
        # self.input_subsection = input_layer[:, self.DIM_TEXT + self.DIM_CITATION + self.NUM_SEC:]
        textual_inf = tflearn.embedding(self.input_text, input_dim=self.vocab_size, output_dim=self.embedding_size,
                                        name='word_embedding')
        # Distinct names for the two group embeddings (the original reused
        # 'group_embedding' for both).
        section_embedding = tf.Variable(tf.random_normal([self.NUM_SEC, self.embedding_size]),
                                        name='section_embedding')
        subsection_embedding = tf.Variable(tf.random_normal([self.NUM_SUBS, self.embedding_size]),
                                           name='subsection_embedding')
        class_embedding = tf.Variable(tf.random_normal([self.NUM_CLASS, self.embedding_size]), name='class_embedding')

        textual_embedding = tflearn.lstm(textual_inf, self.lstm_hidden_size, dropout=0.8, name='lstm')
        network = tf.concat([textual_embedding, self.input_citation], 1)
        network = tflearn.fully_connected(network, self.embedding_size, activation='elu')
        # _cat_weighted returns the attention weights over the group
        # embeddings along with the updated representation.
        self.weight_section, network = self._cat_weighted(network, section_embedding)
        self.weight_subsection, network = self._cat_weighted(network, subsection_embedding)
        self.weight_class = tf.matmul(network, tf.transpose(class_embedding), name='class_weight')
        network = tflearn.sigmoid(self.weight_class)
        return network
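
_cat_weighted is a project helper that is not shown in either example. Its name and the two return values used in example #2 (attention weights plus the updated network) suggest a softmax attention over the group embeddings. The following is purely an assumed sketch, not the project's actual code:

# Hypothetical sketch of _cat_weighted: score each group embedding against
# the current representation, softmax the scores, and fold the weighted
# group vector back in.
def _cat_weighted(self, network, group_embedding):
    scores = tf.matmul(network, tf.transpose(group_embedding))  # (batch, num_groups)
    weights = tf.nn.softmax(scores)                             # attention over groups
    weighted = tf.matmul(weights, group_embedding)              # (batch, embedding_size)
    combined = tf.concat([network, weighted], 1)
    # Project back to embedding_size so the later matmul against
    # class_embedding still lines up.
    combined = tflearn.fully_connected(combined, self.embedding_size, activation='elu')
    return weights, combined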
Code Example #3
File: scene_rcae.py    Project: tartaruszen/gad-1
import tflearn


def encoder(inputs, hidden_layer):
    # hidden_layer is unused in this variant; it is kept for interface
    # parity with the other encoders below.
    nb_feature = 64
    net = tflearn.conv_2d(inputs, 16, 3, strides=2)
    net = tflearn.batch_normalization(net)
    net = tflearn.elu(net)
    print("========================")
    print("enc-L1", net.get_shape())
    print("========================")

    net = tflearn.conv_2d(net, 16, 3, strides=1)
    net = tflearn.batch_normalization(net)
    net = tflearn.elu(net)
    print("========================")
    print("enc-L2", net.get_shape())
    print("========================")

    net = tflearn.conv_2d(net, 32, 3, strides=2)
    net = tflearn.batch_normalization(net)
    net = tflearn.elu(net)
    print("========================")
    print("enc-L3", net.get_shape())
    print("========================")

    net = tflearn.conv_2d(net, 32, 3, strides=1)
    net = tflearn.batch_normalization(net)
    net = tflearn.elu(net)
    print("========================")
    print("enc-L4", net.get_shape())
    print("========================")

    net = tflearn.flatten(net)
    # net = tflearn.fully_connected(net, nb_feature, activation="sigmoid")
    net = tflearn.fully_connected(net, nb_feature)

    # Unlike the later variants, this one returns the layer's weight matrix
    # rather than the pre-normalization activations.
    h = net.W
    print("Encoder Weights shape", h.get_shape())

    net = tflearn.batch_normalization(net)
    net = tflearn.sigmoid(net)
    print("========================")
    print("hidden", net.get_shape())
    print("========================")

    return [net, h]
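
The snippet never shows what feeds the encoder. A minimal driver, assuming square grayscale images (the input shape is an assumption, not taken from the project):

# Hypothetical usage: shapes assumed, not from scene_rcae.py
import tflearn

images = tflearn.input_data(shape=[None, 32, 32, 1], name='input')
code, fc_weights = encoder(images, hidden_layer=None)  # hidden_layer is unused
# 'code' is the 64-unit sigmoid bottleneck; 'fc_weights' the fully_connected weights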
Code Example #4
def encoder(inputs, hidden_layer):
    # nb_feature is not defined in the original snippet; 64 matches the
    # bottleneck width used in example #3.
    nb_feature = 64
    net = tflearn.conv_2d(inputs, 16, 3, strides=2)
    net = tflearn.batch_normalization(net)
    net = tflearn.elu(net)
    print("========================")
    print("enc-L1", net.get_shape())
    print("========================")

    net = tflearn.conv_2d(net, 16, 3, strides=1)
    net = tflearn.batch_normalization(net)
    net = tflearn.elu(net)
    print("========================")
    print("enc-L2", net.get_shape())
    print("========================")

    net = tflearn.conv_2d(net, 32, 3, strides=2)
    net = tflearn.batch_normalization(net)
    net = tflearn.elu(net)
    print("========================")
    print("enc-L3", net.get_shape())
    print("========================")

    net = tflearn.conv_2d(net, 32, 3, strides=1)
    net = tflearn.batch_normalization(net)
    net = tflearn.elu(net)
    print("========================")
    print("enc-L4", net.get_shape())
    print("========================")

    net = tflearn.flatten(net)
    # net = tflearn.fully_connected(net, nb_feature, activation="sigmoid")
    net = tflearn.fully_connected(net, nb_feature)
    # Rebinds the hidden_layer parameter to the pre-normalization
    # activations, which are returned alongside the sigmoid output.
    hidden_layer = net
    net = tflearn.batch_normalization(net)
    net = tflearn.sigmoid(net)
    print("========================")
    print("hidden", net.get_shape())
    print("========================")

    return [net, hidden_layer]
Code Example #5
File: RCAE.py    Project: kiminh/AMAD
    def encoder(self, inputs, hidden_layer):
        net = tflearn.conv_2d(inputs, 16, 3, strides=2)
        net = tflearn.batch_normalization(net)
        net = tflearn.elu(net)
        print("========================")
        print("enc-L1", net.get_shape())
        print("========================")

        net = tflearn.conv_2d(net, 16, 3, strides=1)
        net = tflearn.batch_normalization(net)
        net = tflearn.elu(net)
        print("========================")
        print("enc-L2", net.get_shape())
        print("========================")

        net = tflearn.conv_2d(net, 32, 3, strides=2)
        net = tflearn.batch_normalization(net)
        net = tflearn.elu(net)
        print("========================")
        print("enc-L3", net.get_shape())
        print("========================")

        net = tflearn.conv_2d(net, 32, 3, strides=1)
        net = tflearn.batch_normalization(net)
        net = tflearn.elu(net)
        print("========================")
        print("enc-L4", net.get_shape())
        print("========================")

        net = tflearn.flatten(net)
        # net = tflearn.fully_connected(net, nb_feature, activation="sigmoid")
        # The bottleneck width comes from the instance configuration here,
        # unlike the module-level variants above.
        net = tflearn.fully_connected(net, self.instance_dim)
        hidden_layer = net
        net = tflearn.batch_normalization(net)
        net = tflearn.sigmoid(net)
        print("========================")
        print("hidden", net.get_shape())
        print("========================")

        return [net, hidden_layer]

def encoder(inputs):
    # nb_feature is not defined in this snippet either; 64 matches example #3.
    nb_feature = 64
    net = tflearn.conv_2d(inputs, 16, 3, strides=2)
    net = tflearn.batch_normalization(net)
    net = tflearn.elu(net)
    print("========================")
    print("enc-L1", net.get_shape())
    print("========================")

    net = tflearn.conv_2d(net, 16, 3, strides=1)
    net = tflearn.batch_normalization(net)
    net = tflearn.elu(net)
    print("========================")
    print("enc-L2", net.get_shape())
    print("========================")

    net = tflearn.conv_2d(net, 32, 3, strides=2)
    net = tflearn.batch_normalization(net)
    net = tflearn.elu(net)
    print("========================")
    print("enc-L3", net.get_shape())
    print("========================")

    net = tflearn.conv_2d(net, 32, 3, strides=1)
    net = tflearn.batch_normalization(net)
    net = tflearn.elu(net)
    print("========================")
    print("enc-L4", net.get_shape())
    print("========================")

    net = tflearn.flatten(net)
    net = tflearn.fully_connected(net, nb_feature)
    hidden_layer = net
    net = tflearn.batch_normalization(net)
    net = tflearn.sigmoid(net)
    print("========================")
    print("enc-hidden_L", net.get_shape())
    print("========================")

    return [net, hidden_layer]
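
Both files (scene_rcae.py, RCAE.py) implement convolutional autoencoders, so each of these encoders is presumably paired with a mirror-image decoder that the snippets do not show. A sketch of what such a mirror could look like with tflearn's conv_2d_transpose, where every shape is an assumption based on 32x32 single-channel inputs:

# Hypothetical decoder (not from either project); output_shape values assume
# 32x32x1 encoder inputs, which leave an 8x8x32 tensor before the flatten.
import tensorflow as tf
import tflearn

def decoder(code):
    net = tflearn.fully_connected(code, 8 * 8 * 32)  # undo the flatten
    net = tf.reshape(net, [-1, 8, 8, 32])
    net = tflearn.conv_2d_transpose(net, 32, 3, [8, 8], strides=1)
    net = tflearn.batch_normalization(net)
    net = tflearn.elu(net)
    net = tflearn.conv_2d_transpose(net, 16, 3, [16, 16], strides=2)
    net = tflearn.batch_normalization(net)
    net = tflearn.elu(net)
    net = tflearn.conv_2d_transpose(net, 16, 3, [16, 16], strides=1)
    net = tflearn.batch_normalization(net)
    net = tflearn.elu(net)
    # Final layer reconstructs the single input channel in [0, 1].
    net = tflearn.conv_2d_transpose(net, 1, 3, [32, 32], strides=2, activation='sigmoid')
    return net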