def vgg16(input, num_class):

    x = zqtflearn.conv_2d(input, 64, 3, activation='relu', scope='conv1_1')
    x = zqtflearn.conv_2d(x, 64, 3, activation='relu', scope='conv1_2')
    x = zqtflearn.max_pool_2d(x, 2, strides=2, name='maxpool1')

    x = zqtflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_1')
    x = zqtflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_2')
    x = zqtflearn.max_pool_2d(x, 2, strides=2, name='maxpool2')

    x = zqtflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_1')
    x = zqtflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_2')
    x = zqtflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_3')
    x = zqtflearn.max_pool_2d(x, 2, strides=2, name='maxpool3')

    x = zqtflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_1')
    x = zqtflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_2')
    x = zqtflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_3')
    x = zqtflearn.max_pool_2d(x, 2, strides=2, name='maxpool4')

    x = zqtflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_1')
    x = zqtflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_2')
    x = zqtflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_3')
    x = zqtflearn.max_pool_2d(x, 2, strides=2, name='maxpool5')

    x = zqtflearn.fully_connected(x, 4096, activation='relu', scope='fc6')
    x = zqtflearn.dropout(x, 0.5, name='dropout1')

    x = zqtflearn.fully_connected(x, 4096, activation='relu', scope='fc7')
    x = zqtflearn.dropout(x, 0.5, name='dropout2')

    x = zqtflearn.fully_connected(x, num_class, activation='softmax', scope='fc8',
                                  restore=False)

    return x
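
# A minimal fine-tuning sketch for the vgg16 network above, assuming zqtflearn
# mirrors TFLearn's API (input_data/regression/DNN, DNN.load with weights_only)
# and ships the oxflower17 dataset; the checkpoint file 'vgg16.zqtflearn' is a
# placeholder, not something provided by this code.
import zqtflearn
import zqtflearn.datasets.oxflower17 as oxflower17

X, Y = oxflower17.load_data(one_hot=True, resize_pics=(224, 224))

x = zqtflearn.input_data(shape=[None, 224, 224, 3], name='input')
softmax = vgg16(x, 17)  # 17 flower classes
net = zqtflearn.regression(softmax, optimizer='adam', learning_rate=0.001,
                           loss='categorical_crossentropy', restore=False)
model = zqtflearn.DNN(net, checkpoint_path='vgg-finetuning')
model.load('vgg16.zqtflearn', weights_only=True)  # pretrained weights (placeholder file)
model.fit(X, Y, n_epoch=10, batch_size=32, shuffle=True, show_metric=True)
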
def make_core_network(network):
    dense1 = zqtflearn.fully_connected(network, 64, activation='tanh',
                                       regularizer='L2', weight_decay=0.001, name="dense1")
    dropout1 = zqtflearn.dropout(dense1, 0.8)
    dense2 = zqtflearn.fully_connected(dropout1, 64, activation='tanh',
                                       regularizer='L2', weight_decay=0.001, name="dense2")
    dropout2 = zqtflearn.dropout(dense2, 0.8)
    softmax = zqtflearn.fully_connected(dropout2, 10, activation='softmax', name="softmax")
    return softmax
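
# Reuse sketch for make_core_network, assuming zqtflearn mirrors TFLearn's
# input_data/regression/DNN API; shapes and hyperparameters are illustrative only.
import zqtflearn

net = zqtflearn.input_data(shape=[None, 784])
net = zqtflearn.regression(make_core_network(net), optimizer='sgd',
                           learning_rate=0.1, loss='categorical_crossentropy')
model = zqtflearn.DNN(net, tensorboard_verbose=0)
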
    def deep_model(self, wide_inputs, n_inputs, n_nodes=[100, 50], use_dropout=False):
        '''
        Model - deep, i.e. two-layer fully connected network model
        '''
        cc_input_var = {}
        cc_embed_var = {}
        flat_vars = []
        if self.verbose:
            print ("--> deep model: %s categories, %d continuous" % (len(self.categorical_columns), n_inputs))
        for cc, cc_size in self.categorical_columns.items():
            cc_input_var[cc] = zqtflearn.input_data(shape=[None, 1], name="%s_in" % cc, dtype=tf.int32)
            # embedding layers only work on CPU!  No GPU implementation in tensorflow, yet!
            cc_embed_var[cc] = zqtflearn.layers.embedding_ops.embedding(cc_input_var[cc], cc_size, 8, name="deep_%s_embed" % cc)
            if self.verbose:
                print ("    %s_embed = %s" % (cc, cc_embed_var[cc]))
            flat_vars.append(tf.squeeze(cc_embed_var[cc], squeeze_dims=[1], name="%s_squeeze" % cc))

        network = tf.concat([wide_inputs] + flat_vars, 1, name="deep_concat")
        for k in range(len(n_nodes)):
            network = zqtflearn.fully_connected(network, n_nodes[k], activation="relu", name="deep_fc%d" % (k + 1))
            if use_dropout:
                network = zqtflearn.dropout(network, 0.5, name="deep_dropout%d" % (k + 1))
        if self.verbose:
            print ("Deep model network before output %s" % network)
        network = zqtflearn.fully_connected(network, 1, activation="linear", name="deep_fc_output", bias=False)
        network = tf.reshape(network, [-1, 1])	# so that accuracy is binary_accuracy
        if self.verbose:
            print ("Deep model network %s" % network)
        return network
    def deep_model(self, wide_inputs, n_inputs, n_nodes=[100, 50], use_dropout=False):
        '''
        Model - deep, i.e. two-layer fully connected network model
        '''
        cc_input_var = {}
        cc_embed_var = {}
        flat_vars = []
        if self.verbose:
            print ("--> deep model: %s categories, %d continuous" % (len(self.categorical_columns), n_inputs))
        for cc, cc_size in self.categorical_columns.items():
            cc_input_var[cc] = zqtflearn.input_data(shape=[None, 1], name="%s_in" % cc, dtype=tf.int32)#[?,1]
            # embedding layers only work on CPU!  No GPU implementation in tensorflow, yet!
            cc_embed_var[cc] = zqtflearn.layers.embedding_ops.embedding(cc_input_var[cc], cc_size, 8, name="deep_%s_embed" % cc) #[?,1,embedding_size = 8]
            if self.verbose:
                print ("    %s_embed = %s" % (cc, cc_embed_var[cc]))
            flat_vars.append(tf.squeeze(cc_embed_var[cc], squeeze_dims=[1], name="%s_squeeze" % cc)) #[?,8]


        network = tf.concat([wide_inputs] + flat_vars, axis=1, name="deep_concat")  # width = sum(embedding dims in flat_vars) + wide_inputs width -> [?, x]
        # This is the merge step: the wide inputs and the flattened embeddings are simply concatenated.

        # From here on the merged input is processed jointly.
        for k in range(len(n_nodes)):  # two fully connected layers by default
            network = zqtflearn.fully_connected(network, n_nodes[k], activation="relu", name="deep_fc%d" % (k + 1))  # bias is on by default; otherwise bias=False below would be pointless
            if use_dropout:
                network = zqtflearn.dropout(network, 0.5, name="deep_dropout%d" % (k + 1))
        if self.verbose:
            print ("Deep model network before output %s" % network)
        network = zqtflearn.fully_connected(network, 1, activation="linear", name="deep_fc_output", bias=False) #[?,1]
        network = tf.reshape(network, [-1, 1])  # so that accuracy is binary_accuracy (added by zhengquan: isn't it already [?, 1] without the reshape? possibly the result is shaped [?] when the final output dim is 1)
        if self.verbose:
            print ("Deep model network %s" % network)
        return network
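
    # Worked shape check (illustrative numbers, not taken from the original code):
    # with, say, 5 categorical columns each embedded to 8 dims and 10 continuous
    # wide inputs, the deep_concat above has width 5 * 8 + 10 = 50, so `network`
    # enters the fully connected stack as a [?, 50] tensor.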
Example No. 5
    def test_sequencegenerator(self):

        with tf.Graph().as_default():
            text = "123456789101234567891012345678910123456789101234567891012345678910"
            maxlen = 5

            X, Y, char_idx = \
                zqtflearn.data_utils.string_to_semi_redundant_sequences(text, seq_maxlen=maxlen, redun_step=3)

            g = zqtflearn.input_data(shape=[None, maxlen, len(char_idx)])
            g = zqtflearn.lstm(g, 32)
            g = zqtflearn.dropout(g, 0.5)
            g = zqtflearn.fully_connected(g, len(char_idx), activation='softmax')
            g = zqtflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                                     learning_rate=0.1)

            m = zqtflearn.SequenceGenerator(g, dictionary=char_idx,
                                            seq_maxlen=maxlen,
                                            clip_gradients=5.0)
            m.fit(X, Y, validation_set=0.1, n_epoch=100, snapshot_epoch=False)
            res = m.generate(10, temperature=.5, seq_seed="12345")
            #self.assertEqual(res, "123456789101234", "SequenceGenerator test failed! Generated sequence: " + res + " expected '123456789101234'")

            # Testing save method
            m.save("test_seqgen.zqtflearn")
            self.assertTrue(os.path.exists("test_seqgen.zqtflearn.index"))

            # Testing load method
            m.load("test_seqgen.zqtflearn")
            res = m.generate(10, temperature=.5, seq_seed="12345")
Example No. 6
    def deep_model(self,
                   wide_inputs,
                   deep_inputs,
                   n_inputs,
                   n_nodes=[100, 50],
                   use_dropout=False):
        '''
        Model - deep, i.e. two-layer fully connected network model
        '''
        self.vocab, self.embedding = self.read_embedding(
            embedding_path="I haven't dealt with it yet")  # placeholder path
        self.embedding = np.array(self.embedding, dtype=np.float32)

        net = zqtflearn.layers.embedding_ops.embedding(
            deep_inputs,
            len(self.vocab),
            self.embedding.shape[1],
            trainable=False,
            name="deep_video_ids_embed")
        net = tf.squeeze(net, squeeze_dims=[1], name="video_ids_squeeze")  # [?, 1, dim] -> [?, dim] so it can be concatenated with wide_inputs

        network = tf.concat(
            [wide_inputs, net], axis=1, name="deep_concat"
        )  # width = embedding dim of net + wide_inputs width -> [?, x]
        # This is the merge step: the wide inputs and the embedding are simply concatenated.

        # From here on the merged input is processed jointly.
        for k in range(len(n_nodes)):  # two fully connected layers by default
            network = zqtflearn.fully_connected(
                network,
                n_nodes[k],
                activation="relu",
                name="deep_fc%d" % (k + 1))  #默认应该是用bais的。要不然下面为什么要写bias=False
            if use_dropout:
                network = zqtflearn.dropout(network,
                                            0.5,
                                            name="deep_dropout%d" % (k + 1))
        if self.verbose:
            print("Deep model network before output %s" % network)
        network = zqtflearn.fully_connected(network,
                                            1,
                                            activation="linear",
                                            name="deep_fc_output",
                                            bias=False)  #[?,1]
        network = tf.reshape(
            network, [-1, 1]
        )  # so that accuracy is binary_accuracy (added by zhengquan: isn't it already [?, 1] without the reshape? possibly the result is shaped [?] when the final output dim is 1)
        if self.verbose:
            print("Deep model network %s" % network)
        return network
Example No. 7
    def test_core_layers(self):

        X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
        Y_nand = [[1.], [1.], [1.], [0.]]
        Y_or = [[0.], [1.], [1.], [1.]]

        # Graph definition
        with tf.Graph().as_default():
            # Building a network with 2 optimizers
            g = zqtflearn.input_data(shape=[None, 2])

            # Nand operator definition
            g_nand = zqtflearn.fully_connected(g, 32, activation='linear')
            g_nand = zqtflearn.fully_connected(g_nand, 32, activation='linear')
            g_nand = zqtflearn.fully_connected(g_nand, 1, activation='sigmoid')
            g_nand = zqtflearn.regression(g_nand, optimizer='sgd',
                                          learning_rate=2.,
                                          loss='binary_crossentropy')
            # Or operator definition
            g_or = zqtflearn.fully_connected(g, 32, activation='linear')
            g_or = zqtflearn.fully_connected(g_or, 32, activation='linear')
            g_or = zqtflearn.fully_connected(g_or, 1, activation='sigmoid')
            g_or = zqtflearn.regression(g_or, optimizer='sgd',
                                        learning_rate=2.,
                                        loss='binary_crossentropy')
            # XOR merging Nand and Or operators
            g_xor = zqtflearn.merge([g_nand, g_or], mode='elemwise_mul')

            # Training
            m = zqtflearn.DNN(g_xor)
            m.fit(X, [Y_nand, Y_or], n_epoch=400, snapshot_epoch=False)

            # Testing
            self.assertLess(m.predict([[0., 0.]])[0][0], 0.01)
            self.assertGreater(m.predict([[0., 1.]])[0][0], 0.9)
            self.assertGreater(m.predict([[1., 0.]])[0][0], 0.9)
            self.assertLess(m.predict([[1., 1.]])[0][0], 0.01)

        # Bulk Tests
        with tf.Graph().as_default():
            net = zqtflearn.input_data(shape=[None, 2])
            net = zqtflearn.flatten(net)
            net = zqtflearn.reshape(net, new_shape=[-1])
            net = zqtflearn.activation(net, 'relu')
            net = zqtflearn.dropout(net, 0.5)
            net = zqtflearn.single_unit(net)
Example No. 8
    def test_sequencegenerator_words(self):

        with tf.Graph().as_default():
            text = ["hello","world"]*100
            word_idx = {"hello": 0, "world": 1}
            maxlen = 2

            vec = [x for x in map(word_idx.get, text) if x is not None]

            sequences = []
            next_words = []
            for i in range(0, len(vec) - maxlen, 3):
                sequences.append(vec[i: i + maxlen])
                next_words.append(vec[i + maxlen])

            X = np.zeros((len(sequences), maxlen, len(word_idx)), dtype=bool)
            Y = np.zeros((len(sequences), len(word_idx)), dtype=bool)
            for i, seq in enumerate(sequences):
                for t, idx in enumerate(seq):
                    X[i, t, idx] = True
                    Y[i, next_words[i]] = True

            g = zqtflearn.input_data(shape=[None, maxlen, len(word_idx)])
            g = zqtflearn.lstm(g, 32)
            g = zqtflearn.dropout(g, 0.5)
            g = zqtflearn.fully_connected(g, len(word_idx), activation='softmax')
            g = zqtflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                                     learning_rate=0.1)

            m = zqtflearn.SequenceGenerator(g, dictionary=word_idx,
                                            seq_maxlen=maxlen,
                                            clip_gradients=5.0)
            m.fit(X, Y, validation_set=0.1, n_epoch=100, snapshot_epoch=False)
            res = m.generate(4, temperature=.5, seq_seed=["hello","world"])
            res_str = " ".join(res[-2:])
            self.assertEqual(res_str, "hello world", "SequenceGenerator (word level) test failed! Generated sequence: " + res_str + " expected 'hello world'")

            # Testing save method
            m.save("test_seqgen_word.zqtflearn")
            self.assertTrue(os.path.exists("test_seqgen_word.zqtflearn.index"))

            # Testing load method
            m.load("test_seqgen_word.zqtflearn")
            res = m.generate(4, temperature=.5, seq_seed=["hello","world"])
            res_str = " ".join(res[-2:])
            self.assertEqual(res_str, "hello world", "Reloaded SequenceGenerator (word level) test failed! Generated sequence: " + res_str + " expected 'hello world'")
    def deep_model(self,
                   wide_inputs,
                   deep_inputs,
                   n_inputs,
                   n_nodes=[100, 50],
                   use_dropout=False):
        '''
        Model - deep, i.e. two-layer fully connected network model
        '''
        vocab = pickle.load(open("video_ids.pkl", "rb"))
        vocab = list(vocab)

        self.vocab = {}
        for idx, wd in enumerate(vocab):
            self.vocab[wd] = idx
        self.vocab["[UNK]"] = len(self.vocab)
        if os.path.exists('self_embedding.pkl'):
            self.embedding = pickle.load(open("self_embedding.pkl", "rb"))
            print("loaded cached self.embedding")
        else:
            print("making new self.embedding!!!")
            self.embedding = self.read_embedding(
                embedding_path='videoID_vector.pkl', vocab=self.vocab)
            pickle.dump(self.embedding, open("self_embedding.pkl", "wb"))

        net = zqtflearn.layers.embedding_ops.embedding(
            deep_inputs,
            len(self.vocab),
            self.embedding.shape[1],
            trainable=False,
            name="deep_video_ids_embed")
        net = tf.squeeze(net, squeeze_dims=[1], name="video_ids_squeeze")
        # net = zqtflearn.fully_connected(net, 108 , activation="relu",name="deep_fc_108" )
        # net = zqtflearn.fully_connected(net, 54, activation="relu", name="deep_fc_54")
        network = tf.concat(
            [wide_inputs, net], axis=1, name="deep_concat"
        )  # width = embedding dim of net + wide_inputs width -> [?, x]

        print("n_nodes = ", n_nodes)
        for k in range(len(n_nodes)):
            network = zqtflearn.fully_connected(
                network,
                n_nodes[k],
                activation="relu",
                name="deep_fc%d" % (k + 1))  # 默认应该是用bais的。要不然下面为什么要写bias=False
            if use_dropout:
                network = zqtflearn.dropout(network,
                                            0.5,
                                            name="deep_dropout%d" % (k + 1))
        if self.verbose:
            print("Deep model network before output %s" % network)
        network = zqtflearn.fully_connected(network,
                                            1,
                                            activation="linear",
                                            name="deep_fc_output",
                                            bias=False)  # [?,1]

        network = tf.reshape(
            network, [-1, 1]
        )  # so that accuracy is binary_accuracy (added by zhengquan: isn't it already [?, 1] without the reshape? possibly the result is shaped [?] when the final output dim is 1)
        if self.verbose:
            print("Deep model network %s" % network)
        return network
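
    # Note (assumption, not shown in this snippet): because the embedding layer
    # above is created with trainable=False, the pretrained vectors in
    # self.embedding still have to be copied into the layer once the DNN model
    # exists, roughly along the lines of:
    #     emb_vars = zqtflearn.variables.get_layer_variables_by_name("deep_video_ids_embed")
    #     model.set_weights(emb_vars[0], self.embedding)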
Example No. 10
import zqtflearn

# Data loading and preprocessing
import zqtflearn.datasets.mnist as mnist

X, Y, testX, testY = mnist.load_data(one_hot=True)

# Building deep neural network
input_layer = zqtflearn.input_data(shape=[None, 784])
dense1 = zqtflearn.fully_connected(input_layer,
                                   64,
                                   activation='tanh',
                                   regularizer='L2',
                                   weight_decay=0.001)
dropout1 = zqtflearn.dropout(dense1, 0.8)
dense2 = zqtflearn.fully_connected(dropout1,
                                   64,
                                   activation='tanh',
                                   regularizer='L2',
                                   weight_decay=0.001)
dropout2 = zqtflearn.dropout(dense2, 0.8)
softmax = zqtflearn.fully_connected(dropout2, 10, activation='softmax')

# Regression using SGD with learning rate decay and Top-3 accuracy
sgd = zqtflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
top_k = zqtflearn.metrics.Top_k(3)
net = zqtflearn.regression(softmax,
                           optimizer=sgd,
                           metric=top_k,
                           loss='categorical_crossentropy')
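
# Training sketch: wrap the regression op in zqtflearn's DNN trainer and fit on
# MNIST, assuming the DNN/fit interface matches TFLearn's; epoch count and run_id
# are illustrative.
model = zqtflearn.DNN(net, tensorboard_verbose=0)
model.fit(X, Y, n_epoch=10, validation_set=(testX, testY),
          show_metric=True, run_id="dense_model")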
Example No. 11
import tensorflow as tf
import zqtflearn
import zqtflearn.datasets.mnist as mnist

mnist_data = mnist.read_data_sets(one_hot=True)

# User defined placeholders
with tf.Graph().as_default():
    # Placeholders for data and labels
    X = tf.placeholder(shape=(None, 784), dtype=tf.float32)
    Y = tf.placeholder(shape=(None, 10), dtype=tf.float32)

    net = tf.reshape(X, [-1, 28, 28, 1])

    # Using TFLearn wrappers for network building
    net = zqtflearn.conv_2d(net, 32, 3, activation='relu')
    net = zqtflearn.max_pool_2d(net, 2)
    net = zqtflearn.local_response_normalization(net)
    net = zqtflearn.dropout(net, 0.8)
    net = zqtflearn.conv_2d(net, 64, 3, activation='relu')
    net = zqtflearn.max_pool_2d(net, 2)
    net = zqtflearn.local_response_normalization(net)
    net = zqtflearn.dropout(net, 0.8)
    net = zqtflearn.fully_connected(net, 128, activation='tanh')
    net = zqtflearn.dropout(net, 0.8)
    net = zqtflearn.fully_connected(net, 256, activation='tanh')
    net = zqtflearn.dropout(net, 0.8)
    net = zqtflearn.fully_connected(net, 10, activation='linear')

    # Defining other ops using Tensorflow
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(logits=net, labels=Y))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)
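
    # Training loop sketch: a plain TensorFlow 1.x session over the ops defined
    # above. Assumes mnist_data exposes train/test splits with next_batch(), as
    # the TF tutorial reader does; batch size and step count are illustrative.
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for step in range(1000):
            batch_x, batch_y = mnist_data.train.next_batch(128)
            _, batch_loss = sess.run([optimizer, loss],
                                     feed_dict={X: batch_x, Y: batch_y})
            if step % 100 == 0:
                print("step %d, loss %.4f" % (step, batch_loss))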
Example No. 12
import io
import os
import ssl

from six import moves

import zqtflearn
from zqtflearn.data_utils import *

path = "US_Cities.txt"
if not os.path.isfile(path):
    # urlretrieve takes no context argument, so install an unverified SSL context
    # globally before downloading.
    ssl._create_default_https_context = ssl._create_unverified_context
    moves.urllib.request.urlretrieve("https://raw.githubusercontent.com/tflearn/tflearn.github.io/master/resources/US_Cities.txt", path)

maxlen = 20

string_utf8 = io.open(path, "r", encoding="utf-8").read()
X, Y, char_idx = \
    string_to_semi_redundant_sequences(string_utf8, seq_maxlen=maxlen, redun_step=3)

g = zqtflearn.input_data(shape=[None, maxlen, len(char_idx)])
g = zqtflearn.lstm(g, 512, return_seq=True)
g = zqtflearn.dropout(g, 0.5)
g = zqtflearn.lstm(g, 512)
g = zqtflearn.dropout(g, 0.5)
g = zqtflearn.fully_connected(g, len(char_idx), activation='softmax')
g = zqtflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                         learning_rate=0.001)

m = zqtflearn.SequenceGenerator(g, dictionary=char_idx,
                                seq_maxlen=maxlen,
                                clip_gradients=5.0,
                                checkpoint_path='model_us_cities')

for i in range(40):
    seed = random_sequence_from_string(string_utf8, maxlen)
    m.fit(X, Y, validation_set=0.1, batch_size=128,
          n_epoch=1, run_id='us_cities')