Example #1
def yn_net():
    net = tflearn.input_data(shape=[None, img_rows, img_cols, 1]) #D = 256, 256
    net = tflearn.conv_2d(net,nb_filter=8,filter_size=3, activation='relu', name='conv0.1')
    net = tflearn.conv_2d(net,nb_filter=8,filter_size=3, activation='relu', name='conv0.2')
    net = tflearn.max_pool_2d(net, kernel_size = [2,2], name='maxpool0') #D = 128, 128
    net = tflearn.dropout(net,0.75,name='dropout0')
    net = tflearn.conv_2d(net,nb_filter=16,filter_size=3, activation='relu', name='conv1.1')
    net = tflearn.conv_2d(net,nb_filter=16,filter_size=3, activation='relu', name='conv1.2')
    net = tflearn.max_pool_2d(net, kernel_size = [2,2], name='maxpool1') #D = 64,  64
    net = tflearn.dropout(net,0.75,name='dropout1')
    net = tflearn.conv_2d(net,nb_filter=32,filter_size=3, activation='relu', name='conv2.1')
    net = tflearn.conv_2d(net,nb_filter=32,filter_size=3, activation='relu', name='conv2.2')
    net = tflearn.max_pool_2d(net, kernel_size = [2,2], name='maxpool2') #D = 32 by 32
    net = tflearn.dropout(net,0.75,name='dropout2')
    net = tflearn.conv_2d(net,nb_filter=32,filter_size=3, activation='relu', name='conv3.1')
    net = tflearn.conv_2d(net,nb_filter=32,filter_size=3, activation='relu', name='conv3.2')
    net = tflearn.max_pool_2d(net, kernel_size = [2,2], name='maxpool3') #D = 16 by 16
    net = tflearn.dropout(net,0.75,name='dropout3')
#    net = tflearn.conv_2d(net,nb_filter=64,filter_size=3, activation='relu', name='conv4.1')
#    net = tflearn.conv_2d(net,nb_filter=64,filter_size=3, activation='relu', name='conv4.2')
#    net = tflearn.max_pool_2d(net, kernel_size = [2,2], name='maxpool4') #D = 8 by 8
#    net = tflearn.dropout(net,0.75,name='dropout0')
    net = tflearn.fully_connected(net, n_units = 128, activation='relu', name='fc1')
    net = tflearn.fully_connected(net, 2, activation='sigmoid')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001)
    model = tflearn.DNN(net, tensorboard_verbose=1,tensorboard_dir='/tmp/tflearn_logs/')
    return model
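# A hypothetical smoke test for yn_net(), not part of the original snippet:
# random grayscale images and one-hot labels, just to confirm the graph
# builds and a training step runs (img_rows/img_cols taken from the
# '#D = 256, 256' comment above).
import numpy as np
img_rows, img_cols = 256, 256
X = np.random.rand(4, img_rows, img_cols, 1)
Y = np.eye(2)[np.random.randint(0, 2, 4)]  # one-hot labels, 2 classes
model = yn_net()
model.fit(X, Y, n_epoch=1, batch_size=2)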
Example #2
def use_tflearn():
    import tflearn

    # Data loading and preprocessing
    import tflearn.datasets.mnist as mnist
    X, Y, testX, testY = mnist.load_data(one_hot=True)

    # Building deep neural network
    input_layer = tflearn.input_data(shape=[None, 784])
    dense1 = tflearn.fully_connected(input_layer, 64, activation='tanh',
                                     regularizer='L2', weight_decay=0.001)
    dropout1 = tflearn.dropout(dense1, 0.8)
    dense2 = tflearn.fully_connected(dropout1, 64, activation='tanh',
                                     regularizer='L2', weight_decay=0.001)
    dropout2 = tflearn.dropout(dense2, 0.8)
    softmax = tflearn.fully_connected(dropout2, 10, activation='softmax')

    # Regression using SGD with learning rate decay and Top-3 accuracy
    sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
    top_k = tflearn.metrics.Top_k(3)
    net = tflearn.regression(softmax, optimizer=sgd, metric=top_k,
                             loss='categorical_crossentropy')

    # Training
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(X, Y, n_epoch=20, validation_set=(testX, testY),
              show_metric=True, run_id="dense_model")
def vgg16(input, num_class):

    x = tflearn.conv_2d(input, 64, 3, activation='relu', scope='conv1_1')
    x = tflearn.conv_2d(x, 64, 3, activation='relu', scope='conv1_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool1')

    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_1')
    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool2')

    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_1')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_2')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool3')

    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool4')

    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool5')

    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc6')
    x = tflearn.dropout(x, 0.5, name='dropout1')

    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc7')
    x = tflearn.dropout(x, 0.5, name='dropout2')

    x = tflearn.fully_connected(x, num_class, activation='softmax', scope='fc8',
                                restore=False)

    return x
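# Hypothetical wiring for the vgg16() above (the 224x224x3 input shape, class
# count and checkpoint path are assumptions, not from the snippet): the fc8
# layer is created with restore=False, so it is skipped when loading and a
# pretrained checkpoint can seed everything else.
inp = tflearn.input_data(shape=[None, 224, 224, 3], name='input')
softmax = vgg16(inp, num_class=10)
net = tflearn.regression(softmax, optimizer='adam',
                         loss='categorical_crossentropy', learning_rate=0.001)
model = tflearn.DNN(net)
# model.load('vgg16.tflearn', weights_only=True)  # checkpoint path assumed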
def run():
    # like a CNN input: the third dimension plays the role of the channel
    g = tflearn.input_data(shape=[None, maxlen, len(char_idx)])
    g = tflearn.lstm(g, 512, return_seq=True)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, 512)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam',
                           loss='categorical_crossentropy',
                           learning_rate=0.001)

    m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                                  seq_maxlen=maxlen,
                                  clip_gradients=5.0,
                                  checkpoint_path='models/model_us_cities')

    for i in range(40):
        seed = random_sequence_from_textfile(path, maxlen)
        m.fit(X, Y, validation_set=0.1, batch_size=128,
              n_epoch=1, run_id='us_cities')
        print("-- TESTING...")
        print("-- Test with temperature of 1.2 --")
        print(m.generate(30, temperature=1.2, seq_seed=seed))
        print("-- Test with temperature of 1.0 --")
        print(m.generate(30, temperature=1.0, seq_seed=seed))
        print("-- Test with temperature of 0.5 --")
        print(m.generate(30, temperature=0.5, seq_seed=seed))
Example #5
def vgg16(placeholderX=None):

    x = tflearn.input_data(shape=[None, 224, 224, 3], name='input',
                           placeholder=placeholderX)

    x = tflearn.conv_2d(x, 64, 3, activation='relu', name='conv1_1')
    x = tflearn.conv_2d(x, 64, 3, activation='relu', name='conv1_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='pool1')

    x = tflearn.conv_2d(x, 128, 3, activation='relu', name='conv2_1')
    x = tflearn.conv_2d(x, 128, 3, activation='relu', name='conv2_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='pool2')

    x = tflearn.conv_2d(x, 256, 3, activation='relu', name='conv3_1')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', name='conv3_2')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', name='conv3_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='pool3')

    x = tflearn.conv_2d(x, 512, 3, activation='relu', name='conv4_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', name='conv4_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', name='conv4_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='pool4')

    x = tflearn.conv_2d(x, 512, 3, activation='relu', name='conv5_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', name='conv5_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', name='conv5_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='pool5')

    x = tflearn.conv_2d(x, 4096, 7, activation='relu', name='fc6')
    x = tflearn.dropout(x, 0.5)

    x = tflearn.conv_2d(x, 4096, 1, activation='relu', name='fc7')
    x = tflearn.dropout(x, 0.5)

    return x
def make_core_network(network):
    dense1 = tflearn.fully_connected(network, 64, activation='tanh',
                                     regularizer='L2', weight_decay=0.001, name="dense1")
    dropout1 = tflearn.dropout(dense1, 0.8)
    dense2 = tflearn.fully_connected(dropout1, 64, activation='tanh',
                                     regularizer='L2', weight_decay=0.001, name="dense2")
    dropout2 = tflearn.dropout(dense2, 0.8)
    softmax = tflearn.fully_connected(dropout2, 10, activation='softmax', name="softmax")
    return softmax
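# A sketch of how make_core_network() might be wired up (assumed, mirroring
# the MNIST example above: 784-dimensional flattened inputs, 10 classes):
network = tflearn.input_data(shape=[None, 784], name='input')
softmax = make_core_network(network)
net = tflearn.regression(softmax, optimizer='adam',
                         loss='categorical_crossentropy')
model = tflearn.DNN(net, tensorboard_verbose=0)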
def get_model_action():
    # Network building
    net = tflearn.input_data(shape=[None, 10, 128], name='net2_layer1')
    net = tflearn.lstm(net, n_units=256, return_seq=True, name='net2_layer2')
    net = tflearn.dropout(net, 0.6, name='net2_layer3')
    net = tflearn.lstm(net, n_units=256, return_seq=False, name='net2_layer4')
    net = tflearn.dropout(net, 0.6, name='net2_layer5')
    net = tflearn.fully_connected(net, 5, activation='softmax', name='net2_layer6')
    net = tflearn.regression(net, optimizer='sgd', loss='categorical_crossentropy', learning_rate=0.001,
                             name='net2_layer7')
    return tflearn.DNN(net, clip_gradients=5.0, tensorboard_verbose=0)
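# Hypothetical usage of get_model_action(); the random 8x10x128 batch and
# 5-way one-hot labels are placeholders matching the input_data shape above.
import numpy as np
X = np.random.rand(8, 10, 128)
Y = np.eye(5)[np.random.randint(0, 5, 8)]
model = get_model_action()
model.fit(X, Y, n_epoch=1, batch_size=4, show_metric=True)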
Example #8
def shakespeare():


    path = "shakespeare_input.txt"
    #path = "shakespeare_input-100.txt"
    char_idx_file = 'char_idx.pickle'

    if not os.path.isfile(path):
        urllib.request.urlretrieve(
            "https://raw.githubusercontent.com/tflearn/tflearn.github.io/master/resources/shakespeare_input.txt", path)

    maxlen = 25

    char_idx = None
    if os.path.isfile(char_idx_file):
        print('Loading previous char_idx')
        char_idx = pickle.load(open(char_idx_file, 'rb'))

    X, Y, char_idx = \
        textfile_to_semi_redundant_sequences(path, seq_maxlen=maxlen, redun_step=3,
                                             pre_defined_char_idx=char_idx)

    pickle.dump(char_idx, open(char_idx_file, 'wb'))

    g = tflearn.input_data([None, maxlen, len(char_idx)])
    g = tflearn.lstm(g, 512, return_seq=True)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, 512, return_seq=True)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, 512)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                           learning_rate=0.001)

    m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                                  seq_maxlen=maxlen,
                                  clip_gradients=5.0,
                                  checkpoint_path='model_shakespeare')

    for i in range(50):
        seed = random_sequence_from_textfile(path, maxlen)
        m.fit(X, Y, validation_set=0.1, batch_size=128,
              n_epoch=1, run_id='shakespeare')
        print("-- TESTING...")
        print("-- Test with temperature of 1.0 --")
        print(m.generate(600, temperature=1.0, seq_seed=seed))
        #print(m.generate(10, temperature=1.0, seq_seed=seed))
        print("-- Test with temperature of 0.5 --")
        print(m.generate(600, temperature=0.5, seq_seed=seed))
    def build_cnn_network(self, network):
        """ Build CNN network.

        Args:
            network: base network.

        Returns:
            model: CNN model.

        """
        print('Building CNN network.')
        # Convolutional network building
        network = tflearn.conv_2d(network, 32,
                                  self.IMAGE_CHANNEL_NUM,
                                  activation='relu')
        network = tflearn.max_pool_2d(network, 2)
        network = tflearn.conv_2d(network, 64,
                                  self.IMAGE_CHANNEL_NUM,
                                  activation='relu')
        network = tflearn.conv_2d(network, 64,
                                  self.IMAGE_CHANNEL_NUM,
                                  activation='relu')
        network = tflearn.max_pool_2d(network, 2)
        network = tflearn.fully_connected(
            network, 32 * 32, activation='relu')
        network = tflearn.dropout(network, 0.5)
        # Two categories: positive or negative.
        network = tflearn.fully_connected(network, 2,
                                          activation='softmax')
        network = tflearn.regression(network, optimizer='adam',
                                     loss='categorical_crossentropy',
                                     learning_rate=0.001)
        print("CNN network built.")
        return network
    def deep_model(self, wide_inputs, n_inputs, n_nodes=[100, 50], use_dropout=False):
        '''
        Model - deep, i.e. two-layer fully connected network model
        '''
        cc_input_var = {}
        cc_embed_var = {}
        flat_vars = []
        if self.verbose:
            print ("--> deep model: %s categories, %d continuous" % (len(self.categorical_columns), n_inputs))
        for cc, cc_size in self.categorical_columns.items():
            cc_input_var[cc] = tflearn.input_data(shape=[None, 1], name="%s_in" % cc,  dtype=tf.int32)
            # embedding layers only work on CPU!  No GPU implementation in tensorflow, yet!
            cc_embed_var[cc] = tflearn.layers.embedding_ops.embedding(cc_input_var[cc], cc_size, 8, name="deep_%s_embed" % cc)
            if self.verbose:
                print("    %s_embed = %s" % (cc, cc_embed_var[cc]))
            flat_vars.append(tf.squeeze(cc_embed_var[cc], axis=[1], name="%s_squeeze" % cc))

        network = tf.concat([wide_inputs] + flat_vars, 1, name="deep_concat")
        for k in range(len(n_nodes)):
            network = tflearn.fully_connected(network, n_nodes[k], activation="relu", name="deep_fc%d" % (k+1))
            if use_dropout:
                network = tflearn.dropout(network, 0.5, name="deep_dropout%d" % (k+1))
        if self.verbose:
            print ("Deep model network before output %s" % network)
        network = tflearn.fully_connected(network, 1, activation="linear", name="deep_fc_output", bias=False)
        network = tf.reshape(network, [-1, 1])	# so that accuracy is binary_accuracy
        if self.verbose:
            print ("Deep model network %s" % network)
        return network
Example #11
    def test_sequencegenerator(self):

        with tf.Graph().as_default():
            text = "123456789101234567891012345678910123456789101234567891012345678910"
            maxlen = 5

            X, Y, char_idx = \
                tflearn.data_utils.string_to_semi_redundant_sequences(text, seq_maxlen=maxlen, redun_step=3)

            g = tflearn.input_data(shape=[None, maxlen, len(char_idx)])
            g = tflearn.lstm(g, 32)
            g = tflearn.dropout(g, 0.5)
            g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
            g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                                   learning_rate=0.1)

            m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                                          seq_maxlen=maxlen,
                                          clip_gradients=5.0)
            m.fit(X, Y, validation_set=0.1, n_epoch=100, snapshot_epoch=False)
            res = m.generate(10, temperature=1., seq_seed="12345")
            self.assertEqual(res, "123456789101234", "SequenceGenerator test failed! Generated sequence: " + res + " expected '123456789101234'")

            # Testing save method
            m.save("test_seqgen.tflearn")
            self.assertTrue(os.path.exists("test_seqgen.tflearn"))

            # Testing load method
            m.load("test_seqgen.tflearn")
            res = m.generate(10, temperature=1., seq_seed="12345")
            self.assertEqual(res, "123456789101234", "SequenceGenerator test failed after loading model! Generated sequence: " + res + " expected '123456789101234'")
Example #12
def generator_xss():
    global char_idx
    global xss_data_file
    global maxlen


    if os.path.isfile(char_idx_file):
        print('Loading previous xxs_char_idx')
        char_idx = pickle.load(open(char_idx_file, 'rb'))


    X, Y, char_idx = \
        textfile_to_semi_redundant_sequences(xss_data_file, seq_maxlen=maxlen, redun_step=3,
                                             pre_defined_char_idx=char_idx)


    #pickle.dump(char_idx, open(char_idx_file, 'wb'))

    g = tflearn.input_data([None, maxlen, len(char_idx)])
    g = tflearn.lstm(g, 32, return_seq=True)
    g = tflearn.dropout(g, 0.1)
    g = tflearn.lstm(g, 32, return_seq=True)
    g = tflearn.dropout(g, 0.1)
    g = tflearn.lstm(g, 32)
    g = tflearn.dropout(g, 0.1)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                           learning_rate=0.001)

    m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                                  seq_maxlen=maxlen,
                                  clip_gradients=5.0,
                                  checkpoint_path='chkpoint/model_scanner_poc')

    print "random_sequence_from_textfile"
    #seed = random_sequence_from_textfile(xss_data_file, maxlen)
    seed='"/><script>'
    m.fit(X, Y, validation_set=0.1, batch_size=128,
              n_epoch=2, run_id='scanner-poc')
    print("-- TESTING...")

    print("-- Test with temperature of 0.1 --")
    print(m.generate(32, temperature=0.1, seq_seed=seed))
    print("-- Test with temperature of 0.5 --")
    print(m.generate(32, temperature=0.5, seq_seed=seed))
    print("-- Test with temperature of 1.0 --")
    print(m.generate(32, temperature=1.0, seq_seed=seed))
def generate_net(embedding):
    net = tflearn.input_data([None, 200])
    net = tflearn.embedding(net, input_dim=300000, output_dim=128)
    net = tflearn.lstm(net, 128)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy')
    return net
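# Sketch of training the net returned by generate_net() (assumed: integer
# sequences already padded to length 200 with vocabulary ids below 300000,
# and binary one-hot labels; trainX/trainY are hypothetical names).
net = generate_net(None)  # the 'embedding' argument is unused in the snippet
model = tflearn.DNN(net, tensorboard_verbose=0)
# model.fit(trainX, trainY, validation_set=0.1, batch_size=64)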
    def simple_learn(self):
        tflearn.init_graph()
        net = tflearn.input_data(shape=[None, 64, 64, 3])
        net = tflearn.fully_connected(net, 64)
        net = tflearn.dropout(net, 0.5)
        net = tflearn.fully_connected(net, 10, activation='softmax')
        net = tflearn.regression(net, optimizer='adam', loss='softmax_categorical_crossentropy')
        model = tflearn.DNN(net)
        model.fit(self.trainset, self.trainlabels)
def build(embedding_size=(400000, 50), train_embedding=False, hidden_dims=128,
          learning_rate=0.001):
    net = tflearn.input_data([None, 200])
    net = tflearn.embedding(net, input_dim=embedding_size[0],
                            output_dim=embedding_size[1],
                            trainable=train_embedding, name='EmbeddingLayer')
    net = tflearn.lstm(net, hidden_dims, return_seq=True)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.lstm(net, hidden_dims, return_seq=True)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.lstm(net, hidden_dims, return_seq=True)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.lstm(net, hidden_dims)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate,
                             loss='categorical_crossentropy')
    return net
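# Hypothetical follow-up for build(): with train_embedding=False, a
# pretrained embedding matrix could be written into 'EmbeddingLayer' (the
# layer name comes from the snippet; the weight array itself is assumed).
net = build(embedding_size=(400000, 50), train_embedding=False)
model = tflearn.DNN(net)
emb_var = tflearn.get_layer_variables_by_name('EmbeddingLayer')[0]
# model.set_weights(emb_var, pretrained_embedding_matrix)  # matrix assumed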
Example #16
def deep_net_tflearn(X_train, X_test, Y_train, Y_test, num_epoch, first_layer, second_layer, third_layer, fourth_layer):
    # Implementation with TFLearn
    tf.reset_default_graph()
    tflearn.init_graph(num_cores=8, gpu_memory_fraction=0.8)
    tnorm = tflearn.initializations.uniform(minval=-1.0, maxval=1.0)

    # Building DNN
    nn = tflearn.input_data(shape=[None, len(X_train[0])])
    Input = nn
    nn = tflearn.fully_connected(nn, first_layer, activation='elu', regularizer='L2', weights_init=tnorm, name = "layer_1")
    nn = tflearn.dropout(nn, 0.5)
    nn = tflearn.fully_connected(nn, second_layer, activation='elu', regularizer='L2', weights_init=tnorm, name = "layer_2")
    nn = tflearn.dropout(nn, 0.5)
    nn = tflearn.fully_connected(nn, third_layer, activation='elu', regularizer='L2', weights_init=tnorm, name = "layer_3")
    nn = tflearn.dropout(nn, 0.5)
    nn = tflearn.fully_connected(nn, fourth_layer, activation='elu', regularizer='L2', weights_init=tnorm, name = "layer_4")
    nn = tflearn.dropout(nn, 0.5)
    Hidden_state = nn
    nn = tflearn.fully_connected(nn, len(Y_train[0]), activation='elu', weights_init=tnorm, name = "layer_5")
    Output = nn    
    #custom_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    #    out_layer, tf_train_labels) +
    #    0.01*tf.nn.l2_loss(hidden_weights) +
    #    0.01*tf.nn.l2_loss(hidden_biases) +
    #    0.01*tf.nn.l2_loss(out_weights) +
    #    0.01*tf.nn.l2_loss(out_biases))


    # Regression with categorical cross-entropy
    net = tflearn.regression(nn, optimizer='SGD', learning_rate=0.001, loss='categorical_crossentropy', metric=None)

    # Training the network
    model = tflearn.DNN(net, tensorboard_verbose=3)
    model.fit(X_train, Y_train, n_epoch=num_epoch, validation_set=0.1, run_id="bitsight_nn_tflearn", batch_size=128)
    pred = model.predict(X_test)
    total = 0
    correct = 0

    for i in range(len(pred)):
        total += 1
        if np.argmax(pred[i]) == np.argmax(Y_test[i]):
            correct += 1
    return total*1., correct*1.
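# Hypothetical invocation of deep_net_tflearn() with synthetic data (all
# shapes and layer widths are placeholders, not from the original snippet):
import numpy as np
X_train = np.random.rand(32, 20)
Y_train = np.eye(4)[np.random.randint(0, 4, 32)]
X_test = np.random.rand(8, 20)
Y_test = np.eye(4)[np.random.randint(0, 4, 8)]
total, correct = deep_net_tflearn(X_train, X_test, Y_train, Y_test,
                                  num_epoch=1, first_layer=64, second_layer=32,
                                  third_layer=16, fourth_layer=8)
print("accuracy:", correct / total)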
Example #17
def run():
    net = tflearn.input_data(shape=[None, 224, 224, 3])

    net = tflearn.conv_2d(net, 64, 3, activation='relu')
    net = tflearn.conv_2d(net, 64, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)

    net = tflearn.conv_2d(net, 128, 3, activation='relu')
    net = tflearn.conv_2d(net, 128, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)

    net = tflearn.conv_2d(net, 256, 3, activation='relu')
    net = tflearn.conv_2d(net, 256, 3, activation='relu')
    net = tflearn.conv_2d(net, 256, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)

    net = tflearn.conv_2d(net, 512, 3, activation='relu')
    net = tflearn.conv_2d(net, 512, 3, activation='relu')
    net = tflearn.conv_2d(net, 512, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)

    net = tflearn.conv_2d(net, 512, 3, activation='relu')
    net = tflearn.conv_2d(net, 512, 3, activation='relu')
    net = tflearn.conv_2d(net, 512, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)

    net = tflearn.fully_connected(net, 4096, activation='relu')
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 4096, activation='relu')
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 17, activation='softmax')

    net = tflearn.regression(net, optimizer='rmsprop',
                     loss='categorical_crossentropy',
                     learning_rate=0.001)

    m = tflearn.DNN(net, checkpoint_path='models/vgg_net',
                    max_checkpoints=1, tensorboard_verbose=3)
    m.fit(X, Y, n_epoch=500, shuffle=True,
          show_metric=True, batch_size=32, snapshot_step=500,
          snapshot_epoch=False, run_id='vgg_net')
    m.save('models/vgg_net.tfl')
Example #18
def vgg16(placeholderX=None, softmax_size=1000, restore_softmax=True,
          data_preprocessing=None, data_augmentation=None):

    x = tflearn.input_data(shape=[None, 224, 224, 3], name='input',
                           placeholder=placeholderX,
                           data_preprocessing=data_preprocessing,
                           data_augmentation=data_augmentation)

    x = tflearn.conv_2d(x, 64, 3, activation='relu', scope='conv1_1')
    x = tflearn.conv_2d(x, 64, 3, activation='relu', scope='conv1_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool1')

    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_1')
    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool2')

    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_1')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_2')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool3')

    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool4')

    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool5')

    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc6')
    x = tflearn.dropout(x, 0.5, name='dropout1')

    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc7')
    x = tflearn.dropout(x, 0.5, name='dropout2')

    x = tflearn.fully_connected(x, softmax_size, activation='softmax',
                                scope='fc8', restore=restore_softmax)

    return x
def run():
    net = tflearn.input_data([None, 100])
    net = tflearn.embedding(net, input_dim=20000, output_dim=128)
    net = tflearn.bidirectional_rnn(
        net, tflearn.BasicLSTMCell(128), tflearn.BasicLSTMCell(128))
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(
        net, optimizer='adam', loss='categorical_crossentropy')

    m = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=2)
    m.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=64)
    m.save('models/bidirectional_rnn.tfl')
Example #20
def vgg16(placeholderX=None):

    x = tflearn.input_data(shape=[None, 224, 224, 3], name='input',
                           placeholder=placeholderX)

    x = tflearn.conv_2d(x, 64, 3, activation='relu', scope='conv1_1')
    x = tflearn.conv_2d(x, 64, 3, activation='relu', scope='conv1_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool1')

    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_1')
    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool2')

    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_1')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_2')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool3')

    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool4')

    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool5')

    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc6')
    x = tflearn.dropout(x, 0.5, name='dropout1')

    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc7')
    x = tflearn.dropout(x, 0.5, name='dropout2')

    x = tflearn.fully_connected(x, 1000, activation='softmax', scope='fc8')

    return x
Example #21
	def train(self):

		char_idx = None

		if(os.path.isfile(self.charIDXFile)):
			# load previous character file
			char_idx = pickle.load(open(self.charIDXFile, 'rb'))


		X, Y, char_idx = textfile_to_semi_redundant_sequences(self.path, seq_maxlen=self.maxLength, redun_step=3, pre_defined_char_idx=char_idx)

		pickle.dump(char_idx, open(self.charIDXFile, 'wb'))

		self.g = tflearn.input_data([None, self.maxLength, len(char_idx)])
		self.g = tflearn.lstm(self.g,512,return_seq=True)
		self.g = tflearn.dropout(self.g,0.5)
		self.g = tflearn.lstm(self.g,512,return_seq=True)
		self.g = tflearn.dropout(self.g,0.5)
		self.g = tflearn.lstm(self.g,512)
		self.g = tflearn.dropout(self.g,0.5)
		self.g = tflearn.fully_connected(self.g,len(char_idx),activation='softmax')
		self.g = tflearn.regression(self.g, optimizer='adam', loss='categorical_crossentropy',
								 learning_rate=0.001)
		self.model = tflearn.SequenceGenerator(self.g, dictionary=char_idx, seq_maxlen=self.maxLength, max_checkpoints=0,checkpoint_path='model_trump')
def vgg16(input, num_class):

    # In the model we added trainable=False to make sure the parameters are not updated during training
    x = tflearn.conv_2d(input, 64, 3, activation='relu', scope='conv1_1',trainable=False)
    x = tflearn.conv_2d(x, 64, 3, activation='relu', scope='conv1_2',trainable=False)
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool1')

    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_1',trainable=False)
    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_2',trainable=False)
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool2')

    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_1',trainable=False)
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_2',trainable=False)
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_3',trainable=False)
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool3')

    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_1',trainable=False)
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_2',trainable=False)
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_3',trainable=False)
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool4')

    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool5')

    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc6')
    x = tflearn.dropout(x, 0.5, name='dropout1')
    # We changed the structure here so that fc7 only has 2048 units: fewer parameters, but enough for our task
    x = tflearn.fully_connected(x, 2048, activation='relu', scope='fc7',restore=False)
    x = tflearn.dropout(x, 0.5, name='dropout2')

    x = tflearn.fully_connected(x, num_class, activation='softmax', scope='fc8',
                                restore=False)

    return x
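# Assumed fine-tuning harness for this vgg16() variant (input shape, class
# count and checkpoint file are guesses): the frozen conv blocks would load
# pretrained weights, while fc7/fc8 (restore=False) start fresh.
inp = tflearn.input_data(shape=[None, 224, 224, 3], name='input')
softmax = vgg16(inp, num_class=5)
net = tflearn.regression(softmax, optimizer='adam',
                         loss='categorical_crossentropy', learning_rate=0.0001)
model = tflearn.DNN(net)
# model.load('vgg16.tflearn', weights_only=True)  # checkpoint path assumed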
def predict_new_velocity(x):
    # Building deep neural network
    input_layer = tflearn.input_data(shape=[None, 360, 3])
    dense1 = tflearn.fully_connected(input_layer, 64, activation='relu',
                                    regularizer='L2', weight_decay=0.001)
    dropout1 = tflearn.dropout(dense1, 0.5)
    dense2 = tflearn.fully_connected(dropout1, 64, activation='relu',
                                    regularizer='L2', weight_decay=0.001)
    dropout2 = tflearn.dropout(dense2, 0.5)
    softmax = tflearn.fully_connected(dropout2, 8, activation='softmax')

    # Regression using SGD with learning rate decay and Top-3 accuracy
    sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
    top_k = tflearn.metrics.Top_k(3)
    net = tflearn.regression(softmax, optimizer=sgd, metric=top_k,
                            loss='categorical_crossentropy')

    # load the trained model
    model = tflearn.DNN(net, tensorboard_verbose=3)
    model.load("model.tflearn")

    predict_y = model.predict(x)
    new_y = np.argmax(predict_y, axis=1)
    return new_y.astype(np.uint8)
Example #24
    def test_core_layers(self):

        X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
        Y_nand = [[1.], [1.], [1.], [0.]]
        Y_or = [[0.], [1.], [1.], [1.]]

        # Graph definition
        with tf.Graph().as_default():
            # Building a network with 2 optimizers
            g = tflearn.input_data(shape=[None, 2])

            # Nand operator definition
            g_nand = tflearn.fully_connected(g, 32, activation='linear')
            g_nand = tflearn.fully_connected(g_nand, 32, activation='linear')
            g_nand = tflearn.fully_connected(g_nand, 1, activation='sigmoid')
            g_nand = tflearn.regression(g_nand, optimizer='sgd',
                                        learning_rate=2.,
                                        loss='binary_crossentropy')
            # Or operator definition
            g_or = tflearn.fully_connected(g, 32, activation='linear')
            g_or = tflearn.fully_connected(g_or, 32, activation='linear')
            g_or = tflearn.fully_connected(g_or, 1, activation='sigmoid')
            g_or = tflearn.regression(g_or, optimizer='sgd',
                                      learning_rate=2.,
                                      loss='binary_crossentropy')
            # XOR merging Nand and Or operators
            g_xor = tflearn.merge([g_nand, g_or], mode='elemwise_mul')

            # Training
            m = tflearn.DNN(g_xor)
            m.fit(X, [Y_nand, Y_or], n_epoch=400, snapshot_epoch=False)

            # Testing
            self.assertLess(m.predict([[0., 0.]])[0][0], 0.01)
            self.assertGreater(m.predict([[0., 1.]])[0][0], 0.9)
            self.assertGreater(m.predict([[1., 0.]])[0][0], 0.9)
            self.assertLess(m.predict([[1., 1.]])[0][0], 0.01)

        # Bulk Tests
        with tf.Graph().as_default():
            net = tflearn.input_data(shape=[None, 2])
            net = tflearn.flatten(net)
            net = tflearn.reshape(net, new_shape=[-1])
            net = tflearn.activation(net, 'relu')
            net = tflearn.dropout(net, 0.5)
            net = tflearn.single_unit(net)
Example #25
    def test_sequencegenerator_words(self):

        with tf.Graph().as_default():
            text = ["hello","world"]*100
            word_idx = {"hello": 0, "world": 1}
            maxlen = 2

            vec = [x for x in map(word_idx.get, text) if x is not None]

            sequences = []
            next_words = []
            for i in range(0, len(vec) - maxlen, 3):
                sequences.append(vec[i: i + maxlen])
                next_words.append(vec[i + maxlen])

            X = np.zeros((len(sequences), maxlen, len(word_idx)), dtype=bool)
            Y = np.zeros((len(sequences), len(word_idx)), dtype=bool)
            for i, seq in enumerate(sequences):
                for t, idx in enumerate(seq):
                    X[i, t, idx] = True
                    Y[i, next_words[i]] = True

            g = tflearn.input_data(shape=[None, maxlen, len(word_idx)])
            g = tflearn.lstm(g, 32)
            g = tflearn.dropout(g, 0.5)
            g = tflearn.fully_connected(g, len(word_idx), activation='softmax')
            g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                                   learning_rate=0.1)

            m = tflearn.SequenceGenerator(g, dictionary=word_idx,
                                          seq_maxlen=maxlen,
                                          clip_gradients=5.0)
            m.fit(X, Y, validation_set=0.1, n_epoch=100, snapshot_epoch=False)
            res = m.generate(4, temperature=.5, seq_seed=["hello","world"])
            res_str = " ".join(res[-2:])
            self.assertEqual(res_str, "hello world", "SequenceGenerator (word level) test failed! Generated sequence: " + res_str + " expected 'hello world'")

            # Testing save method
            m.save("test_seqgen_word.tflearn")
            self.assertTrue(os.path.exists("test_seqgen_word.tflearn"))

            # Testing load method
            m.load("test_seqgen_word.tflearn")
            res = m.generate(4, temperature=.5, seq_seed=["hello","world"])
            res_str = " ".join(res[-2:])
            self.assertEqual(res_str, "hello world", "Reloaded SequenceGenerator (word level) test failed! Generated sequence: " + res_str + " expected 'hello world'")
Example #26
def bi_lstm(trainX, trainY,testX, testY):
    trainX = pad_sequences(trainX, maxlen=200, value=0.)
    testX = pad_sequences(testX, maxlen=200, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Network building
    net = tflearn.input_data(shape=[None, 200])
    net = tflearn.embedding(net, input_dim=20000, output_dim=128)
    net = tflearn.bidirectional_rnn(net, BasicLSTMCell(128), BasicLSTMCell(128))
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy')

    # Training
    model = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=2)
    model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=64, run_id="rnn-bilstm")
Example #27
def calculate_accuracy():
    sum_1 = 0
    total = 0
    for i in range(len(a)):
        if label_data[argsort(-a[i])[0]]==1:
            sum_1+=1
            total+=1
        else:
            total+=1
    return sum_1/total
num_mat, categories_mat, label_data, combine_mat = np.process_data()
x_train, x_test, y_train, y_test = train_test_split(combine_mat, label_data, test_size=0.2, random_state=42)
tf.app.flags.DEFINE_integer('epochs', 10, 'Training epochs')
FLAGS = tf.app.flags.FLAGS
n_features = combine_mat.shape[1]
input = tflearn.input_data([None, n_features])
network = tflearn.layers.fully_connected(input, 2000, activation='relu')
network = tflearn.dropout(network, 0.5)
network = tflearn.layers.fully_connected(network, 2000, activation='relu')
network = tflearn.dropout(network, 0.5)
y_pred = tflearn.layers.fully_connected(network, 4, activation='softmax')
net = tflearn.regression(y_pred, optimizer='adam', loss='categorical_crossentropy')
model = tflearn.DNN(net)
model.fit(x_train, y_train, validation_set=0.1, n_epoch=FLAGS.epochs)
#metric = model.evaluate(x_test, y_test)
predict_y = model.predict(x_test)
#test
a = array(predict_y)
print(calculate_accuracy())
Example #28
input_layer = tflearn.input_data(shape=[None, 88])
dense1 = tflearn.fully_connected(input_layer,
                                 88,
                                 activation='relu',
                                 weights_init=tflearn.initializations.xavier(),
                                 regularizer='L2',
                                 weight_decay=0.01)
dense2 = tflearn.fully_connected(dense1,
                                 64,
                                 activation='tanh',
                                 weights_init=tflearn.initializations.normal(),
                                 regularizer='L2',
                                 weight_decay=0.01)
bn1 = tflearn.batch_normalization(dense2)
dropout1 = tflearn.dropout(bn1, 0.9)
dense3 = tflearn.fully_connected(dropout1,
                                 32,
                                 activation='tanh',
                                 weights_init=tflearn.initializations.normal(),
                                 regularizer='L2',
                                 weight_decay=0.01)
dense4 = tflearn.fully_connected(dense3,
                                 16,
                                 activation='relu',
                                 weights_init=tflearn.initializations.normal(),
                                 regularizer='L2',
                                 weight_decay=0.01)
bn2 = tflearn.batch_normalization(dense4)
dropout2 = tflearn.dropout(bn2, 0.9)
softmax = tflearn.fully_connected(dropout2, 2, activation='softmax')
Example #29
# IMDB Dataset loading
train, val, test = imdb.load_data(path='imdb.pkl', maxlen=200,
                                  n_words=20000)
trainX, trainY = train
valX, valY = val
testX, testY = test

# Data preprocessing
# Sequence padding
trainX = pad_sequences(trainX, maxlen=200, value=0.)
valX = pad_sequences(valX, maxlen=200, value=0.)
testX = pad_sequences(testX, maxlen=200, value=0.)
# Converting labels to binary vectors
trainY = to_categorical(trainY, nb_classes=2)
valY = to_categorical(valY, nb_classes=2)
testY = to_categorical(testY, nb_classes=2)

# Network building
net = tflearn.input_data([None, 200])
net = tflearn.embedding(net, input_dim=20000, output_dim=128)
net = tflearn.lstm(net, 128)
net = tflearn.dropout(net, 0.5)
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net, optimizer='adam',
                         loss='categorical_crossentropy')

# Training
model = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=0)
model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True,
          batch_size=128)
Example #30
import tflearn.datasets.mnist as mnist
mnist_data = mnist.read_data_sets(one_hot=True)

# User defined placeholders
with tf.Graph().as_default():
    # Placeholders for data and labels
    X = tf.placeholder(shape=(None, 784), dtype=tf.float32)
    Y = tf.placeholder(shape=(None, 10), dtype=tf.float32)

    net = tf.reshape(X, [-1, 28, 28, 1])

    # Using TFLearn wrappers for network building
    net = tflearn.conv_2d(net, 32, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)
    net = tflearn.local_response_normalization(net)
    net = tflearn.dropout(net, 0.8)
    net = tflearn.conv_2d(net, 64, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)
    net = tflearn.local_response_normalization(net)
    net = tflearn.dropout(net, 0.8)
    net = tflearn.fully_connected(net, 128, activation='tanh')
    net = tflearn.dropout(net, 0.8)
    net = tflearn.fully_connected(net, 256, activation='tanh')
    net = tflearn.dropout(net, 0.8)
    net = tflearn.fully_connected(net, 10, activation='linear')  # logits; the loss below applies the softmax

    # Defining other ops using Tensorflow
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=net, labels=Y))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)

    # Initializing the variables
Example #31
    for i in range(200):
        ix.append(imToTensor(imToAr('tt5/{0}'.format(i))))
        ref.append(last)

    return ix, ref


X, Y = info()

n_classes = 11

net = tfl.input_data([None, 34, 30])
net = tfl.conv_1d(net, 25, 5, activation='relu')
net = tfl.max_pool_1d(net, 2)
net = tfl.dropout(net, 0.8)
net = tfl.conv_1d(net, 35, 3, activation='relu')
net = tfl.max_pool_1d(net, 2)
net = tfl.dropout(net, 0.75)
net = tfl.fully_connected(net, 128, activation='relu')
net = tfl.fully_connected(net, 50, activation='relu')
net = tfl.fully_connected(net, n_classes, activation='softmax')

reg = tfl.regression(net,
                     optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.00003)

mod = tfl.DNN(reg, tensorboard_verbose=0)

mod.load('conv_nn8.1')
def FacePatches_NET_3Conv_IInception_tflear(eyep, middlep, mouthp):
    e_net = tflearn.conv_2d(eyep,
                            8,
                            3,
                            activation='relu',
                            name='eye_conv1_1_3x3')
    e_net = tflearn.conv_2d(e_net,
                            8,
                            3,
                            activation='relu',
                            name='eye_conv1_2_3x3')
    e_net = tflearn.max_pool_2d(e_net, 2, 2, name='eye_pool1')
    e_net = tflearn.conv_2d(e_net,
                            32,
                            3,
                            activation='relu',
                            name='eye_conv2_1_3x3')
    e_net = tflearn.conv_2d(e_net,
                            32,
                            3,
                            activation='relu',
                            name='eye_conv2_2_3x3')
    e_net = tflearn.max_pool_2d(e_net, 2, 2, name='eye_pool2')
    e_net = tflearn.conv_2d(e_net,
                            128,
                            3,
                            activation='relu',
                            name='eye_conv3_1_3x3')
    e_net = tflearn.conv_2d(e_net,
                            128,
                            3,
                            activation='relu',
                            name='eye_conv3_2_3x3')
    e_net = tflearn.max_pool_2d(e_net, 2, 2, name='eye_pool3')
    e_net = tflearn.fully_connected(e_net,
                                    1024,
                                    activation='tanh',
                                    name='eye_fc1')

    mi_net = tflearn.conv_2d(middlep,
                             8,
                             3,
                             activation='relu',
                             name='middle_conv1_1_3x3')
    mi_net = tflearn.conv_2d(mi_net,
                             8,
                             3,
                             activation='relu',
                             name='middle_conv1_2_3x3')
    mi_net = tflearn.max_pool_2d(mi_net, 2, 2, name='middle_pool1')
    mi_net = tflearn.conv_2d(mi_net,
                             32,
                             3,
                             activation='relu',
                             name='middle_conv2_1_3x3')
    mi_net = tflearn.conv_2d(mi_net,
                             32,
                             3,
                             activation='relu',
                             name='middle_conv2_2_3x3')
    mi_net = tflearn.max_pool_2d(mi_net, 2, 2, name='middle_pool2')
    mi_net = tflearn.conv_2d(mi_net,
                             128,
                             3,
                             activation='relu',
                             name='middle_conv3_1_3x3')
    mi_net = tflearn.conv_2d(mi_net,
                             128,
                             3,
                             activation='relu',
                             name='middle_conv3_2_3x3')
    mi_net = tflearn.max_pool_2d(mi_net, 2, 2, name='middle_pool3')
    mi_net = tflearn.fully_connected(mi_net,
                                     1024,
                                     activation='tanh',
                                     name='middle_fc1')

    mo_net = tflearn.conv_2d(mouthp,
                             8,
                             3,
                             activation='relu',
                             name='mouth_conv1_1_3x3')
    mo_net = tflearn.conv_2d(mo_net,
                             8,
                             3,
                             activation='relu',
                             name='mouth_conv1_2_3x3')
    mo_net = tflearn.max_pool_2d(mo_net, 2, 2, name='mouth_pool1')
    mo_net = tflearn.conv_2d(mo_net,
                             32,
                             3,
                             activation='relu',
                             name='mouth_conv2_1_3x3')
    mo_net = tflearn.conv_2d(mo_net,
                             32,
                             3,
                             activation='relu',
                             name='mouth_conv2_2_3x3')
    mo_net = tflearn.max_pool_2d(mo_net, 2, 2, name='mouth_pool2')
    mo_net = tflearn.conv_2d(mo_net,
                             128,
                             3,
                             activation='relu',
                             name='mouth_conv3_1_3x3')
    mo_net = tflearn.conv_2d(mo_net,
                             128,
                             3,
                             activation='relu',
                             name='mouth_conv3_2_3x3')
    mo_net = tflearn.max_pool_2d(mo_net, 2, 2, name='mouth_pool3')
    mo_net = tflearn.fully_connected(mo_net,
                                     1024,
                                     activation='tanh',
                                     name='mouth_fc1')

    fc_net = tf.concat([e_net, mi_net, mo_net], 1, name='fusion_1')
    fc_net = tflearn.fully_connected(fc_net,
                                     2048,
                                     activation='relu',
                                     name='fc1')
    fc_net = tflearn.dropout(fc_net, 0.8, name='drop1')
    fc_net = tflearn.fully_connected(fc_net,
                                     2048,
                                     activation='relu',
                                     name='fc2')
    fc_net = tflearn.dropout(fc_net, 0.8, name='drop2')
    softmax = tflearn.fully_connected(fc_net,
                                      7,
                                      activation='softmax',
                                      name='prob')
    return softmax
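# Assumed wiring for the three-patch network above: separate input layers for
# the eye, middle and mouth crops (the 64x64 grayscale patch size is a guess);
# the function itself fuses them with tf.concat and returns a 7-way softmax.
eyep = tflearn.input_data(shape=[None, 64, 64, 1], name='eye_input')
middlep = tflearn.input_data(shape=[None, 64, 64, 1], name='middle_input')
mouthp = tflearn.input_data(shape=[None, 64, 64, 1], name='mouth_input')
softmax = FacePatches_NET_3Conv_IInception_tflear(eyep, middlep, mouthp)
net = tflearn.regression(softmax, optimizer='adam',
                         loss='categorical_crossentropy')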
y_train = enc.fit_transform(y_train.reshape(-1, 1)).toarray()
y_test = enc.transform(y_test.reshape(-1, 1)).toarray()  # reuse the encoder fitted on train

n_inputs = 30
n_hidden1 = 3
n_hidden2 = 5
n_outputs = 2

n_epochs = 50
batch_size = 30

# network
keeping_rate = 0.6

inputs = tflearn.input_data(shape=[None, n_inputs])
inputs_drop = tflearn.dropout(inputs, keeping_rate)
hidden1 = tflearn.fully_connected(inputs_drop,
                                  n_hidden1,
                                  activation='relu',
                                  name='hidden1')
hidden1_drop = tflearn.dropout(hidden1, keeping_rate)

hidden2 = tflearn.fully_connected(hidden1_drop,
                                  n_hidden2,
                                  activation='relu',
                                  name='hidden2')
hidden2_drop = tflearn.dropout(hidden2, keeping_rate)

softmax = tflearn.fully_connected(hidden2_drop,
                                  n_outputs,
                                  activation='softmax',
Example #34
#Change kernel size from 1 to 3
net = tflearn.max_pool_3d(net, kernel_size=3, strides=2, padding='VALID')
net = tflearn.conv_3d(net,
                      32,
                      3,
                      strides=2,
                      padding='VALID',
                      weights_init='xavier',
                      regularizer='L2',
                      weight_decay=0.01)
net = tflearn.normalization.batch_normalization(net)
net = tflearn.activations.leaky_relu(net)
#kernel size from 1 to 2
net = tflearn.max_pool_3d(net, kernel_size=2, strides=2, padding='VALID')

net = tflearn.dropout(net, 0.5)
net = tflearn.fully_connected(net,
                              1024,
                              weights_init='xavier',
                              regularizer='L2')
net = tflearn.normalization.batch_normalization(net, gamma=1.1, beta=0.1)
net = tflearn.activations.leaky_relu(net)
net = tflearn.dropout(net, 0.6)
net = tflearn.fully_connected(net,
                              512,
                              weights_init='xavier',
                              regularizer='L2')
net = tflearn.normalization.batch_normalization(net, gamma=1.2, beta=0.2)
net = tflearn.activations.leaky_relu(net)
net = tflearn.dropout(net, 0.7)
net = tflearn.fully_connected(net,
Example #35
# X_train = pickle.load(open("xtrain.p", "rb"))
# X_test = pickle.load(open("xtest.p", "rb"))

### Models

print('Build model')

net = tflearn.input_data([None, model_size])
net = tflearn.embedding(net, input_dim=n_words, output_dim=lstm_size[0])
for i in range(len(lstm_size)):
    if i < len(lstm_size) - 1:
        net = tflearn.gru(net,
                          lstm_size[i],
                          activation=activation_function,
                          return_seq=True)
        net = tflearn.dropout(net, dropout_ratio)
    else:
        net = tflearn.gru(net, lstm_size[i], activation=activation_function)
        net = tflearn.dropout(net, dropout_ratio)
net = tflearn.fully_connected(net, len(qualities), activation='softmax')
net = tflearn.regression(net,
                         optimizer='adam',
                         learning_rate=0.001,
                         loss='categorical_crossentropy')

print('Train model')

model = tflearn.DNN(net, tensorboard_verbose=0, tensorboard_dir="logdir/gru")

print('Predict')
model.fit(X_train,
#%%
n_epoch = 50
classes = 15
hidden_layer_size = int((n_feat * 2) / 3 + classes)
print('hidden layer size: ', hidden_layer_size)
label_test = to_categorical(label_test, nb_classes=classes)
#%%

# Building deep neural network
input_layer = tflearn.input_data(shape=[None, n_feat])
dense1 = tflearn.fully_connected(input_layer,
                                 hidden_layer_size,
                                 activation='tanh',
                                 regularizer='L2',
                                 weight_decay=0.001)
dropout1 = tflearn.dropout(dense1, 0.5)
dense2 = tflearn.fully_connected(dropout1,
                                 hidden_layer_size // 2,
                                 activation='tanh',
                                 regularizer='L2',
                                 weight_decay=0.001)
dropout2 = tflearn.dropout(dense2, 0.5)
softmax = tflearn.fully_connected(dropout2, classes, activation='softmax')

sgd = tflearn.SGD(learning_rate=0.01, lr_decay=0.96, decay_step=1000)
top_k = tflearn.metrics.Top_k(3)
net = tflearn.regression(softmax,
                         optimizer=sgd,
                         metric=top_k,
                         loss='categorical_crossentropy')
Example #37
import os
from six.moves import urllib

import tflearn
from tflearn.data_utils import *

path = "bhadeshia_input.txt"

maxlen = 25

X, Y, char_idx = \
    textfile_to_semi_redundant_sequences(path, seq_maxlen=maxlen, redun_step=3)

g = tflearn.input_data([None, maxlen, len(char_idx)])
g = tflearn.lstm(g, 512, return_seq=True)
g = tflearn.dropout(g, 0.5)
g = tflearn.lstm(g, 512, return_seq=True)
g = tflearn.dropout(g, 0.5)
g = tflearn.lstm(g, 512)
g = tflearn.dropout(g, 0.5)
g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                       learning_rate=0.001)

m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                              seq_maxlen=maxlen,
                              clip_gradients=5.0,
                              checkpoint_path='model_bhadeshia')

for i in range(50):
    seed = random_sequence_from_textfile(path, maxlen)
print("The number of test samples are:", len(dataTestX[0]))
print("\n")

#Build a model
net1 = conv_net()
net2 = conv_net()
net3 = conv_net()
net4 = conv_net()
net5 = conv_net()
net6 = conv_net()

net = tflearn.merge([net1, net2, net3, net4, net5, net6], 'concat')
net = tflearn.reshape(net, [-1, 1, 24 * 6 * 96])
net = tflearn.lstm(net, 512, dropout=0.8)
net = tflearn.fully_connected(net, 128, activation='relu')
net = tflearn.dropout(net, 0.5)
net = tflearn.fully_connected(net, 100, activation='relu')
print("Shape before regression", net.shape)
net = tflearn.regression(net,
                         optimizer='RMSprop',
                         loss='mean_square',
                         learning_rate=0.0005)

#Train a model
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit([
    dataTrainX[0], dataTrainX[1], dataTrainX[2], dataTrainX[3], dataTrainX[4],
    dataTrainX[5]
],
          dataTrainY,
          validation_set=0.1,
Example #39
import tensorflow as tf
import tflearn
import tflearn.datasets.mnist as mnist
mnist_data = mnist.read_data_sets(one_hot=True)
with tf.Graph().as_default():
    # placeholders for data and labels
    X = tf.placeholder(shape=(None, 784), dtype=tf.float32)
    Y = tf.placeholder(shape=(None, 10), dtype=tf.float32)
    net = tf.reshape(X, [-1, 28, 28, 1])
    net = tflearn.conv_2d(net, 32, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)
    net = tflearn.local_response_normalization(net)
    net = tflearn.dropout(net, 0.8)
    net = tflearn.conv_2d(net, 64, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)
    net = tflearn.local_response_normalization(net)
    net = tflearn.dropout(net, 0.8)
    net = tflearn.fully_connected(net, 128, activation='tanh')
    net = tflearn.dropout(net, 0.8)
    net = tflearn.fully_connected(net, 256, activation='tanh')
    net = tflearn.dropout(net, 0.8)
    net = tflearn.fully_connected(net, 10, activation='linear')
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=net, labels=Y))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        batch_size = 128
        for epoch in range(2):
            avg_cost = 0
            total_batch = int(mnist_data.train.num_examples / batch_size)
def build_and_run_model(soundList,
                        maxlen,
                        sound_idx,
                        distinct_sounds,
                        seq_len=50,
                        out_file='model.tflearn',
                        iters=20,
                        durations=False):
    """ Build and create sequences with different temperatures (measure of "innovation")"""
    tf.reset_default_graph()
    with tf.Graph().as_default():
        net = tflearn.input_data([None, maxlen, len(sound_idx)])
        net = tflearn.lstm(net, 512, return_seq=True)
        net = tflearn.dropout(net, 0.5)
        net = tflearn.lstm(net, 512, return_seq=True)
        net = tflearn.dropout(net, 0.5)
        net = tflearn.lstm(net, 512)
        net = tflearn.fully_connected(net,
                                      len(sound_idx),
                                      activation='softmax')
        net = tflearn.regression(net,
                                 optimizer='adam',
                                 loss='categorical_crossentropy',
                                 learning_rate=0.001)
        model = tflearn.SequenceGenerator(net,
                                          dictionary=sound_idx,
                                          seq_maxlen=maxlen,
                                          clip_gradients=5.0,
                                          tensorboard_verbose=3)


    # One-hot encoding of the sound chunks is delegated to preprocess() below.
    # Stream the corpus through in large chunks so the one-hot arrays fit in memory.
    load = False
    big_batch = 2 * 42870
    for i in range(0, len(soundList), big_batch):
        X, y = preprocess(soundList[i:i + big_batch], distinct_sounds, maxlen,
                          sound_idx)
        if not load:
            model.fit(X, y, validation_set=0.1, batch_size=128, n_epoch=1)
            #model.save(out_file)
        else:
            pass
            #model = model.load(out_file)
    print('Trained')
    temperatures_low = []
    temperatures_high = []
    temperatures_mid = []
    for i in range(iters):
        seed = select_random_seed(soundList, maxlen)
        temperatures_low.append(
            model.generate(seq_len, temperature=0.1, seq_seed=seed))
        temperatures_mid.append(
            model.generate(seq_len, temperature=0.5, seq_seed=seed))
        temperatures_high.append(
            model.generate(seq_len, temperature=1, seq_seed=seed))
        print(i)
    print('Sequences generated')
    return temperatures_high, temperatures_low, temperatures_mid
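# Hypothetical driver for build_and_run_model; soundList, maxlen, sound_idx and
# distinct_sounds are assumed to come from a preprocessing step like the
# preprocess() helper used above, so the names here are illustrative only.
if __name__ == '__main__':
    high, low, mid = build_and_run_model(soundList, maxlen, sound_idx,
                                         distinct_sounds, seq_len=50, iters=20)
    print(len(high), 'sequences generated at temperature 1.0')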
def FacePatches_NET_3C_3I_2P(eyep, mouthp):
    # using the "net 26" architecture
    e_net = tflearn.conv_2d(eyep,
                            8,
                            3,
                            activation='relu',
                            name='eye_conv1_1_3x3')
    e_net = tflearn.conv_2d(e_net,
                            8,
                            3,
                            activation='relu',
                            name='eye_conv1_2_3x3')
    e_net = tflearn.max_pool_2d(e_net, 2, 2, name='eye_pool1')
    efc3 = tflearn.fully_connected(e_net,
                                   1024,
                                   activation='tanh',
                                   name='eye_fc3')
    e_net = tflearn.conv_2d(e_net,
                            32,
                            3,
                            activation='relu',
                            name='eye_conv2_1_3x3')
    e_net = tflearn.conv_2d(e_net,
                            32,
                            3,
                            activation='relu',
                            name='eye_conv2_2_3x3')
    e_net = tflearn.max_pool_2d(e_net, 2, 2, name='eye_pool2')
    efc2 = tflearn.fully_connected(e_net,
                                   1024,
                                   activation='tanh',
                                   name='eye_fc2')
    e_net = tflearn.conv_2d(e_net,
                            128,
                            3,
                            activation='relu',
                            name='eye_conv3_1_3x3')
    e_net = tflearn.conv_2d(e_net,
                            128,
                            3,
                            activation='relu',
                            name='eye_conv3_2_3x3')
    e_net = tflearn.max_pool_2d(e_net, 2, 2, name='eye_pool3')
    e_net = tflearn.fully_connected(e_net,
                                    1024,
                                    activation='tanh',
                                    name='eye_fc1')
    e_net = tf.concat([e_net, efc2, efc3], 1, name='eye_fc')

    mo_net = tflearn.conv_2d(mouthp,
                             8,
                             3,
                             activation='relu',
                             name='mouth_conv1_1_3x3')
    mo_net = tflearn.conv_2d(mo_net,
                             8,
                             3,
                             activation='relu',
                             name='mouth_conv1_2_3x3')
    mo_net = tflearn.max_pool_2d(mo_net, 2, 2, name='mouth_pool1')
    mfc3 = tflearn.fully_connected(mo_net,
                                   1024,
                                   activation='tanh',
                                   name='mouth_fc3')
    mo_net = tflearn.conv_2d(mo_net,
                             32,
                             3,
                             activation='relu',
                             name='mouth_conv2_1_3x3')
    mo_net = tflearn.conv_2d(mo_net,
                             32,
                             3,
                             activation='relu',
                             name='mouth_conv2_2_3x3')
    mo_net = tflearn.max_pool_2d(mo_net, 2, 2, name='mouth_pool2')
    mfc2 = tflearn.fully_connected(mo_net,
                                   1024,
                                   activation='tanh',
                                   name='mouth_fc2')
    mo_net = tflearn.conv_2d(mo_net,
                             128,
                             3,
                             activation='relu',
                             name='mouth_conv3_1_3x3')
    mo_net = tflearn.conv_2d(mo_net,
                             128,
                             3,
                             activation='relu',
                             name='mouth_conv3_2_3x3')
    mo_net = tflearn.max_pool_2d(mo_net, 2, 2, name='mouth_pool3')
    mo_net = tflearn.fully_connected(mo_net,
                                     1024,
                                     activation='tanh',
                                     name='mouth_fc1')
    mo_net = tf.concat([mo_net, mfc2, mfc3], 1, name='mouth_fc')

    fc_net = tf.concat([e_net, mo_net], 1, name='fusion_1')
    fc_net = tflearn.fully_connected(fc_net,
                                     2048,
                                     activation='relu',
                                     name='fc1')
    fc_net = tflearn.dropout(fc_net, 0.8, name='drop1')
    fc_net = tflearn.fully_connected(fc_net,
                                     2048,
                                     activation='relu',
                                     name='fc2')
    fc_net = tflearn.dropout(fc_net, 0.8, name='drop2')
    softmax = tflearn.fully_connected(fc_net,
                                      7,
                                      activation='softmax',
                                      name='prob')
    return softmax
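# A minimal sketch (not from the original source) of wiring the two-patch
# network above into a trainable tflearn model; the 32x64 grayscale patch
# shapes are assumptions for illustration.
eyep = tflearn.input_data(shape=[None, 32, 64, 1], name='eye_input')
mouthp = tflearn.input_data(shape=[None, 32, 64, 1], name='mouth_input')
softmax = FacePatches_NET_3C_3I_2P(eyep, mouthp)
net = tflearn.regression(softmax, optimizer='adam',
                         loss='categorical_crossentropy', learning_rate=0.001)
model = tflearn.DNN(net, tensorboard_verbose=0)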
Exemple #42
0
one_hot = OneHotEncoder(sparse=False)
train_Y = one_hot.fit_transform(np.reshape(train_Y, (-1, 1)))
test_Y = one_hot.transform(np.reshape(test_Y, (-1, 1)))
print(train_Y)
lab_size = len(train_Y[0])
# Building deep neural network
# X = tf.placeholder(shape=(None, vector_size), dtype=tf.float32)
# Y = tf.placeholder(shape=(None, lab_size), dtype=tf.float32)

drop_pro = 0.8  # keep probability for the dropout layers

input_layer = tflearn.input_data(shape=[None, vector_size])
dense1 = tflearn.fully_connected(input_layer, 64, activation='tanh',
                                 regularizer='L2', weight_decay=0.001)
dropout1 = tflearn.dropout(dense1, drop_pro)
dense2 = tflearn.fully_connected(dropout1, 128, activation='tanh',
                                 regularizer='L2', weight_decay=0.001)
dropout2 = tflearn.dropout(dense2, drop_pro)
dense3 = tflearn.fully_connected(dropout2, 64, activation='tanh',
                                 regularizer='L2', weight_decay=0.001)
dropout3 = tflearn.dropout(dense3, drop_pro)
softmax = tflearn.fully_connected(dropout3, lab_size, activation='softmax')

# Regression using Adam and an accuracy metric (an SGD optimizer with
# learning-rate decay is defined below as an unused alternative)
sgd = tflearn.SGD(learning_rate=0.01, lr_decay=0.96, decay_step=1000)
adam = tflearn.Adam(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, use_locking=False, name="Adam")
# loss = tflearn.losses.L2()
# top_k = tflearn.metrics.Top_k(6)
accu = tflearn.metrics.Accuracy()
net = tflearn.regression(softmax, optimizer=adam, metric=accu)
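# The excerpt stops at the regression layer; a hedged continuation following the
# usual tflearn pattern would wrap it in a DNN and fit on the encoded data
# (train_X/test_X are assumed to hold the feature vectors matching vector_size):
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(train_X, train_Y, n_epoch=20, validation_set=(test_X, test_Y),
          show_metric=True)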
import pickle
from six.moves import urllib

import tflearn
from tflearn.data_utils import *

path = "zu05056.txt"

maxlen = 25

X, Y, char_idx = \
    textfile_to_semi_redundant_sequences(path, seq_maxlen=maxlen, redun_step=3)

g = tflearn.input_data([None, maxlen, len(char_idx)])
g = tflearn.lstm(g, 512, return_seq=True)
g = tflearn.dropout(g, 0.5)
g = tflearn.lstm(g, 512, return_seq=True)
g = tflearn.dropout(g, 0.5)
g = tflearn.lstm(g, 512)
g = tflearn.dropout(g, 0.5)
g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                       learning_rate=0.001)

m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                              seq_maxlen=maxlen,
                              clip_gradients=5.0,
                              checkpoint_path='model_didactic')

for i in range(10):
    seed = random_sequence_from_textfile(path, maxlen)
    # Train one epoch per iteration, then sample; reconstructed from the same
    # pattern as the previous SequenceGenerator examples.
    m.fit(X, Y, validation_set=0.1, batch_size=128,
          n_epoch=1, run_id='didactic')
    print(m.generate(600, temperature=1.0, seq_seed=seed))
# load the training mfcc values and labels from pickled data
# (the X path below is an assumption, inferred by analogy with the test files)
X = pickle.load(open('/home/cc/Data/pickle_files/devfull_2secX.p', 'rb'))
Y = pickle.load(open('/home/cc/Data/pickle_files/devfull_2secY.p', 'rb'))
speakers = speech_data.get_speakers(training)

# input size for fully connected layers
layer_size = int(sys.argv[1])
dropout = float(sys.argv[2])

# define the network and the model for training
tflearn.init_graph(num_cores=8, gpu_memory_fraction=0.5)

# for just mfcc
net = tflearn.input_data(shape=[None, 20, 87])
net = tflearn.fully_connected(net, layer_size)
net = tflearn.fully_connected(net, layer_size)
net = tflearn.fully_connected(net, layer_size)
net = tflearn.dropout(net, dropout)
net = tflearn.fully_connected(net, len(speakers), activation='softmax')
net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy')

# now train the model!
t0 = time.time()
model = tflearn.DNN(net)
model.fit(X, Y, n_epoch=100, show_metric=True, snapshot_step=1000, validation_set=0.1)
t1 = time.time()

# load the test mfcc values for testing from pickled data
Xtest = pickle.load(open('/home/cc/Data/pickle_files/devfull_2sectestX.p', 'rb'))
Ytest = pickle.load(open('/home/cc/Data/pickle_files/devfull_2sectestY.p', 'rb'))

# now test model over the test segments
result = model.predict(Xtest)
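# A hedged way (not in the original snippet) to turn the raw predictions into an
# accuracy figure, assuming Ytest is one-hot encoded like the training labels:
import numpy as np
pred_labels = np.argmax(result, axis=1)
true_labels = np.argmax(Ytest, axis=1)
print("Test accuracy: %.2f%%" % (100.0 * np.mean(pred_labels == true_labels)))
print("Training took %.1f seconds" % (t1 - t0))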
def main():

    path = FLAGS.dataset

    # We avoid fixed padding and simply compute the maximum sequence length of our input set.
    if FLAGS.max_sequence_lenght < 1:
        maxlen = find_maxlenght(path)
    else:
        maxlen = FLAGS.max_sequence_lenght

    print("MaxLen = ", maxlen)
    X, Y, char_idx = textfile_to_semi_redundant_sequences(path,
                                                          seq_maxlen=maxlen,
                                                          redun_step=3)

    # Here we define our network structure, using commonly used values for layer sizes and dropout

    # Input Layer
    g = tflearn.input_data(shape=[None, maxlen, len(char_idx)])

    # Stack the hidden LSTM layers (FLAGS.hidden_layer_size is treated as the layer count)
    for i in range(FLAGS.hidden_layer_size):
        g = tflearn.lstm(g, 512, return_seq=True)
        g = tflearn.dropout(g, 0.5)

    # Finally, a last LSTM layer and a fully connected layer with softmax activation for the output
    g = tflearn.lstm(g, 512)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')

    # Let's not forget our regression!
    g = tflearn.regression(g,
                           optimizer='adam',
                           loss='categorical_crossentropy',
                           learning_rate=0.001)

    # wrap it up in a sequence generator
    m = tflearn.SequenceGenerator(g,
                                  dictionary=char_idx,
                                  seq_maxlen=maxlen,
                                  clip_gradients=5.0,
                                  checkpoint_path='model_' +
                                  os.path.basename(path))
    train = True
    if os.path.exists(FLAGS.model_file):
        # Load our pre-trained model from file
        print("Loading model from file ", FLAGS.model_file)
        load_model(m)
        train = False

    # Let's train it
    if train:
        print("Training model...")
        m.fit(X,
              Y,
              validation_set=0.1,
              batch_size=FLAGS.batch_size,
              n_epoch=FLAGS.epochs,
              run_id=os.path.basename(path))

        # save our results
        print("Saving trained model to file ", FLAGS.model_file)
        save_model(m)

    # Generate a test result
    generate(m, maxlen)

    # Interactive Session:
    try:
        import readline  # enables line editing in the interactive prompt
        temp = 1.0
        while temp > 0.0:
            temp = float(input('Insert temperature for generation: '))
            FLAGS.temperature = temp
            generate(m, maxlen)
    except (EOFError, ValueError):
        print("Bye!")
        return
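# generate(), load_model() and save_model() are helpers defined outside this
# excerpt. A plausible sketch of generate(), assuming it samples from the
# SequenceGenerator at the temperature stored in FLAGS:
def generate(m, maxlen):
    seed = random_sequence_from_textfile(FLAGS.dataset, maxlen)
    print(m.generate(600, temperature=FLAGS.temperature, seq_seed=seed))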
Exemple #46
0
def model1():
    for c, d in enumerate(uniq_diag[:20]):
        # Display the training diagnosis
        print("--------------------Training {}--------------------".format(d))

        # Run each iteration in a graph
        with tf.Graph().as_default():
            y = Y[d].astype(np.float32)
            y = y.reshape(-1, 1)
            y = to_categorical(
                y, nb_classes=2
            )  # Convert label to categorical to train with tflearn

            # Train and test data
            X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                                y,
                                                                test_size=0.1,
                                                                random_state=0)

            # Standardize the data (scaler fitted on the training split, applied to the test split)
            sc = StandardScaler()
            sc.fit(X_train)
            X_test_sd = sc.transform(X_test)

            # Model
            input_layer = tflearn.input_data(shape=[None, 100], name='input')
            dense1 = tflearn.fully_connected(input_layer,
                                             128,
                                             activation='linear',
                                             name='dense1')
            dropout1 = tflearn.dropout(dense1, 0.8)
            dense2 = tflearn.fully_connected(dropout1,
                                             128,
                                             activation='linear',
                                             name='dense2')
            dropout2 = tflearn.dropout(dense2, 0.8)
            output = tflearn.fully_connected(dropout2,
                                             2,
                                             activation='softmax',
                                             name='output')
            regression = tflearn.regression(output,
                                            optimizer='adam',
                                            loss='categorical_crossentropy',
                                            learning_rate=.001)

            # Define model with checkpoint (autosave)
            model = tflearn.DNN(regression, tensorboard_verbose=3)

            # load the previously trained model
            model.load(
                'Saved_Models/Fully_Connected/dense_fully_connected_dropout_5645_{}.tfl'
                .format(d))

            # Find the probability of outputs
            y_pred_prob = np.array(model.predict(X_test_sd))[:, 1]
            # Find the predicted class
            y_pred = np.where(y_pred_prob > 0.5, 1., 0.)
            # The true labels for the positive class are the 2nd column of the one-hot Y_test
            Y_test_dia = Y_test[:, 1]

            acc = accuracy_score(Y_test_dia, y_pred) * 100
            errors = (y_pred != Y_test_dia).sum()
            ps = precision_score(Y_test_dia, y_pred) * 100
            rs = recall_score(Y_test_dia, y_pred) * 100
            f1 = f1_score(Y_test_dia, y_pred) * 100
            confmat = confusion_matrix(y_true=Y_test_dia, y_pred=y_pred)

            print("Errors for %s    : %.f" % (d, errors))
            print("Accuracy for %s  : %.2f%%" % (d, acc))
            print("Precision for %s : %.2f%%" % (d, ps))
            print("Recall for %s    : %.2f%%" % (d, rs))
            print("F1 Score for %s  : %.2f%%" % (d, f1))
            print("Confusion Matrix for %s :" % d)
            print(confmat)

            # roc_auc_score expects target scores: probability estimates of the
            # positive class, confidence values, or non-thresholded decision measures
            roc_area = roc_auc_score(Y_test_dia, y_pred_prob)
            print("ROC AUC for %s : %.2f" % (d, roc_area))

            print('\n')
            print('Completed : {0}/{1}'.format(c + 1, len(uniq_diag[:20])))
            print('--------------------{} Complete--------------------'.format(
                d))
            print('\n')