Example #1
    def generate_svg(self,
                     model,
                     hidden_layer_prefix='dense',
                     output_layer_name='output'):
        self.input_length = model.inputs[0].shape.dims[1].value
        self.hidden_layer_count = self.set_hidden_layer_count()
        self.known_layer_names = list(map(lambda u: hidden_layer_prefix + str(u), range(self.hidden_layer_count))) + \
            [output_layer_name]
        self.set_min_and_max_values(model)

        image = svgwrite.Drawing()

        self.draw_input_connections(image)

        for layer_index in range(0, len(self.known_layer_names)):
            x = self.x_offset + ((layer_index + 1) * self.x_spacing)
            layer_vars = tflearn.get_layer_variables_by_name(
                self.known_layer_names[layer_index])
            weights = model.get_weights(layer_vars[0])
            self.draw_layer_connections(image, weights, x)

        for layer_index in range(0, len(self.known_layer_names)):
            layer_vars = tflearn.get_layer_variables_by_name(
                self.known_layer_names[layer_index])
            self.draw_layer_nodes(image, layer_index, layer_vars, model)

        self.draw_input_nodes(image)
        return ('<?xml version="1.0" encoding="UTF-8" standalone="no"?>' +
                image.tostring())
Example #2
def train(args, glove, data, param_file_path):
    if glove is None:
        embedding_size = (utils.NUM_UNIQUE_TOKENS, int(args['--embedding-dims']))
    else:
        embedding_size = glove[0].shape

    print("Loading model definition for %s..." % args['--model'])
    net = models.get_model(args['--model'], embedding_size=embedding_size,
                           train_embedding=args['--train-embedding'],
                           hidden_dims=int(args['--hidden-dims']),
                           learning_rate=float(args['--learning-rate']))
    model = tflearn.DNN(net, clip_gradients=5., tensorboard_verbose=0)

    if args['--evaluate-only'] or args['--continue-training']:
        print("Loading saved parameters from %s" % param_file_path)
        model.load(param_file_path)
    elif glove is not None:
        print("Initializing word embedding...")
        # Retrieve embedding layer weights (only a single weight matrix, so index is 0)
        embedding_weights = tflearn.get_layer_variables_by_name('EmbeddingLayer')[0]
        # Initialize with glove embedding
        model.set_weights(embedding_weights, glove[0])

    if not args['--evaluate-only']:
        print("Training...")
        model.fit(data.trainX, data.trainY,
                  n_epoch=int(args['--epochs']),
                  validation_set=(data.valX, data.valY),
                  show_metric=True, batch_size=128,
                  run_id=os.path.splitext(param_file_path)[0])

        print("Saving parameters to %s" % param_file_path)
        model.save(param_file_path)

    return model
Example #3
def generate_model(train_x, train_y, input_embeddings):
    model = tflearn.DNN(rnn_net(), tensorboard_verbose=0)
    model.fit(train_x, train_y, batch_size=32)
    embedding_weights = tflearn.get_layer_variables_by_name(
        'EmbeddingLayer')[0]
    model.set_weights(embedding_weights, input_embeddings)
    return model
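The examples above all follow the same pattern: give a layer an explicit name= when the network is defined, then, after constructing tflearn.DNN, call tflearn.get_layer_variables_by_name to fetch that layer's variables (weight matrix first, bias second) and read or overwrite them with model.get_weights / model.set_weights. Below is a minimal, self-contained sketch of that pattern; the layer name 'fc1' and all shapes are illustrative assumptions, not taken from any example on this page.

import numpy as np
import tflearn

# Build a tiny network with one explicitly named layer.
net = tflearn.input_data(shape=[None, 8])
net = tflearn.fully_connected(net, 4, activation='relu', name='fc1')
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy')
model = tflearn.DNN(net)

# Index 0 of the returned list is the weight matrix, index 1 the bias vector.
fc1_vars = tflearn.get_layer_variables_by_name('fc1')
weights = model.get_weights(fc1_vars[0])                 # numpy array of shape (8, 4)
model.set_weights(fc1_vars[0], np.zeros_like(weights))   # overwrite the weights in place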
Example #4
def lstm(trainX, trainY, valX, valY, testX, input_weights):
    '''
    Standard LSTM network.
    '''
    # Network building LSTM
    net = tflearn.input_data([None, MAX_LENGHT])
    net = tflearn.embedding(net,
                            input_dim=input_weights.shape[0],
                            output_dim=input_weights.shape[1],
                            trainable=True,
                            name="EmbeddingLayer")
    net = tflearn.lstm(net, 128, dropout=0.5)
    net = tflearn.fully_connected(net, 12, activation='softmax')
    net = tflearn.regression(net,
                             optimizer='adam',
                             learning_rate=0.01,
                             loss='categorical_crossentropy')
    # Training
    model = tflearn.DNN(net, tensorboard_verbose=0)

    # Add embedding weights into the embedding layer
    embeddingWeights = tflearn.get_layer_variables_by_name("EmbeddingLayer")[0]
    model.set_weights(embeddingWeights, input_weights)

    model.fit(trainX,
              trainY,
              n_epoch=NB_EPOCHS,
              validation_set=(valX, valY),
              show_metric=True,
              batch_size=32)
    y_result = model.predict(testX)
    return y_result
Example #5
    def train(self, trainX, trainY, model_path, ibatch_size=128):
        # Training
        gf = smart_open.smart_open(Config.GLOVE_EMBEDDINGS_PATH, 'rb')
        glove_embeddings = pickle.load(gf)
        shortened_embeddings = np.zeros((51887, 200))
        index = 0
        for item in glove_embeddings:
            shortened_embeddings[index, :] = item[:200]
            index += 1

        # Retrieve embedding layer weights (only a single weight matrix, so index is 0)
        embeddingWeights = tflearn.get_layer_variables_by_name(
            'EmbeddingLayer')[0]
        print('default embeddings: ', embeddingWeights[0])

        self.model.set_weights(embeddingWeights, shortened_embeddings)

        self.model.fit(trainX,
                       trainY,
                       validation_set=0.1,
                       n_epoch=10,
                       show_metric=True,
                       batch_size=ibatch_size)
        self.model.save(model_path)
        print('Model saved at the following location: ', model_path)
        return self.model
Example #6
def create_lstm(max_sequence_length, dict_size, word_vectors):
    net = tflearn.input_data([None, max_sequence_length])

    #vocab_dim = 50
    #n_symbols = dict_size
    #embedding_weights = np.zeros((n_symbols, vocab_dim))
    #for word, index in index_dict.items():
    #    embedding_weights[index, :] = word_vectors[word]

    # define inputs here
    #embedding_layer = Embedding(output_dim=vocab_dim, input_dim=n_symbols, trainable=True)
    #embedding_layer.build((None,))  # if you don't do this, the next step won't work
    #embedding_layer.set_weights([embedding_weights])

    emb = tflearn.embedding(net, input_dim=dict_size, output_dim=50, trainable=True, name='EmbeddingLayer')
    net = tflearn.lstm(emb, 64, dropout=0.75)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                             loss='categorical_crossentropy')

    model = tflearn.DNN(net, tensorboard_verbose=0, tensorboard_dir='../tensorboard/tensorboard_lstm')

    embeddingWeights = tflearn.get_layer_variables_by_name('EmbeddingLayer')[0]
    model.set_weights(embeddingWeights, word_vectors)
    print(model.get_weights(emb.W))

    return model
Example #7
 def build_network(self):
     # Smaller 'AlexNet'
     # https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py
     print('[+] Building CNN')
     self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, 1])
     print(input_data)
     self.network = conv_2d(self.network, 64, 5, activation='relu')
     #self.network = local_response_normalization(self.network)
     self.network = max_pool_2d(self.network, 3, strides=2)
     self.network = conv_2d(self.network, 64, 5, activation='relu')
     layer1 = tflearn.get_layer_variables_by_name('Conv2D_1')
     #print(layer1)
     self.network = max_pool_2d(self.network, 3, strides=2)
     self.network = conv_2d(self.network, 128, 4, activation='relu')
     self.network = dropout(self.network, 0.3)
     self.network = fully_connected(self.network, 3072, activation='relu')
     self.network = fully_connected(self.network,
                                    len(EMOTIONS),
                                    activation='softmax')
     self.network = regression(self.network,
                               optimizer='momentum',
                               loss='categorical_crossentropy')
     self.model = tflearn.DNN(self.network,
                              checkpoint_path=SAVE_DIRECTORY +
                              '/emotion_recognition',
                              max_checkpoints=1,
                              tensorboard_verbose=2)
     self.load_model()
Example #8
def cnn_3_filters(trainX, trainY, valX, valY, testX, input_weights):
    '''
    A CNN with three convolutional layers, as in Kim Yoon,
    "Convolutional Neural Networks for Sentence Classification".
    '''
    # Building convolutional network
    network = input_data(shape=[None, MAX_LENGHT], name='input')
    network = tflearn.embedding(network,
                                input_dim=input_weights.shape[0],
                                output_dim=input_weights.shape[1],
                                trainable=True,
                                name="EmbeddingLayer")
    branch1 = conv_1d(network,
                      128,
                      3,
                      padding='valid',
                      activation='relu',
                      regularizer="L2")
    branch2 = conv_1d(network,
                      128,
                      4,
                      padding='valid',
                      activation='relu',
                      regularizer="L2")
    branch3 = conv_1d(network,
                      128,
                      5,
                      padding='valid',
                      activation='relu',
                      regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.5)
    network = fully_connected(network, 12, activation='softmax')
    network = regression(network,
                         optimizer='adam',
                         learning_rate=0.001,
                         loss='categorical_crossentropy',
                         name='target')
    # Training
    model = tflearn.DNN(network, tensorboard_verbose=1)

    # Add embedding weights into the embedding layer
    embeddingWeights = tflearn.get_layer_variables_by_name("EmbeddingLayer")[0]
    model.set_weights(embeddingWeights, input_weights)

    print("Start trianing CNN...")
    model.fit(trainX,
              trainY,
              n_epoch=NB_EPOCHS,
              validation_set=(valX, valY),
              shuffle=True,
              show_metric=True,
              batch_size=32)

    y_result = model.predict(testX)
    return y_result
Example #9
def filter_displayer(model, layer, padding=1):
    """
    Display the filters of a layer.
    :param model: tflearn DNN model
    :param layer: string or tflearn layer object whose weights
    we want to display
    :param padding: number of pixels between two adjacent filters
    :return: shows the resulting image with plt.imshow
    """
    if isinstance(layer, str):
        vars = tflearn.get_layer_variables_by_name(layer)
        variable = vars[0]
    else:
        variable = layer.W
    filters = model.get_weights(variable)

    print(filters.shape[0], filters.shape[1], filters.shape[2],
          filters.shape[3])
    # n is half the total number of filter slices (channels * filters / 2)
    n = filters.shape[2] * filters.shape[3] / 2
    # Make the output image a rectangle twice as wide as it is tall and
    # compute the number of tiles per row (nc) and per column (nr)
    nr = int(np.ceil(np.sqrt(n)))
    nc = 2 * nr
    # Assuming that the filters are square
    filter_size = filters.shape[0]
    # Size of the output image with padding
    img_w = nc * (filter_size + padding) - padding
    img_h = nr * (filter_size + padding) - padding
    # Initialize the output image
    filter_img = np.zeros((img_h, img_w))

    # Normalize image to 0-1
    fmin = filters.min()
    fmax = filters.max()
    filters = (filters - fmin) / (fmax - fmin)

    # Starting the tiles
    filter_x = 0
    filter_y = 0
    for r in range(filters.shape[3]):
        for c in range(filters.shape[2]):
            if filter_x == nc:
                filter_y += 1
                filter_x = 0
            for i in range(filter_size):
                for j in range(filter_size):
                    filter_img[filter_y * (filter_size + padding) + i, filter_x * (filter_size + padding) + j] = \
                        filters[i, j, c, r]
            filter_x += 1

    # Plot figure
    plt.figure(figsize=(10, 5))
    plt.axis('off')
    plt.imshow(filter_img, cmap='gray', interpolation='nearest')
    plt.show()
Example #10
def display_convolutions(model, layer, padding=4, filename=''):
    if isinstance(layer, six.string_types):
        vars = tflearn.get_layer_variables_by_name(layer)
        variable = vars[0]
    else:
        variable = layer.W

    data = model.get_weights(variable)

    filter_size = data.shape[0]
    filter_depth = data.shape[2]
    number_of_filters = data.shape[3]
    N = filter_depth * number_of_filters  # N is the total number of convolutions

    # Ensure the resulting image is square
    filters_per_row = int(np.ceil(np.sqrt(number_of_filters)))
    result_size = filters_per_row * (filter_size + padding) - padding

    result = np.zeros((result_size, result_size, 4))
    filter_x = 0
    filter_y = 0
    for filter_number in range(number_of_filters):
        if filter_x == filters_per_row:
            filter_y += 1
            filter_x = 0
        for i in range(filter_size):
            for j in range(filter_size):
                plot_i = filter_y * (filter_size + padding) + i
                plot_j = filter_x * (filter_size + padding) + j

                result[plot_i, plot_j, 0] = data[i, j, 0, filter_number]
                result[plot_i, plot_j, 1] = data[i, j, 1, filter_number]
                result[plot_i, plot_j, 2] = data[i, j, 2, filter_number]
                result[plot_i, plot_j, 3] = data[i, j, 3, filter_number]
        filter_x += 1

    # Normalize image to 0-1
    result_min, result_max = result.min(), result.max()
    result = (result - result_min) / (result_max - result_min)

    plot_figure(result, filename)
Example #11
def display_convolutions(model, layer, padding=4, filename=''):
    if isinstance(layer, six.string_types):
        vars = tflearn.get_layer_variables_by_name(layer)
        variable = vars[0]
    else:
        variable = layer.W

    data = model.get_weights(variable)

    # N is the total number of convolutions
    N = data.shape[2] * data.shape[3]

    # Ensure the resulting image is square
    filters_per_row = int(np.ceil(np.sqrt(N)))
    # Assume the filters are square
    filter_size = data.shape[0]
    # Size of the result image including padding
    result_size = filters_per_row * (filter_size + padding) - padding
    # Initialize result image to all zeros
    result = np.zeros((result_size, result_size))

    # Tile the filters into the result image
    filter_x = 0
    filter_y = 0
    for n in range(data.shape[3]):
        for c in range(data.shape[2]):
            if filter_x == filters_per_row:
                filter_y += 1
                filter_x = 0
            for i in range(filter_size):
                for j in range(filter_size):
                    result[filter_y * (filter_size + padding) + i, filter_x * (filter_size + padding) + j] = \
                        data[i, j, c, n]
            filter_x += 1

    # Normalize image to 0-1
    result_min = result.min()
    result_max = result.max()
    result = (result - result_min) / (result_max - result_min)

    # Plot figure
    plt.figure(figsize=(10, 10))
    plt.axis('off')
    plt.imshow(result, cmap='gray', interpolation='nearest')

    # Save plot if filename is set
    if filename != '':
        plt.savefig(filename, bbox_inches='tight', pad_inches=0)

    plt.show()
Example #12
def display_convolutions(model, layer, padding=3, axis=None, cmap='gray'):
    if isinstance(layer, str):
        variable = tflearn.get_layer_variables_by_name(layer)
        variable = variable[0]
    else:
        variable = layer.W

    data = model.get_weights(variable)

    # N is the total number of convolutions
    N = data.shape[2] * data.shape[3]

    # Ensure the resulting image is square
    filters_per_row = int(np.ceil(np.sqrt(N)))
    # Assume the filters are square
    filter_size = data.shape[0]
    # Size of the result image including padding
    result_size = filters_per_row * (filter_size + padding) - padding
    # Initialize result image to all zeros
    result = np.zeros((result_size, result_size))

    # Tile the filters into the result image
    filter_x = 0
    filter_y = 0
    for n in range(data.shape[3]):
        for c in range(data.shape[2]):
            if filter_x == filters_per_row:
                filter_y += 1
                filter_x = 0
            for i in range(filter_size):
                for j in range(filter_size):
                    result[filter_y * (filter_size + padding) + i, filter_x * (filter_size + padding) + j] = \
                        data[i, j, c, n]
            filter_x += 1

    # Normalize image to 0-1
    result_min = result.min()
    result_max = result.max()
    result = (result - result_min) / (result_max - result_min)

    if not axis:
        # Plot figure
        plt.figure(figsize=(10, 10))
        axis = plt.gca()
    axis.imshow(result, cmap=cmap, interpolation='nearest')

    return axis
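Examples #10 through #12 are variants of the same filter-tiling helper; the layer argument can be either a layer name string or the layer object itself. A hypothetical usage sketch follows: 'Conv2D' is tflearn's default scope name for a first, unnamed conv_2d layer, so substitute whatever name= your network actually uses.

import matplotlib.pyplot as plt

# Either pass the layer's name ...
ax = display_convolutions(model, 'Conv2D', padding=3)
# ... or the layer object returned by tflearn.conv_2d, e.g. display_convolutions(model, conv_layer).
plt.show()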
Example #13
    def build_model(self):

        trainable, net = self.build_embedding_layer()
        net = self.transform_embedded_sequences(net)
        net = regression(
            net,
            optimizer=self.optimizer,
            loss='categorical_crossentropy',
            name='target')
        model = tflearn.DNN(net, tensorboard_verbose=0)

        if not trainable:
            embedding_matrix = get_embedding_matrix(self.vocab, self.embeddings_path)
            embeddingWeights = tflearn.get_layer_variables_by_name('EmbeddingLayer')[0]
            model.set_weights(embeddingWeights, embedding_matrix)

        return model
Example #14
def train(data_dict, model_type, vector_type, flag, embed_size, dump_embeddings=False):
    print("trainnn")
    data, trainX, trainY, testX, testY, vocab_processor = return_data(data_dict)
    
    vocab_size = len(vocab_processor.vocabulary_)
    NUM_CLASSES = 3
    print(NUM_CLASSES)
    print(trainX.shape[1])
    print("Vocabulary Size: {:d}".format(vocab_size))
    vocab = vocab_processor.vocabulary_._mapping
    print(model_type)
    print(vector_type)
    print("Running Model: " + model_type + " with word vector initiliazed with " + vector_type + " word vectors.")
    model = get_model(model_type, trainX.shape[1], vocab_size, embed_size, NUM_CLASSES, LEARN_RATE)

    initial_weights = model.get_weights()
    shuffle_weights(model, initial_weights)
    
    if(vector_type!="random"):
        print("Word vectors used: " + vector_type)
        model.layers[0].set_weights([map_embedding_weights(get_embeddings_dict(vector_type, embed_size), vocab, embed_size)])
        model.fit(trainX, trainY, epochs=EPOCHS, shuffle=True, batch_size=BATCH_SIZE, 
              verbose=1)
    else:
        model.fit(trainX, trainY, epochs=EPOCHS, shuffle=True, batch_size=BATCH_SIZE, 
              verbose=1)
            
    if (dump_embeddings==True):
        if(model_type == 'cnn'):
            embeddingWeights = tflearn.get_layer_variables_by_name('EmbeddingLayer')[0]
            embed = model.get_weights(embeddingWeights)
        else:
            embed = model.layers[0].get_weights()[0]
    
        embed_filename = output_folder_name + data + "_" + model_type + "_" + vector_type + "_" + str(embed_size) + ".pkl"
        embed.dump(embed_filename)
        
        vocab_filename = output_folder_name + data + "_" + model_type + "_" + vector_type + "_" + str(embed_size) + "_dict.json"
        reverse_vocab_filename = output_folder_name + data + "_" + model_type + "_" + vector_type + "_" + str(embed_size) + "_reversedict.json"
        
        with open(vocab_filename, 'w') as fp:
            json.dump(vocab_processor.vocabulary_._mapping, fp)
        with open(reverse_vocab_filename, 'w') as fp:
            json.dump(vocab_processor.vocabulary_._reverse_mapping, fp)
    return  evaluate_model(model, testX, testY,flag)
Example #15
 def set_min_and_max_values(self, model):
     for layer_name in self.known_layer_names:
         layer_vars = tflearn.get_layer_variables_by_name(layer_name)
         weights = model.get_weights(layer_vars[0])
         tmp_max = float(max(map(max, weights)))
         self.max_weight_percentage = float(
             max(tmp_max, self.max_weight_percentage))
         tmp_min = float(min(map(min, weights)))
         self.min_weight_percentage = float(
             min(tmp_min, self.min_weight_percentage))
         self.max_node_count = max(max(map(len, weights)),
                                   self.input_length)
         with model.session.as_default():
             if len(layer_vars) > 1:
                 bias_values = tflearn.variables.get_value(layer_vars[1])
                 self.min_bias_percentage = float(
                     min((self.min_bias_percentage,
                          float(min(bias_values)))))
                 self.max_bias_percentage = float(
                     max((self.max_bias_percentage,
                          float(max(bias_values)))))
Example #16
import cv2
from constants import *
from Training import EmotionRecognition
import numpy as np
from PIL import Image, ImageDraw, ImageFont
import tflearn
import tensorflow as tf

# Load Model
network = EmotionRecognition()
network.build_network()

# Convolution_1
conv_1_var = tflearn.get_layer_variables_by_name("Conv2d_1")
print("Convolution_1 layer weights:")
print(network.model.get_weights(conv_1_var[0]))
print("Convolution_1 layer Bias:")
print(network.model.get_weights(conv_1_var[1]))

# Convolution_2a
conv_2a_var = tflearn.get_layer_variables_by_name("Conv_2a_FX1")
print("Convolution_2a layer weights:")
print(network.model.get_weights(conv_2a_var[0]))
print("Convolution_2a layer Bias:")
print(network.model.get_weights(conv_2a_var[1]))

# Convolution_2b
conv_2b_var = tflearn.get_layer_variables_by_name("Conv_2b_FX1")
print("Convolution_2b layer weights:")
print(network.model.get_weights(conv_2b_var[0]))
print("Convolution_2b layer Bias:")
print(network.model.get_weights(conv_2b_var[1]))
Example #17
def my_objective(y_pred, y_true):
    return y_pred


net = tflearn.regression(net, placeholder=None, loss=my_objective, learning_rate=1.0e-5)

model = tflearn.DNN(net, tensorboard_verbose=3)

model.fit(X_inputs={'main_statement_input': np.transpose(training_data_main[:main_statement_time_steps], (1, 0, 2)),
                    'main_hyps_input': np.transpose(training_data_main[main_statement_time_steps:], (1, 0, 2)),
                    'prop_statement_input_1': np.transpose(training_data_props[0][:prop_statement_time_steps],
                                                           (1, 0, 2)),
                    'prop_hyps_input_1': np.transpose(training_data_props[0][prop_statement_time_steps:], (1, 0, 2)),
                    'prop_statement_input_2': np.transpose(training_data_props[1][:prop_statement_time_steps],
                                                           (1, 0, 2)),
                    'prop_hyps_input_2': np.transpose(training_data_props[1][prop_statement_time_steps:], (1, 0, 2)),
                    'prop_statement_input_3': np.transpose(training_data_props[2][:prop_statement_time_steps],
                                                           (1, 0, 2)),
                    'prop_hyps_input_3': np.transpose(training_data_props[2][prop_statement_time_steps:], (1, 0, 2)),
                    'prop_statement_input_4': np.transpose(training_data_props[3][:prop_statement_time_steps],
                                                           (1, 0, 2)),
                    'prop_hyps_input_4': np.transpose(training_data_props[3][prop_statement_time_steps:], (1, 0, 2)),
                    'prop_statement_input_5': np.transpose(training_data_props[4][:prop_statement_time_steps],
                                                           (1, 0, 2)),
                    'prop_hyps_input_5': np.transpose(training_data_props[4][prop_statement_time_steps:], (1, 0, 2))},
          Y_targets=None, n_epoch=30, batch_size=100)

model.save('pred_model.tflearn')

np.save('Pred_W', model.get_weights(tflearn.get_layer_variables_by_name('Weight')[0]))
Example #18
# Sequence padding
# random.seed(1234)
# trainX = pad_sequences(X, maxlen=5, value=0.)
# trainY = [[random.uniform(-1, 1)] for i in range(len(X))] #PROOF OF CONCEPT
# print X
# print len(X)

splitIdx = len(X) / 5
trainX, valX, testX = splitData(X, 0.7, 0.1)
trainY, valY, testY = splitData(Y, 0.7, 0.1)

# Training
# net = fat_one_layer_LSTM()
net = feedforward(embedding, maxClasses)

model = tflearn.DNN(net)  #, tensorboard_verbose=3)

#insert our doc2vec embeddings here
embeddingWeights = tflearn.get_layer_variables_by_name('Embedding')[0]
model.set_weights(embeddingWeights, embedding)

model.fit(trainX,
          trainY,
          validation_set=(valX, valY),
          show_metric=True,
          batch_size=32,
          n_epoch=100)

print(trainY)
print(model.predict(testX))
# model.save("rnn")
Example #19
def train(data_dict,
          model_type,
          vector_type,
          embed_size,
          dump_embeddings=False):

    data, trainX, trainY, testX, testY, vocab_processor = return_data(
        data_dict)
    vocab_size = len(vocab_processor.vocabulary_)
    print("Vocabulary Size: {:d}".format(vocab_size))
    vocab = vocab_processor.vocabulary_._mapping

    print("Running Model: " + model_type +
          " with word vector initiliazed with " + vector_type +
          " word vectors.")

    if (model_type != "cnn"):
        model = get_model(model_type, trainX.shape[1], vocab_size, embed_size,
                          4, LEARN_RATE)
        initial_weights = model.get_weights()
        shuffle_weights(model, initial_weights)
    else:
        model, network = get_model(model_type, trainX.shape[1], vocab_size,
                                   embed_size, 4, LEARN_RATE)
        initial_weights = model.get_weights(network.W)
        weights = [
            np.random.permutation(w.flat).reshape(w.shape)
            for w in initial_weights
        ]
        weights = np.asarray(weights).reshape(initial_weights.shape)
        model.set_weights(network.W, weights)

    if (model_type == 'cnn'):
        if (vector_type != "random"):
            print("Word vectors used: " + vector_type)
            embeddingWeights = tflearn.get_layer_variables_by_name(
                'EmbeddingLayer')[0]
            model.set_weights(
                embeddingWeights,
                map_embedding_weights(
                    get_embeddings_dict(vector_type, embed_size), vocab,
                    embed_size))
            model.fit(trainX,
                      trainY,
                      n_epoch=EPOCHS,
                      shuffle=True,
                      show_metric=True,
                      batch_size=BATCH_SIZE)
        else:
            model.fit(trainX,
                      trainY,
                      n_epoch=EPOCHS,
                      shuffle=True,
                      show_metric=True,
                      batch_size=BATCH_SIZE)
    else:
        if (vector_type != "random"):
            print("Word vectors used: " + vector_type)
            model.layers[0].set_weights([
                map_embedding_weights(
                    get_embeddings_dict(vector_type, embed_size), vocab,
                    embed_size)
            ])
            model.fit(trainX,
                      trainY,
                      epochs=EPOCHS,
                      shuffle=True,
                      batch_size=BATCH_SIZE,
                      verbose=1)
        else:
            model.fit(trainX,
                      trainY,
                      epochs=EPOCHS,
                      shuffle=True,
                      batch_size=BATCH_SIZE,
                      verbose=1)
    return evaluate_model(model, testX, testY)
Example #20
		# spatially dropped and top element (winners)
		return net * drop, tf.reshape(th, tf.stack([b, c]))  # b, c


if __name__ == '__main__':
	"""
    let us test it on MNIST database"""
	import tflearn.datasets.mnist as mnist

	X, _, valX, _ = mnist.load_data(one_hot=True)
	X = X[:500].reshape([-1, 28, 28, 1])
	valX = valX[:100].reshape([-1, 28, 28, 1])

	with tf.Session() as sess:
		ae = sparseAE(sess)
		ae.build_model([None, 28, 28, 1])
		# train the Autoencoder
		ae.train(X, valX, n_epochs=1)  # valX for validation
		# compute the output for a certain input
		out = ae.model.predict(X[0].reshape([-1, 28, 28, 1]))
		print(out)
		# get the weights of a certain layer
		vars = tflearn.get_layer_variables_by_name('conv3')  # in this case, it is the learned features
		W = ae.model.get_weights(vars[0])
		print(W.shape)
		# get output of encoder for certain input
		m2 = tflearn.DNN(ae.sparse_rep, session=sess)
		print(m2.predict(X[0].reshape([-1, 28, 28, 1])))
		# save and load the model
		ae.save('./sparseAE.tflearn')
		ae.load('./sparseAE.tflearn')
Example #21
        x = tflearn.fully_connected(x, 256, activation='relu')
        x = tflearn.fully_connected(x, 1, activation='sigmoid')
        return x


gen_input = tflearn.input_data(shape=[None, z_dim], name='input_noise')
disc_input = tflearn.input_data(shape=[None, 784], name='disc_input')

gen_sample = generate(gen_input)
disc_real = discrimination(disc_input)
disc_fake = discrimination(gen_sample)

disc_loss = -tf.reduce_mean(tf.log(disc_real) + tf.log(1. - disc_fake))
gen_loss = -tf.reduce_mean(tf.log(disc_fake))

gen_vars = tflearn.get_layer_variables_by_name('Generate')
gen_model = tflearn.regression(gen_sample,
                               placeholder=None,
                               optimizer='adam',
                               loss=gen_loss,
                               trainable_vars=gen_vars,
                               batch_size=64,
                               name='target_gen',
                               op_name='GEN')
disc_vars = tflearn.get_layer_variables_by_scope('Discriminator')
disc_model = tflearn.regression(disc_real,
                                placeholder=None,
                                optimizer='adam',
                                loss=disc_loss,
                                trainable_vars=disc_vars,
                                batch_size=64,
Example #22
network, _ = architectures.build_network(arch, network, CLASSES, None)

# model definition
model = tflearn.DNN(network,
                    checkpoint_path='models',
                    max_checkpoints=1,
                    tensorboard_verbose=0)  # tensorboard_dir='logs'

print("[INFO] Loading trained model...")
model.load(modelpath)
print("[INFO] Model: ", modelpath)
print("[INFO] Trained model loaded!\n")

# get layer by its name
if isinstance(layer, six.string_types):
    vars = tflearn.get_layer_variables_by_name(layer)
    variable = vars[0]
else:
    variable = layer.W

# load weights (learnt filters) and plot them
weights = model.get_weights(variable)
print("[INFO] Weights shape: ", weights.shape)
plot_conv_weights(weights, input_channel=ichannel)

# try to load an image
load = True
try:
    if (ichannel == 3):
        img = scipy.ndimage.imread(sys.argv[5], mode='RGB', flatten=False)
    else:
Example #23
 def set_hidden_layer_count(self):
     x = 0
     while tflearn.get_layer_variables_by_name('dense' + str(x)):
         x += 1
     return x
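The probing loop above works because tflearn.get_layer_variables_by_name is backed by a graph-collection lookup: a name that matches no layer returns an empty (falsy) list instead of raising. A small sketch of that behaviour; 'no_such_layer' is a made-up name used only for illustration.

missing = tflearn.get_layer_variables_by_name('no_such_layer')
print(bool(missing))   # False, so the while loop in set_hidden_layer_count stops here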
Example #24
def train_model(train, test, vocab_size, n_epoch=5, n_units=128):
    '''Method to load the dataset and train the RNN model.'''

    train_x = train['sub_seqs']
    train_y = train['sub_label']
    test_x = test['sub_seqs']
    test_y = test['sub_label']
    sequence_chunk_size = 15
    learning_rate = 0.0001
    dropout = 0.6

    # Sequence padding
    train_x = pad_sequences(train_x,
                            maxlen=sequence_chunk_size,
                            value=0.,
                            padding='post')
    test_x = pad_sequences(test_x,
                           maxlen=sequence_chunk_size,
                           value=0.,
                           padding='post')

    # Converting labels to binary vectors
    train_y = to_categorical(train_y, nb_classes=vocab_size)
    test_y = to_categorical(test_y, nb_classes=vocab_size)

    print("Building network topology...")
    # Network building
    net = tflearn.input_data([None, 15])
    net = tflearn.embedding(net,
                            input_dim=vocab_size,
                            output_dim=128,
                            trainable=True)
    net = tflearn.gru(net,
                      n_units=n_units,
                      dropout=dropout,
                      weights_init=tflearn.initializations.xavier(),
                      return_seq=False)
    net = tflearn.fully_connected(
        net,
        vocab_size,
        activation='softmax',
        weights_init=tflearn.initializations.xavier())
    net = tflearn.regression(net,
                             optimizer='adam',
                             learning_rate=learning_rate,
                             loss='categorical_crossentropy')

    # Training
    model = tflearn.DNN(net, tensorboard_verbose=2)

    print("Training model...")
    model.fit(train_x,
              train_y,
              validation_set=(test_x, test_y),
              show_metric=False,
              batch_size=256,
              n_epoch=n_epoch)

    # For visualizations
    embedding = tflearn.get_layer_variables_by_name("Embedding")[0]

    return [model, embedding]
Example #25
]
net = merge(net, mode='concat')
print("After RNN : ", net.get_shape().as_list())
print("After Dropout : ", net.get_shape().as_list())
net = regression(net,
                 optimizer='adam',
                 loss='binary_crossentropy',
                 learning_rate=0.005)
print("After regression : ", net.get_shape().as_list())

testX = trainX[int(0.3 * len(trainY)):]
testY = trainY[int(0.3 * len(trainY)):]

# Training
model = DNN(net, clip_gradients=0., tensorboard_verbose=2)
embeddingWeights = get_layer_variables_by_name('EmbeddingLayer')[0]
# Assign your own weights (for example, a numpy array [input_dim, output_dim])
model.set_weights(embeddingWeights, embeddings)
model.fit(trainX,
          trainY,
          n_epoch=3,
          validation_set=0.1,
          show_metric=True,
          batch_size=32,
          shuffle=True)
#print( model.evaluate(testX, testY) )
predictions = model.predict(testX)
predictions = prob2Onehot(predictions)
#print("Predictions : ", list(predictions[10]))

##Calculate F1 Score
    model = tflearn.DNN(net, tensorboard_verbose=2)

    model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=False,
              batch_size=512,n_epoch=n_epoch)
    
    return model

split_perc=0.8
train_len, test_len = np.floor(len(train_df)*split_perc), np.floor(len(train_df)*(1-split_perc))
train, test = train_df.ix[:train_len-1], train_df.ix[train_len:train_len + test_len]
model = train_model(train,test,len(vocab))

from sklearn.manifold import TSNE
# retrieve the embedding layer from the model by its default name 'Embedding'

embedding = tflearn.get_layer_variables_by_name("Embedding")[0]
finalWs = model.get_weights(embedding)
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
lowDWeights = tsne.fit_transform(finalWs)

# bokeh visualization package
from bokeh.plotting import figure, show, output_notebook,output_file
from bokeh.models import ColumnDataSource, LabelSet

#control the number of labelled subreddits to display
sparse_labels = [lbl if random.random() <=0.01 else '' for lbl in vocab]
source = ColumnDataSource({'x':lowDWeights[:,0],'y':lowDWeights[:,1],'labels':sparse_labels})


TOOLS="hover,crosshair,pan,wheel_zoom,zoom_in,zoom_out,box_zoom,undo,redo,reset,tap,save,box_select,poly_select,lasso_select,"