Example #1
0
    for i in range(batch_size):
        if (i % 2 == 0):
            num = randint(0, 12499)
            labels.append([1, 0])
        else:
            num = randint(12500, 24999)
            labels.append([0, 1])
        arr[i] = test_data[num]
    return arr, labels


# Call implementation
# Load pretrained GloVe vectors: `glove_array` is presumably the embedding
# matrix and `glove_dict` a token-to-index lookup — both come from the
# project's `implementation` module (not visible here; confirm there).
glove_array, glove_dict = imp.load_glove_embeddings()
print("Loaded glove")

# Load both data splits; `test=True` selects the held-out set.
training_data = imp.load_data(glove_dict)
test_data = imp.load_data(glove_dict, test=True)
print("Loaded data")

# Build the TF graph; define_graph returns the placeholders and ops the
# training loop later feeds and runs.
input_data, labels, dropout_keep_prob, optimizer, accuracy, loss = \
    imp.define_graph(glove_array)

# tensorboard
# Two scalar summaries over the same `accuracy` tensor, tagged separately
# so train/test curves can be plotted side by side in TensorBoard.
train_acc_op = tf.summary.scalar("training_accuracy", accuracy)
test_acc_op = tf.summary.scalar("testing_accuracy", accuracy)
#tf.summary.scalar("loss", loss)
#summary_op = tf.summary.merge_all()

# saver
# Saver over all graph variables, used for writing checkpoints.
all_saver = tf.train.Saver()
Example #2
0
def getTestBatch2(training_data):
    """Draw a batch of reviews from around the positive/negative boundary.

    Samples ``batch_size`` indices uniformly from [11499, 13499]; indices
    up to 12499 get a one-hot positive label [1, 0], the rest the negative
    label [0, 1]. Relies on the module-level ``batch_size``, ``seq_length``
    and ``randint``.

    Returns:
        (arr, labels): a (batch_size, seq_length) array whose rows are
        copied from ``training_data``, and the parallel list of one-hot
        labels.
    """
    batch = np.zeros([batch_size, seq_length])
    one_hot = []
    for row in range(batch_size):
        idx = randint(11499, 13499)
        one_hot.append([1, 0] if idx <= 12499 else [0, 1])
        batch[row] = training_data[idx]
    return batch, one_hot


# Call implementation
# Load pretrained GloVe vectors and the word->index dictionary from the
# project's `implementation` module (not visible in this excerpt).
glove_array, glove_dict = imp.load_glove_embeddings()
training_data = imp.load_data(glove_dict)

print('Size of training data: ', len(training_data))

# Build the TF graph: placeholders plus the train/metric ops.
input_data, labels, dropout_keep_prob, optimizer, accuracy, loss = imp.define_graph(
    glove_array)

# tensorboard
train_accuracy_op = tf.summary.scalar("accuracy", accuracy)
tf.summary.scalar("loss", loss)
summary_op = tf.summary.merge_all()  # one merged op for a single sess.run fetch

# saver
all_saver = tf.train.Saver()  # checkpoints every variable in the graph

sess = tf.InteractiveSession()  # installs itself as the default session
Example #3
0
def getTrainBatch():
    """Assemble a class-balanced training batch from the module-level data.

    Even slots draw a random positive review (indices 0..12499, label
    [1, 0]); odd slots draw a random negative one (indices 12500..24999,
    label [0, 1]). Uses the module-level ``batch_size``, ``seq_length``,
    ``randint`` and ``training_data``.

    Returns:
        (arr, labels): a (batch_size, seq_length) array plus the parallel
        list of one-hot labels.
    """
    one_hot = []
    batch = np.zeros([batch_size, seq_length])
    for slot in range(batch_size):
        if slot % 2 == 0:
            idx, tag = randint(0, 12499), [1, 0]
        else:
            idx, tag = randint(12500, 24999), [0, 1]
        one_hot.append(tag)
        batch[slot] = training_data[idx]
    return batch, one_hot

# Call implementation
# GloVe embeddings + word->index dict from the project's `implementation`
# module (not visible in this excerpt).
glove_array, glove_dict = imp.load_glove_embeddings()
training_data = imp.load_data(glove_dict)
input_data, labels, dropout_keep_prob, optimizer, accuracy, loss = \
    imp.define_graph(glove_array)

# tensorboard
train_accuracy_op = tf.summary.scalar("training_accuracy", accuracy)
tf.summary.scalar("loss", loss)
summary_op = tf.summary.merge_all()  # merged summaries for one fetch

# saver
all_saver = tf.train.Saver()

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())

# Timestamped TensorBoard run dir so successive runs don't clobber each
# other (statement continues past this excerpt).
logdir = "tensorboard/" + datetime.datetime.now().strftime(
Example #4
0
def getTrainBatch():
    """Sample a random 4-class training batch.

    Each slot picks a uniformly random example index; its label is a
    4-element one-hot vector set from ``training_classes``. Uses the
    module-level ``batch_size``, ``seq_length``, ``randint``,
    ``training_data`` and ``training_classes``.

    Returns:
        (arr, labels): a (batch_size, seq_length) array plus the parallel
        list of one-hot labels.
    """
    one_hot = []
    batch = np.zeros([batch_size, seq_length])
    for slot in range(batch_size):
        idx = randint(0, training_data.shape[0] - 1)
        tag = [0] * 4
        tag[training_classes[idx]] = 1
        one_hot.append(tag)
        batch[slot] = training_data[idx]
    return batch, one_hot


# Call implementation
# word2vec variant: embedding matrix plus word->index dict from the
# project's `implementation` module (not visible here).
word2vec_array, word2vec_dict = imp.load_word2vec_embeddings()
training_data, training_classes = imp.load_data(word2vec_dict)
input_data, labels, dropout_keep_prob, optimizer, accuracy, loss = \
    imp.define_graph(word2vec_array)

# tensorboard
train_accuracy_op = tf.summary.scalar("training_accuracy", accuracy)
tf.summary.scalar("loss", loss)
summary_op = tf.summary.merge_all()  # single merged fetch for all summaries

# saver
all_saver = tf.train.Saver()  # saves every variable in the graph

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())

# Timestamped TensorBoard run dir (statement continues past this excerpt).
logdir = "tensorboard/" + datetime.datetime.now().strftime(
Example #5
0
# Directory where model checkpoints are written/restored.
checkpoints_dir = "./checkpoints"


def getTrainBatch(data, labels, size=None):
    """Sample a random training batch (with replacement).

    Args:
        data: 2-D array of examples, one row per example.
        labels: 2-D array of labels, row-aligned with ``data``.
        size: number of rows to draw; defaults to the module-level
            ``batch_size`` (kept for backward compatibility with existing
            zero-argument-style callers).

    Returns:
        (arr, lab): the sampled example rows and their matching label rows.
        Indices are drawn independently, so a row may appear more than once.
    """
    if size is None:
        size = batch_size  # module-level default used by the training loop
    sample = np.random.randint(data.shape[0], size=size)
    arr = data[sample, :]
    lab = labels[sample, :]
    return arr, lab


# Call implementation
# GloVe embeddings + word->index dict from the project's `implementation`
# module (not visible in this excerpt).
glove_array, glove_dict = imp.load_glove_embeddings()
print("Loaded glove")

training_data, training_labels = imp.load_data(glove_dict)

print("Loaded data")

# Build the TF graph; define_graph returns placeholders plus train/metric ops.
input_data, labels, dropout_keep_prob, optimizer, accuracy, loss = \
    imp.define_graph(glove_array)

# tensorboard
train_acc_op = tf.summary.scalar("training_accuracy", accuracy)

#tf.summary.scalar("loss", loss)
#summary_op = tf.summary.merge_all()

# saver
# Saver over all graph variables, used for writing checkpoints.
all_saver = tf.train.Saver()
Example #6
0
    arr = np.zeros([batch_size, seq_length])
    for i in range(batch_size):
        num = offset + i
        label = [0] * num_classes
        label[validation_classes[num]] = 1
        labels.append(label)
        arr[i] = validation_data[num]
        fnames.append(validation_fnames[num])
    return arr, labels, fnames


# Call implementation
# Google word2vec embeddings via a gensim-based loader; the commented code
# shows the earlier implementation-module path.
word2vec_array, word2vec_dict = gensim_load.load_google_embeddings(
)  # imp.load_word2vec_embeddings()
# word2vec_array, word2vec_dict = imp.load_word2vec_embeddings()
training_data, training_classes, training_fnames, training_o_comps = imp.load_data(
    word2vec_dict)
# Keep untouched copies before the split below rebinds the training_* names.
original_data = training_data[:, :]
original_classes = training_classes[:]
original_fnames = training_fnames[:]
original_o_comps = training_o_comps[:]
# 80/20 train/validation split. validation_split presumably reads the
# module-level training_* values, since only the ratio is passed —
# TODO(review): confirm against its definition (not visible here).
training_data, training_classes, training_fnames, training_o_comps, \
validation_data, validation_classes, validation_fnames, validation_o_comps = \
    validation_split(0.2)
gensim_load.log(",".join(training_fnames))


# Graph construction; this variant also returns prediction ops.
input_data, labels, dropout_keep_prob, optimizer, accuracy, loss, \
    prediction, correct_pred, pred_class, pred_prob = \
    imp.define_graph(word2vec_array)

# tensorboard
Example #7
0
"""This module runs tests"""

from implementation import load_glove_embeddings, load_data, define_graph
import os
import numpy as np
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

embeddings, word_index_dict = load_glove_embeddings()

data = load_data(word_index_dict, test=True)


def getValBatch(dataset=None, total=25000, seq_len=40, split=12500):
    """Materialise the full validation batch.

    The original hard-coded constants are now keyword parameters with the
    same defaults, so calling ``getValBatch()`` is unchanged. The duplicated
    ``arr[i] = data[i]`` in both label branches is hoisted out.

    Args:
        dataset: sequence of row vectors; defaults to the module-level
            ``data`` loaded above.
        total: number of examples to copy.
        seq_len: width of each example row.
        split: index below which examples are labelled [1, 0] (positive);
            the rest get [0, 1]. Assumes the dataset is ordered
            positives-first — TODO(review): confirm against load_data.

    Returns:
        (arr, labels): a (total, seq_len) float array and the parallel list
        of one-hot labels.
    """
    if dataset is None:
        dataset = data  # module-level evaluation split loaded above
    labels = []
    arr = np.zeros([total, seq_len])
    for i in range(total):
        arr[i] = dataset[i]
        labels.append([1, 0] if i < split else [0, 1])
    return arr, labels


# Materialise the full validation set once, up front.
val_data, val_labels = getValBatch()

# Rebuild the trained graph structure from the checkpoint's meta file.
saver = tf.train.import_meta_graph(
    './checkpoints/trained_model.ckpt-50000.meta')