# ===== Example #1 =====
    r'C:\Users\pravi\PycharmProjects\Sentence_similarity\data\sts\sick2014\SICK_train.txt'
)

# Unpack the preprocessed training split: paired sentences (as id matrices),
# their true lengths, labels, and the vocabulary built while loading.
train_data_1 = res_train['data_1']
train_data_2 = res_train['data_2']
train_length1 = res_train['length1']
train_length2 = res_train['length2']
labels = res_train['label']
word2Id = res_train['word2Id']  # token -> integer id
Id2Word = res_train['Id2Word']  # integer id -> token
max_sequence_length = res_train['max_sequence_length']
vocab_size = res_train['vocab_size']
total_classes = res_train['total_classes']

# Load the test split, passing in the training vocabulary so token ids stay
# consistent across splits.
res_test = load_test_data(
    r'C:\Users\pravi\PycharmProjects\Sentence_similarity\data\sts\sick2014\SICK_test_annotated.txt',
    max_sequence_length, word2Id, Id2Word)
# NOTE(review): word2Id and Id2Word are rebound to the mappings returned by
# load_test_data -- presumably it only extends the training vocabulary with
# unseen test tokens; confirm against its implementation.
word2Id = res_test['word2Id']
test_data_1 = res_test['data_1']
test_data_2 = res_test['data_2']
test_labels = res_test['label']
test_length1 = res_test['length1']
test_length2 = res_test['length2']
Id2Word = res_test['Id2Word']

# Embedding lookup table: row i holds the pretrained vector for token
# Id2Word[i]; tokens with no pretrained vector keep an all-zero row.
words_list = word_vecs.word2vec.keys()
Id2Vec = np.zeros((len(Id2Word), embedding_size))
for i in range(len(Id2Word)):
    word = Id2Word[i]
    if word in words_list:
        Id2Vec[i] = word_vecs.word2vec[word]
# ===== Example #2 =====
alphabet = preprocess.labels()

# Training data
# NOTE(review): the train split is built with the "eval" transform and
# shuffle=False, so this loader appears intended for inference/evaluation
# over the training set rather than for optimization -- confirm.
X_train, X_te, y_train, y_te, train_ws = preprocess.load_data()
X_train = np.vstack((X_train, X_te))  # merge both subsets into one array
y_train = np.hstack((y_train, y_te))
train_data = MyDataset(X_train, y_train,
                       preprocess.make_transform(mode="eval"))
train_loader = DataLoader(train_data,
                          batch_size=512,
                          shuffle=False,
                          num_workers=8,
                          pin_memory=True)

# Testing data: same batch size and eval transform as the train loader above,
# kept unshuffled so predictions line up with test_ws / y_test ordering.
X_test, y_test, test_ws = preprocess.load_test_data()
test_data = MyDataset(X_test, y_test, preprocess.make_transform(mode="eval"))
test_loader = DataLoader(test_data,
                         batch_size=512,
                         shuffle=False,
                         num_workers=8,
                         pin_memory=True)

# Resolve model artifacts relative to the current working directory --
# assumes the script is launched from the repository root; confirm.
base_dir = os.getcwd()
model_dir = os.path.join(base_dir, "Code", "model")
model_path = os.path.join(model_dir, "sign_model.pth")

# Read which architecture the saved checkpoint corresponds to.
# NOTE(review): readline() keeps the trailing newline, so model_name may be
# e.g. "resnet\n" -- downstream equality checks likely need .strip(); confirm.
with open(os.path.join(model_dir, "model_specification"), "r") as ms:
    model_name = ms.readline()

# Load the trained model
# ===== Example #3 =====
              path=r'C:\Users\pravi\PycharmProjects\NLI\data_pickles\data')
print("done")

# Unpack the preprocessed NLI training data: premise/hypothesis id matrices,
# labels, true sequence lengths, and the vocabulary mappings.
train_data_1 = res['data_1']
train_data_2 = res['data_2']
train_label = res['labels']
train_data_len_1 = res['data_length_1']
train_data_len_2 = res['data_length_2']
word2Id = res['word2Id']  # token -> integer id

words_data_list = word2Id.keys()  # live view over the vocabulary tokens
Id2Word = res['Id2Word']  # integer id -> token
max_sequence_length = res['max_sequence_length']
total_classes = res['total_classes']

# Load the dev/test split with the training vocabulary so ids stay aligned.
test_res = load_test_data(dev_path, word2Id, Id2Word, max_sequence_length)
test_data_1 = test_res['data_1']
test_data_2 = test_res['data_2']
test_label = test_res['labels']
test_data_len_1 = test_res['data_length_1']
test_data_len_2 = test_res['data_length_2']
# NOTE(review): word2Id is rebound to the test-side mapping -- presumably
# load_test_data only extends the training vocabulary; confirm.
word2Id = test_res['word2Id']

# Embedding lookup table: row i holds the pretrained vector for Id2Word[i];
# out-of-vocabulary tokens fall back to the pretrained 'unknown' embedding.
words_list = word_vecs.word2vec.keys()
Id2Vec = np.zeros((len(Id2Word), embedding_size))
for i in range(len(Id2Word)):
    word = Id2Word[i]
    Id2Vec[i] = word_vecs.word2vec[word if word in words_list else 'unknown']
# ===== Example #4 =====
    # Persist the trained model, then plot the loss curves logged to CSV.
    with open(PICKLE_FILENAME, 'wb') as f:
        pickle.dump(model, f)
    print('Done.')
    # Column 0 is the epoch index; columns 1 and 2 are train/test loss.
    # Row 0 is skipped via [1:] -- presumably a CSV header row; confirm.
    training_data = np.genfromtxt(LOG_CSV_FILENAME, delimiter=',')
    plt.plot(training_data[1:, 0], training_data[1:, 1], label='Train loss')
    plt.plot(training_data[1:, 0], training_data[1:, 2], label='Test loss')
    plt.legend()
    plt.xlabel('Epoch')
    plt.show()

# TEST MODE
else:
    with open(PICKLE_FILENAME, 'rb') as f:
        model = pickle.load(f)
    if not TEST_ON_REAL_IMAGES:
        test_images = load_test_data(TEST_FILENAME)
        for i in range(NUM_EXAMPLES_TO_SHOW):
            image = test_images[i][0]
            img_tensor = torch.FloatTensor(image)
            img_tensor = img_tensor.view(1, 1, 96, 96)
            output = model(img_tensor)
            output = output.data[0].numpy()
            output = (output * 48.0) + 48.0
            plot_image_and_predictions(image, output)
    else:
        img = Image.open(TEST_IMAGES[0]).convert('L')
        img = img.resize([96, 96], Image.ANTIALIAS)
        img_data = np.asarray(img.getdata()).reshape(img.size)
        img_data = img_data / float(PIXEL_MAX_VAL)  # normalize to 0..1
        img_tensor = torch.FloatTensor(img_data)
        img_tensor = img_tensor.view(1, 1, 96, 96)