Example No. 1
def model_validate(val_data_normalized, train_window, train_history,
                   train_forward, model, cuda, feature, label):
    # Model validation: score the network on the held-out set without updating weights.
    model.eval()
    val_feature, val_label, val_exog, val_len = nn_create_val_inout_sequences(
        val_data_normalized, train_window, train_history, train_forward,
        feature, label, cuda)
    val_set = DataHolder(val_feature, val_label, val_exog, 1, val_len, cuda)
    val_batch = DataLoader(val_set,
                           batch_size=1,
                           shuffle=False,
                           drop_last=True)
    max_tensor_len = train_window - 1
    val_single_loss = 0

    val_pred_all = []
    # Initialized here as well so the final flush after the loop is safe
    # even if the loader yields fewer than 12 windows.
    val_pred_batch = []
    current_batch = tqdm(val_batch)
    loss_function = nn.MSELoss()

    for idx, batch in enumerate(current_batch):
        feature_tensor, label_tensor, exog_tensor, len_tensor = batch
        model.init_hidden(1, cuda)

        if idx % 12 == 0:
            # Start a new 12-step forecast window from the observed features.
            if idx != 0:
                val_pred_all.append(val_pred_batch)
            val_pred_batch = []
            temp_tensor = feature_tensor
        temp_padded_tensor = pad_tensor(temp_tensor,
                                        len_tensor.squeeze().item(),
                                        max_tensor_len, cuda)

        with torch.no_grad():
            val_mean_pred, val_std_pred = model(temp_padded_tensor,
                                                exog_tensor, len_tensor, 1)
            val_pred_batch.append(
                (val_mean_pred.clone().detach().cpu().numpy(),
                 val_std_pred.clone().detach().cpu().numpy(),
                 label_tensor.clone().detach().cpu().numpy()))

            single_loss = loss_function(val_mean_pred.squeeze(),
                                        label_tensor.squeeze())

            val_single_loss += single_loss.item()

            # Feed the prediction back in: concatenate it with the exogenous
            # inputs and append the result to the feature sequence, so the
            # next step is conditioned on the model's own forecast.
            temp_feature = torch.cat((val_mean_pred.unsqueeze(0), exog_tensor),
                                     dim=2)
            temp_tensor = torch.cat((temp_tensor, temp_feature), dim=1)

    # Flush the final window, which the modulo check above never appends.
    if val_pred_batch:
        val_pred_all.append(val_pred_batch)

    return val_single_loss, val_pred_all
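
The pad_tensor helper itself is not shown in this example. A minimal sketch of
what the call sites above appear to assume, namely zero-padding a
[1, seq_len, n_features] tensor along the time dimension up to
max_tensor_len; the body is an illustration, not the original implementation:

import torch

def pad_tensor(tensor, cur_len, max_len, cuda):
    # Sketch only: right-pad with zeros along dim 1 up to max_len steps.
    # cur_len mirrors the call sites above; the length actually padded is
    # read from the tensor itself.
    pad_len = max_len - tensor.size(1)
    if pad_len <= 0:
        return tensor
    padding = torch.zeros(tensor.size(0), pad_len, tensor.size(2))
    if cuda:
        padding = padding.cuda()
    return torch.cat((tensor, padding), dim=1)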
Example No. 2
    def get_loss(self, enc_y, states, target, dropout=0.0):
        '''
        enc_y: batch_size([seq_len, dim])
        states: ([batch, dim], [batch, dim])
        target: [batch, max_len] (padded with -1.)
        '''
        mask = tf.not_equal(target, -1.)
        h, c = states
        enc_y, _ = pad_tensor(enc_y)
        enc_y = tf.nn.dropout(enc_y, 1. - dropout)
        dec_hidden = tf.nn.dropout(h, 1. - dropout)
        dec_cell = tf.nn.dropout(c, 1. - dropout)

        l_states = [(dec_hidden, dec_cell) for _ in range(self.layer)]
        # The -1 padding becomes 0 here, so the ids are valid inputs again;
        # the mask computed above still excludes those positions from the loss.
        target = tf.nn.relu(target)
        dec_input = target[:, 0]
        loss = 0
        for t in range(1, target.shape[1]):
            # passing enc_output to the decoder
            predictions, l_states, att = self.call(dec_input, l_states, enc_y)
            real = tf.boolean_mask(target[:, t], mask[:, t])
            pred = tf.boolean_mask(predictions, mask[:, t])
            loss += self.loss_function(real, pred)
            # using teacher forcing
            dec_input = target[:, t]

        return loss / tf.reduce_sum(tf.cast(mask, tf.float32))
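
This second variant is called with one argument and unpacked into two values,
which suggests it both pads a batch of ragged [seq_len, dim] tensors and
returns their original lengths. A hedged TensorFlow sketch under that
assumption (the helper name comes from the call site; the body is illustrative):

import tensorflow as tf

def pad_tensor(tensors):
    # Sketch only: stack a list of [seq_len, dim] tensors into one dense
    # [batch, max_seq_len, dim] tensor, zero-padding the shorter sequences,
    # and return the original lengths alongside it.
    lengths = tf.stack([tf.shape(t)[0] for t in tensors])
    max_len = tf.reduce_max(lengths)
    padded = tf.stack(
        [tf.pad(t, [[0, max_len - tf.shape(t)[0]], [0, 0]]) for t in tensors])
    return padded, lengths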
Example No. 3
    def get_image(self, vids):
        # Fetch each video's frame features and pad them to a common length.
        images = [
            torch.from_numpy(self.image_dt[vid]).to(self.device).split(1)
            for vid in vids
        ]
        images = pad_tensor(images).squeeze(2)

        return images
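
Here pad_tensor receives a list whose elements are the tuples produced by
.split(1), and the result is squeezed at dim 2, which points to a
[batch, max_frames, 1, feat] output. A sketch consistent with those shapes,
built on torch.nn.utils.rnn.pad_sequence rather than the project's own code:

import torch
from torch.nn.utils.rnn import pad_sequence

def pad_tensor(sequences):
    # Sketch only: re-stack each video's per-frame [1, feat] chunks into an
    # [n_frames, 1, feat] tensor, then zero-pad across videos to the longest
    # frame count, giving [batch, max_frames, 1, feat].
    stacked = [torch.stack(frames) for frames in sequences]
    return pad_sequence(stacked, batch_first=True)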
Example No. 4
tokenizer = Tokenizer()
print('Initializing vectorizer ...')
vectorizer = Vectorizer(word_embeddings, tokenizer)

#### training dataset ####
# vectorizing
ids, train_a_vectors, train_b_vectors, train_gold = vectorizer.vectorize_df(df)
train_max_a_length = len(max(train_a_vectors, key=len))
train_max_b_length = len(max(train_b_vectors, key=len))
print('maximum number of tokens per sentence A in training set is %d' %
      train_max_a_length)
print('maximum number of tokens per sentence B in training set is %d' %
      train_max_b_length)
max_len = max([train_max_a_length, train_max_b_length])

# padding
train_a_vectors = pad_tensor(train_a_vectors, max_len)
train_b_vectors = pad_tensor(train_b_vectors, max_len)

print('Training the model ...')
siamese = SiameseModel()
validation_data = None
t1 = time.time()
siamese.fit(train_a_vectors,
            train_b_vectors,
            train_gold,
            validation_data,
            epochs=epochs)
t2 = time.time()
print('Took %f seconds' % (t2 - t1))
siamese.save_pretrained_weights(pretrained_weights_file_path)
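
Examples No. 4 through No. 7 share a third pad_tensor variant that takes a
list of per-sentence embedding sequences and a target length (None in
Example No. 6, where the batch's longest sentence sets it). A minimal NumPy
sketch of that behavior; the zero padding at the end of each sequence is an
assumption, not taken from the original project:

import numpy as np

def pad_tensor(vectors, max_len=None):
    # Sketch only: pad a list of [n_tokens, embedding_dim] sequences into a
    # single [batch, max_len, embedding_dim] array of float32 zeros.
    if max_len is None:
        max_len = max(len(v) for v in vectors)
    dim = len(vectors[0][0])
    padded = np.zeros((len(vectors), max_len, dim), dtype=np.float32)
    for i, v in enumerate(vectors):
        n = min(len(v), max_len)
        padded[i, :n] = np.asarray(v)[:n]
    return padded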
Example No. 5
#### testing dataset ####
print('Vectorizing testing dataset ...')
ids, test_a_vectors, test_b_vectors, test_gold = vectorizer.vectorize_df(
    test_df)
test_max_a_length = len(max(test_a_vectors, key=len))
test_max_b_length = len(max(test_b_vectors, key=len))
print('maximum number of tokens per sentence A in testing set is %d' %
      test_max_a_length)
print('maximum number of tokens per sentence B in testing set is %d' %
      test_max_b_length)
max_len = max([test_max_a_length, test_max_b_length])

# padding
print('Padding testing dataset ...')
test_a_vectors = pad_tensor(test_a_vectors, max_len)
test_b_vectors = pad_tensor(test_b_vectors, max_len)

print('Loading the model ...')
siamese = SiameseModel(False)
siamese.load(model_path)

print('Testing the model ...')
y = siamese.predict(test_a_vectors, test_b_vectors)
y = [i[0] for i in y]
assert len(test_gold) == len(y)

mse = mean_squared_error(test_gold, y)
print('MSE = %.2f' % mse)

pearsonr = stats.pearsonr(test_gold, y)
print('Pearson r = %.2f' % pearsonr[0])
Example No. 6
print('Initializing objects ...')
print('Initializing word embeddings ...')
t1 = time.time()
word_embeddings = WordEmbeddings(word_embeddings_file_path)
t2 = time.time()
print('\tTook %f seconds' % (t2 - t1))
print('Initializing tokenizer ...')
tokenizer = Tokenizer()
print('Initializing vectorizer ...')
vectorizer = Vectorizer(word_embeddings, tokenizer)


print('Loading the model ...')
siamese = SiameseModel(False)
siamese.load(model_path)


sentences = ['There is no man pointing at a car',
             'The woman is not playing the flute',
             'The man is not riding a horse',
             'A man is pointing at a silver sedan',
             'The woman is playing the flute',
             'A man is riding a horse']
vectors = vectorizer.vectorize_sentences(sentences)
vectors = pad_tensor(vectors, None)

print('Visualizing LSTM activations ...')
siamese.visualize_activation(vectors)
siamese.visualize_specific_activation(vectors, 0)
siamese.visualize_specific_activation(vectors, 5)
siamese.visualize_specific_activation(vectors, 49)
Example No. 7
print('Initializing tokenizer ...')
tokenizer = Tokenizer()
print('Initializing vectorizer ...')
vectorizer = Vectorizer(word_embeddings, tokenizer)

#### training dataset ####
# vectorizing
train_a_vectors, train_b_vectors, train_gold = vectorizer.vectorize_df(train_df)
train_max_a_length = len(max(train_a_vectors, key=len))
train_max_b_length = len(max(train_b_vectors, key=len))
print('maximum number of tokens per sentence A in training set is %d' %
      train_max_a_length)
print('maximum number of tokens per sentence B in training set is %d' %
      train_max_b_length)
max_len = max([train_max_a_length, train_max_b_length])

# padding
train_a_vectors = pad_tensor(train_a_vectors, max_len)
train_b_vectors = pad_tensor(train_b_vectors, max_len)


#### development dataset ####
# vectorizing
dev_a_vectors, dev_b_vectors, dev_gold = vectorizer.vectorize_df(dev_df)
dev_max_a_length = len(max(dev_a_vectors, key=len))
dev_max_b_length = len(max(dev_b_vectors, key=len))
print('maximum number of tokens per sentence A in dev set is %d' %
      dev_max_a_length)
print('maximum number of tokens per sentence B in dev set is %d' %
      dev_max_b_length)
max_len = max([dev_max_a_length, dev_max_b_length])

# padding
dev_a_vectors = pad_tensor(dev_a_vectors, max_len)
dev_b_vectors = pad_tensor(dev_b_vectors, max_len)