# Train an LSTM to predict entailment from SNLI data

import random
import sys
import time
import tensorflow as tf
import numpy as np
np.set_printoptions(threshold=sys.maxsize)  # np.nan is rejected as a threshold by modern NumPy
sys.path.append(".")  # must run before the util.* imports below to take effect
from util.Layer import Layers
from util.Train_entail_lstm import Training
from util.DataLoader import DataLoader
Data = DataLoader()
Layer = Layers()


def run(**args):

    tf.reset_default_graph()
    random.seed(20160408)
    tf.set_random_seed(20160408)
    start_time = time.time()
    graph = tf.get_default_graph()

    # Read Training/Dev/Test data
    data_dir = 'data/' + args['data_dir'] + '/'
    np_matrix, index = Data.read_glove_vectors(data_dir + args['vector_file'])
    num_classes = 3  # SNLI labels: entailment, neutral, contradiction

    # entailment data
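
# --- Illustrative sketch (not from the original): the contract a loader like
# --- Data.read_glove_vectors above is assumed to satisfy. It parses a
# --- GloVe-style text file (one token per line followed by its float
# --- components) and returns an embedding matrix plus a word-to-row index.
def read_glove_vectors_sketch(path):
    vectors, index = [], {}
    with open(path, encoding='utf-8') as f:
        for row, line in enumerate(f):
            parts = line.rstrip().split(' ')
            index[parts[0]] = row                  # word -> row in the matrix
            vectors.append([float(x) for x in parts[1:]])
    return np.asarray(vectors, dtype=np.float32), index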
Example #2
# Train a model to predict conditional probabilities and individual phrase probabilities p(x)

import random
import sys
import time
import tensorflow as tf
from tensorflow.contrib.rnn.python.ops import lstm_ops
import numpy as np
np.set_printoptions(threshold=sys.maxsize)  # np.nan is rejected as a threshold by modern NumPy

sys.path.append(".")  # must run before the util.* imports below to take effect
from util.Layer import Layers
from util.Train_probability import Training
from util.DataLoader import DataLoader
from util import cube_exp_prob
from util import Probability
from util import Bilinear
from util import corr_prob
Sparse = DataLoader()
Layer = Layers()


def get_lstm_input(hidden_dim, embeddings, inputs1, inputs2, lengths1,
                   lengths2, dropout, lstm):
    # The LSTM produces the sentence representation used in all models.
    Wemb1 = tf.nn.embedding_lookup(embeddings, inputs1)
    Wemb2 = tf.nn.embedding_lookup(embeddings, inputs2)
    # Wemb1 = tf.Print(Wemb1, [Wemb1], 'term1 embedding')
    lstm_output, fstate1 = tf.nn.dynamic_rnn(lstm,
                                             Wemb1,
                                             sequence_length=lengths1,
                                             dtype=tf.float32)
    # The original snippet is truncated above; the lines below are an assumed
    # continuation that encodes the second phrase with the same (shared) cell
    # and applies dropout to both final hidden states.
    with tf.variable_scope(tf.get_variable_scope(), reuse=True):
        _, fstate2 = tf.nn.dynamic_rnn(lstm,
                                       Wemb2,
                                       sequence_length=lengths2,
                                       dtype=tf.float32)
    return tf.nn.dropout(fstate1.h, dropout), tf.nn.dropout(fstate2.h, dropout)
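
# --- Hedged usage sketch (not from the original): how the helper above would
# --- typically be wired up. The cell comes from the lstm_ops import at the
# --- top; every placeholder name and size below is illustrative only.
hidden_dim = 100
embeddings = tf.get_variable('emb', shape=[10000, 300], dtype=tf.float32)
inputs1 = tf.placeholder(tf.int32, [None, None])   # padded word ids, phrase 1
inputs2 = tf.placeholder(tf.int32, [None, None])   # padded word ids, phrase 2
lengths1 = tf.placeholder(tf.int32, [None])        # true lengths before padding
lengths2 = tf.placeholder(tf.int32, [None])
dropout = tf.placeholder(tf.float32, [])           # keep probability for tf.nn.dropout
lstm = lstm_ops.LSTMBlockCell(hidden_dim)
rep1, rep2 = get_lstm_input(hidden_dim, embeddings, inputs1, inputs2,
                            lengths1, lengths2, dropout, lstm)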
Example #3
    work_path_name = str(sys.argv[11])

    from pathlib import Path
    work_path = Path('/home/ouyangzhihao/Backup/Exp/ZYY/RandomLR')

    total_depth = int(epochs / mini_epoch)  # 20
    # mini_epoch = int(epochs / total_depth)  # 10

    exp_name = '%s_%d_%d_%s_%d_%.4f_%d_%d_%d_%d_%d_MCTS' % (
        dataset_name, epochs, batch_size, optimizer, random_range, init_lr,
        random_num, power, mini_epoch, sub_tree_depth, 0.1
    )
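    # Worked example (illustrative values only): with dataset_name='cifar10',
    # epochs=200, batch_size=128, optimizer='adam', random_range=10,
    # init_lr=0.1, random_num=5, power=2, mini_epoch=10, sub_tree_depth=3,
    # exp_name becomes 'cifar10_200_128_adam_10_0.1000_5_2_10_3_0_MCTS'.
    # Note the trailing literal 0.1 is rendered by %d, which truncates it to 0.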
    if (work_path / work_path_name / 'TB_Logs' / exp_name).exists():
        print('Already Finished!')
        sys.exit(0)

    data_loader = DataLoader(dataset_name=dataset_name)
    x_train, y_train, x_test, y_test, num_classes = data_loader.load_data()
    print('Using real-time data augmentation.')
    datagen = ImageDataGenerator(
        # set input mean to 0 over the dataset
        featurewise_center=False,
        # set each sample mean to 0
        samplewise_center=False,
        # divide inputs by std of dataset
        featurewise_std_normalization=False,
        # divide each input by its std
        samplewise_std_normalization=False,
        # apply ZCA whitening
        zca_whitening=False,
        # epsilon for ZCA whitening
        zca_epsilon=1e-06,