Example #1
def main():
    max_len = 40
    # data_dir and output_dir are assumed to be module-level globals,
    # e.g. taken from sys.argv as in Example #7.
    dataset = DataManager(path=data_dir)
    dictionary = dataset.clean_train_dict
    voc_size = dictionary.voc_size
    dataset.BuildTrainableData(max_len=max_len)

    tf.reset_default_graph()
    model2 = S2VT_attention(voc_size=voc_size, max_len=max_len, dtype=tf.float32)
    model2.compile()
    model2.restore()
    test_id = dataset.raw_data['test_id']

    with open(output_dir, 'w') as f:
        for i, vid in enumerate(test_id):
            # Predict an index sequence and map it back to words.
            predict = model2.predict(dataset.test_x[i])
            predict = dictionary.indexlist2wordlist(predict)
            # Join the words into a sentence, stopping at the first '<PAD>'.
            sentence = ''
            for word in predict:
                if word == '<PAD>':
                    break
                sentence += word + ' '
            sentence = sentence.strip()
            output_line = vid + ',' + sentence
            # No trailing newline after the last line.
            if (i + 1) != len(test_id):
                output_line += '\n'
            f.write(output_line)
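The token-joining loop above can be factored into a small standalone helper. A minimal sketch using itertools.takewhile (the helper name is illustrative, not part of the original code):

import itertools

def join_until_pad(words):
    # Join predicted tokens into a sentence, stopping at the first '<PAD>'.
    # e.g. join_until_pad(['a', 'man', 'is', 'cooking', '<PAD>']) -> 'a man is cooking'
    return ' '.join(itertools.takewhile(lambda w: w != '<PAD>', words))
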
Example #2

def main():
    start_time = time.time()
    dataset = DataManager()

    # Load the pickled dictionary and attach it to the dataset.
    with open('data/dictionary.txt', 'rb') as dict_file:
        word_list = pickle.load(dict_file)
    voc_size = len(word_list)
    dataset.train_dict.train_dict = word_list
    dataset.train_dict.voc_size = voc_size
    dictionary = dataset.train_dict

    # Load test data from the input file given on the command line.
    dataset.LoadTestData(sys.argv[1])
    test_data = np.asarray(dataset.data)

    batch_size = 100
    tf.reset_default_graph()
    model = Seq2Seq(voc_size, batch_size=batch_size, mode='test')
    model.compile()

    count = 0
    with open(sys.argv[2], 'w', encoding='utf8') as output_file:
        # Decode full batches only; any remainder is padded with '...' below.
        for i in range(len(test_data) // batch_size):
            predict_labels = model.predict(
                test_data[i * batch_size:(i + 1) * batch_size])
            for j in range(batch_size):
                count += 1
                result = dictionary.index2sentence(predict_labels[j][0])
                # Fall back to '...' for an empty prediction.
                if result.replace(' ', '') == '':
                    result = '...'
                output_file.write("%s\n" % result)
        # Write one '...' line per uncovered sample so the output has
        # exactly len(test_data) lines.
        while count < len(test_data):
            count += 1
            output_file.write("...\n")
    print('Cost time: %.2f minutes' % ((time.time() - start_time) / 60.0))
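Because the loop above decodes only full batches of 100, any remainder samples are covered by the '...' filler lines at the end. A minimal sketch of an alternative that pads the final partial batch instead (the helper name and the repeat-last-sample strategy are assumptions, not taken from the original):

import numpy as np

def iter_fixed_batches(data, batch_size):
    # Yield fixed-size batches; the last partial batch is padded by
    # repeating its final sample so a graph built with a static batch
    # size can still consume it. n_real tells the caller how many of
    # the batch's outputs are genuine.
    for start in range(0, len(data), batch_size):
        batch = data[start:start + batch_size]
        n_real = len(batch)
        if n_real < batch_size:
            pad = np.repeat(batch[-1:], batch_size - n_real, axis=0)
            batch = np.concatenate([batch, pad], axis=0)
        yield batch, n_real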
Example #3
def set_testing_data(self):
    print('setting testing data...')
    self.max_len = 25
    dataset = DataManager(max_len=self.max_len)
    self.val_size, self.dictionary = dataset.getTestData()
    tf.reset_default_graph()
Example #4
def set_training_data(self):
    print('getting training data...')
    self.max_len = 25
    dataset = DataManager(max_len=self.max_len)
    self.val_size, self.train_x, self.train_y, self.dictionary = dataset.getTrainData()
    tf.reset_default_graph()
Example #5
if __name__ == '__main__':

    nparts = int(sys.argv[1])
    training = int(sys.argv[2])
    # Initialize logger
    lg = Log()

    # Define data locations and timesteps
    trainingDir = ['../training-data/converge-diverge',
                   '../training-data/periodic-hills',
                   '../training-data/square-cylinder',
                   '../training-data/square-duct',
                   '../training-data/tandem-cylinders']
    trainingDir = [os.path.join(os.getcwd(), dir0) for dir0 in trainingDir]
    ransTimes = [60, 90, 60, 60, 60]
    lesTimes = [200, 1000, 250, 1700, 170]
    dataManager = DataManager(trainingDir, ransTimes, lesTimes)

    foamNN = FoamSVGD(nparts)  # Number of SVGD particles
    # Load pre-trained neural networks
    #foamNN.loadNeuralNet('./torchNets/foamNet')

    # First set up validation dataset
    #foamNN.getTrainingPoints(dataManager, n_data=500, n_mb=256)
    #foamNN.getTestingPoints(dataManager, n_data=500, n_mb=256)

    XTdirs = ['../../IgnDelay/xdataTr']
    YTdirs = ['../../IgnDelay/ydataTr']
    Xdirs = ['../../IgnDelay/xdataTe']
    Ydirs = ['../../IgnDelay/ydataTe']

Example #6
# coding: utf-8

from utils.dataManager import DataManager
from utils.Seq2Seq import Seq2Seq
import tensorflow as tf
import numpy as np
import os
import pickle
import sys

dataset = DataManager()

# Load training data and labels from the file given on the command line.
dataset.LoadData(file_name=sys.argv[1])
train_data = np.asarray(dataset.data)
train_label = np.asarray(dataset.label)

# Load the pickled dictionary and attach it to the dataset.
with open('data/dictionary.txt', 'rb') as dict_file:
    word_list = pickle.load(dict_file)
voc_size = len(word_list)
dataset.train_dict.train_dict = word_list
dataset.train_dict.voc_size = voc_size
dictionary = dataset.train_dict

# Define the number of epochs and train the model.
epoch_num = 30
tf.reset_default_graph()
model = Seq2Seq(voc_size)
model.compile()
model.fit(train_data, train_label, epoch_num)
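The dictionary-loading block above appears verbatim in Example #2 as well; a minimal helper sketch that factors it out (the function name is illustrative):

import pickle

def load_dictionary(path='data/dictionary.txt'):
    # Load the pickled word list and return it with its vocabulary size.
    with open(path, 'rb') as f:
        word_list = pickle.load(f)
    return word_list, len(word_list)

The caller can then attach the returned word list and size to dataset.train_dict exactly as the examples do.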
Example #7
from utils.dataManager import DataManager
from models.hw2_1_S2VT_attention import Seq2Seq
import tensorflow as tf
import numpy as np
import sys

data_dir = sys.argv[1]

max_len = 40
dataset = DataManager(path=data_dir)
dictionary = dataset.clean_train_dict
voc_size = dictionary.voc_size
dataset.BuildTrainableData(max_len=max_len)

tf.reset_default_graph()
model = Seq2Seq(voc_size=voc_size, max_len=max_len, dtype=tf.float32)
model.compile()

# Resume from a saved checkpoint if one exists; otherwise start fresh.
try:
    model.restore()
except Exception:
    pass

# Expand the training set: pair each video's features with up to five
# of its reference captions.
train_x = []
train_y = []
for i, labels in enumerate(dataset.train_y):
    for label in labels[:5]:
        train_x.append(dataset.train_x[i])
        train_y.append(label)

epoch = 0