Example #1
0
File: main.py  Project: karthik997/FYP
def main():
    """Parse command-line options, build the data loaders and model, then train.

    Side effects: reads pickled WikiQA datasets and an embedding file from
    ./data, and starts training via the module-level `train` function.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-task', required=True)
    parser.add_argument('-model', required=True)
    parser.add_argument('-eval_step', type=int, default=10)
    parser.add_argument('-epoch', type=int, default=400)
    parser.add_argument('-d_word_vec', type=int, default=300)
    parser.add_argument('-batch_size', type=int, default=100)
    parser.add_argument('-save_model', default=None)
    parser.add_argument('-save_mode', type=str, choices=['all', 'best'], default='best')
    parser.add_argument('-no_cuda', action='store_true')
    parser.add_argument('-lr', type=float, default=0.001)
    # NOTE(review): n_bins looks like a count, so type=float is suspicious;
    # kernal_mus/kernel_sigmas may depend on it -- confirm before changing to int.
    parser.add_argument('-n_bins', type=float, default=21)

    opt = parser.parse_args()
    opt.cuda = not opt.no_cuda
    opt.mu = kernal_mus(opt.n_bins)
    opt.sigma = kernel_sigmas(opt.n_bins)
    print(opt)  # was a Python 2 print statement; this form works on 2 and 3

    # ========= Preparing DataLoader =========#
    if opt.task == "wikiqa":
        train_filename = "./data/wikiqa/wiki_train_pair.pkl"
        test_filename = "./data/wikiqa/wiki_test.pkl"
        dev_filename = "./data/wikiqa/wiki_dev.pkl"
        # Pickles are binary: open with 'rb' (text mode breaks under Python 3)
        # and use context managers so the handles are closed deterministically.
        with open(train_filename, 'rb') as f:
            train_data = pickle.load(f)
        with open(test_filename, 'rb') as f:
            test_data = pickle.load(f)
        with open(dev_filename, 'rb') as f:
            dev_data = pickle.load(f)
        weights = np.load("./data/wikiqa/embed.txt")

    else:
        # `raise ("...")` raises a bare string -- a TypeError in Python 3;
        # raise a real exception type instead.
        raise NotImplementedError("Task not implemented: %s" % opt.task)
    train_data = Dataloader(data=train_data, opt=opt, shuffle=True)
    test_data = DataloaderTest(data=test_data, opt=opt)
    dev_data = DataloaderTest(data=dev_data, opt=opt)
    if opt.model == "knrm":
        model = KNRM.knrm(opt, weights)
    else:
        raise ValueError("No such model: %s" % opt.model)
    # NOTE(review): size_average is deprecated in recent PyTorch (use
    # reduction='mean'); kept to avoid changing behavior on the pinned version.
    crit = nn.MarginRankingLoss(margin=1, size_average=True)

    if opt.cuda:
        model = model.cuda()
        crit = crit.cuda()

    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)
    train(model, opt, crit, optimizer, train_data, dev_data, test_data)
Example #2
0
# Single SGD step op: minimizes `loss` (graph and optimizer built earlier,
# outside this excerpt).
train = optimizer.minimize(loss)

# estimator accuracy
# Predicted class index per example; NOTE(review): `pred_number` is not used
# anywhere in the visible portion of this script.
pred_number = tf.argmax(y_, 1)
# Element-wise match between predicted (y_) and reference (y) argmax classes,
# averaged into a scalar accuracy in [0, 1].
correct_pred = tf.equal(tf.argmax(y_, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# init
# TF1-style bootstrap: build the variable-initializer op, open a session,
# run init once before training.
init = tf.global_variables_initializer()

sess = tf.Session()

sess.run(init)

# data IO
# NOTE(review): Dataloader comes from the project DataLoader module;
# presumably `epoch` counts down as batches are consumed -- confirm there.
dl = Dataloader(epoch=4, total_num=50000)
valid_x, valid_y = dl.get_valid_sample(num=5000)
train_x, train_y = dl.get_train_sample()

# Per-evaluation history buffers, filled later in the training loop.
train_acc = []
test_acc = []
loss_set = []

# Drain training batches until the Dataloader reports no remaining epochs.
while (dl.epoch > 0):

    # batch from dataloader
    batch_x, batch_y = dl.batch_train_sample(batch_size=batch_size)
    # run training step
    # Re-check epoch: batch_train_sample may have consumed the final epoch.
    if (dl.epoch > 0):
        sess.run(train,
                 feed_dict={
                 # NOTE(review): the snippet is truncated here in this excerpt;
                 # the feed_dict and the rest of the loop continue beyond view.
Example #3
0
    def __init__(self, config):
        """Store the project Dataloader and config-derived settings.

        Args:
            config: configuration object; must expose `file_pathtype` and
                `smoothing_d`, plus whatever Dataloader itself reads from it.
        """
        self.dataLoader = Dataloader(config)

        # Copied off the config for local use; their semantics are defined by
        # the config producer and are not visible in this excerpt.
        self.file_pathtype = config.file_pathtype
        self.smoothing_d = config.smoothing_d
Example #4
0
# TF1-style bootstrap: build the variable-initializer op, open a session,
# run init once before training.
init = tf.global_variables_initializer()

sess = tf.Session()

sess.run(init)

# Down-sampling: down_sampling (defined elsewhere) presumably returns the
# indices of examples to keep from the HDF5 training file -- confirm there.
paths = '..\\dataset\\training.h5'  # Windows-style relative path
indces = np.array(down_sampling(paths))  #transform to one dim
one_indces = indces.reshape(-1)
one_indces = list(one_indces)
one_indces.sort()  # Dataloader presumably expects sorted indices -- confirm

# data IO
# `confidence` receives a copy of the sorted index list (list(...) copies);
# epoch counts down as the loader is consumed (see the loop below).
dl = Dataloader(confidence=list(one_indces),
                epoch=5,
                total_num=len(one_indces))
valid_x, valid_y = dl.get_valid_sample(num=1000)

# Per-evaluation history buffers, filled later in the training loop.
train_acc = []
test_acc = []
loss_set = []

# Drain training batches until the Dataloader reports no remaining epochs.
while (dl.epoch > 0):

    # batch from dataloader
    batch_x, batch_y = dl.batch_train_sample(batch_size=batch_size)
    # run training step
    # Re-check epoch: batch_train_sample may have consumed the final epoch.
    if (dl.epoch > 0):
        sess.run(train,
                 feed_dict={
                 # NOTE(review): the snippet is truncated here in this excerpt;
                 # the feed_dict and the rest of the loop continue beyond view.
Example #5
0
    def train(self, num_epochs=10):
        """Run the training loop for `num_epochs` epochs.

        Trains self.Model on batches from the project Dataloader, prints
        batch/validation/test metrics, and returns per-epoch averages.

        Args:
            num_epochs: number of passes over the training data.

        Returns:
            (Loss, Acc): lists of per-epoch mean training loss / accuracy,
            averaged over the batches that were actually evaluated.
        """
        data = Dataloader(batchsize=self.batch_size)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())

            train_X, _ = data.getTrainData()
            num_tr_iter = int(train_X.shape[0] / self.batch_size)
            global_step = 0
            Loss = []
            Acc = []
            for epoch in range(num_epochs):
                print("-------------------------------")
                print("Training epoch: {}".format(epoch + 1))
                data.i = 0  # rewind the loader's batch cursor
                # BUGFIX: the originals were fixed-size zero arrays indexed
                # only every 3rd iteration, so np.mean() diluted the epoch
                # averages with zeros (~3x too low). Collect only the batches
                # that were actually evaluated.
                epoch_acc = []
                epoch_loss = []
                for iteration in range(num_tr_iter):
                    global_step += 1
                    x_batch, y_batch = data.nextBatch()
                    # Run optimization op (backprop)
                    feed_dict_batch = {
                        self.Model.inputs: x_batch,
                        self.Model.labels: y_batch,
                    }
                    sess.run(self.Model.train_op, feed_dict=feed_dict_batch)

                    if iteration % 3 == 0:
                        # Calculate and display the batch loss and accuracy
                        loss_batch, acc_batch = sess.run(
                            [self.Model.loss, self.Model.accuracy],
                            feed_dict=feed_dict_batch,
                        )
                        print(
                            "iter {0:3d}:\t Loss={1:.2f},\tTraining Accuracy={2:.01%}"
                            .format(iteration, loss_batch, acc_batch))
                        epoch_acc.append(acc_batch)
                        epoch_loss.append(loss_batch)
                x_valid, y_valid = data.getValiData()
                val_loss, val_acc = sess.run(
                    [self.Model.loss, self.Model.accuracy],
                    feed_dict={
                        self.Model.inputs: x_valid,
                        self.Model.labels: y_valid
                    },
                )
                print(
                    "Epoch: {0}, validation loss: {1:.2f}, validation accuracy: {2:.01%}"
                    .format(epoch + 1, val_loss, val_acc))
                # Guard against num_tr_iter == 0 (tiny dataset): np.mean of an
                # empty sequence would be NaN with a RuntimeWarning.
                Loss.append(float(np.mean(epoch_loss)) if epoch_loss else 0.0)
                Acc.append(float(np.mean(epoch_acc)) if epoch_acc else 0.0)

            x_test, y_test = data.getTestData()
            feed_dict_test = {
                self.Model.inputs: x_test,
                self.Model.labels: y_test
            }
            test_loss, test_acc = sess.run(
                [self.Model.loss, self.Model.accuracy],
                feed_dict=feed_dict_test)
            print("--------------------")
            print("Test loss: {0:.2f}, test accuracy: {1:.01%}".format(
                test_loss, test_acc))
        return Loss, Acc
Example #6
0
import numpy as np
import tensorflow as tf
from DataLoader import Dataloader

# Quick data-audit script: count the total number of labels across the
# train/test/validation splits, and how many of them are positive.
data = Dataloader(batchsize=32)
train_X, train_Y = data.getTrainData()
test_X, test_Y = data.getTestData()
valid_X, valid_Y = data.getValiData()
label_splits = (train_Y, test_Y, valid_Y)
# Each label array is indexed on two axes here (rows x label columns), so the
# total is the sum of the per-split dim0*dim1 products.
label_num = sum(y.shape[0] * y.shape[1] for y in label_splits)
# Presumably 0/1 labels, in which case summing counts the ones -- confirm.
label_1_num = sum(np.sum(y) for y in label_splits)
pass  # no-op kept from the original (handy as a debugger breakpoint anchor)
Example #7
0
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 30 14:04:36 2018

@author: freeze
"""

from DataLoader import Dataloader

# Smoke test of the project Dataloader: pull the full training split, then
# drain batches while reporting progress after each one.
dl = Dataloader(total_num=320)

train_x, train_y = dl.get_train_sample()

# NOTE(review): `epoch` presumably counts down inside batch_train_sample --
# otherwise this loop would not terminate; confirm in the DataLoader module.
while (dl.epoch > 0):
    batch_x, batch_y = dl.batch_train_sample()
    print("epoch %d , random_list %d " % (dl.epoch, len(dl.random_list)))