Example #1
###
# main function
if __name__ == '__main__':
    # parse arguments
    args = parse_args()
    #
    # fetch data
    X, Y, idx2w, w2idx = data.load_data('data/paulg/')
    seqlen = X.shape[1]
    #
    # create the model
    model = GRU_rnn(state_size=256, num_classes=len(idx2w))
    # to train or to generate?
    if args['train']:
        # get train set
        train_set = utils.rand_batch_gen(X, Y, batch_size=BATCH_SIZE)
        #
        # start training
        model.train(train_set)
    elif args['generate']:
        # call generate method
        text = model.generate(
            idx2w,
            w2idx,
            num_words=args['num_words'] if args['num_words'] else 100,
            separator='')
        #########
        # text generation complete
        #
        print('______Generated Text_______')
        print(text)
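
Example #1 calls a parse_args() helper that is not shown. Below is a minimal sketch of what it might look like, assuming argparse and inferring the flag names from the keys the snippet reads (train, generate, num_words); the real helper may differ:

import argparse

def parse_args():
    # hypothetical parser; flag names inferred from args['train'],
    # args['generate'] and args['num_words'] in the snippet above
    parser = argparse.ArgumentParser(description='RNN text generation')
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--train', action='store_true', help='train the model')
    group.add_argument('--generate', action='store_true', help='generate text')
    parser.add_argument('--num_words', type=int, default=None,
                        help='number of symbols to generate')
    return vars(parser.parse_args())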
Example #2
 # fetch data
 X, Y, idx2w, w2idx = data.load_data(
     'data/paulg/'
 )  # X (input) and Y (output): arrays of symbol indices
 # idx2w: list of unique symbols; w2idx: index assigned to each symbol, e.g. {' ': 0, '$': 1, ...}
 seqlen = X.shape[0]
 #
 # create the model
 model = LSTM_rnn(
     state_size=512, num_classes=len(idx2w)
 )  # num_classes = total number of distinct characters/symbols to predict
 # to train or to generate?
 if args['train']:
     # get train set
 train_set = utils.rand_batch_gen(
     X, Y, batch_size=BATCH_SIZE
 )  # yields random batches of BATCH_SIZE samples: (X[sample_ids], Y[sample_ids])
     #
     # start training
     model.train(train_set)
 elif args['generate']:
     # call generate method
     text = model.generate(
         idx2w,
         w2idx,
         num_words=args['num_words'] if args['num_words'] else 100,
         separator='')
     #########
     # text generation complete
     #
     print('______Generated Text_______')
     print(text)
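
The comment above describes utils.rand_batch_gen as yielding random batches. A plausible minimal implementation, assuming X and Y are NumPy arrays sampled along axis 0 (the actual utils module may differ):

import numpy as np

def rand_batch_gen(x, y, batch_size):
    # endlessly yield random (x, y) batches, sampled with replacement
    while True:
        sample_ids = np.random.randint(0, len(x), batch_size)
        yield x[sample_ids], y[sample_ids]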
Example #3
###
# main function
if __name__ == '__main__':
    # parse arguments
    args = parse_args()
    #
    # fetch data
    X, Y, idx2w, w2idx = data.load_data('data/paulg/')
    seqlen = X.shape[1]
    #
    # create the model
    model = GRU_rnn(state_size=256, num_classes=len(idx2w))
    # to train or to generate?
    if args['train']:
        # get train set
        train_set = utils.rand_batch_gen(X, Y, batch_size=BATCH_SIZE)
        #
        # start training
        model.train(train_set)
    elif args['generate']:
        # call generate method
        text = model.generate(
            idx2w,
            w2idx,
            num_words=args['num_words'] if args['num_words'] else 100,
            separator='')
        #########
        # text generation complete
        #
        print('______Generated Text_______')
        print(text)
        print('___________________________')
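
model.generate is not shown in these examples. Conceptually it samples one symbol at a time and feeds each prediction back as the next input. A hedged sketch of that loop, with predict_fn standing in for the model's session-backed next-symbol distribution (a hypothetical callable, not part of the original code):

import numpy as np

def generate_text(predict_fn, idx2w, w2idx, num_words=100, separator='', seed=' '):
    # predict_fn: hypothetical callable mapping an index sequence to a
    # probability distribution over the next symbol
    indices = [w2idx[seed]]
    for _ in range(num_words):
        probs = predict_fn(indices)
        next_idx = np.random.choice(len(idx2w), p=probs)  # sample, don't argmax
        indices.append(next_idx)
    return separator.join(idx2w[i] for i in indices)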
Example #4
import data
import model
import utils

import tensorflow as tf

if __name__ == '__main__':

    metadata, idx_x, idx_y = data.load_data()

    # params
    seqlen = metadata['max_words']
    state_size = 128
    vocab_size = len(metadata['idx2w'])
    batch_size = 128
    num_classes = 2

    train_set = utils.rand_batch_gen(idx_x, idx_y, batch_size)

    smodel = model.SentiRNN(seqlen=seqlen,
                            vocab_size=vocab_size,
                            num_classes=num_classes,
                            num_layers=1,
                            state_size=state_size,
                            epochs=10000000,
                            learning_rate=0.1,
                            batch_size=batch_size,
                            ckpt_path='ckpt/')

    smodel.train(train_set)
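
For reference, each draw from the batch generator feeds one training step. Under the rand_batch_gen sketch above, a batch would look like this (shapes inferred from the parameters in Example #4):

xs, ys = next(train_set)
# xs: [batch_size, seqlen] integer word indices
# ys: [batch_size] class labels in {0, 1}

The very large epochs value suggests training runs until manually interrupted, checkpointing to ckpt/ along the way.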
Example #5
 #
 # optimization
 losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=ys_, logits=logits)
 loss = tf.reduce_mean(losses)
 train_op = tf.train.AdamOptimizer(learning_rate=0.1).minimize(loss)
 # 
 # to generate or to train - that is the question.
 if args['train']:
     # 
     # training
     #  setup batches for training
     epochs = 50
     #
     # set batch size
     batch_size = BATCH_SIZE
     train_set = utils.rand_batch_gen(X, Y, batch_size=batch_size)
     # training session
     with tf.Session() as sess:
         sess.run(tf.global_variables_initializer())
         train_loss = 0
         try:
             for i in range(epochs):
                 for j in range(1000):
                     xs, ys = next(train_set)
                     _, train_loss_ = sess.run([train_op, loss], feed_dict={
                             xs_: xs,
                             ys_: ys.reshape([batch_size * seqlen]),
                             init_state: np.zeros([batch_size, state_size])
                         })
                     train_loss += train_loss_
                 print('[{}] loss : {}'.format(i, train_loss / 1000))
                 train_loss = 0  # reset the running loss after each report
         except KeyboardInterrupt:
             # assumed ending: the original snippet is cut off inside the try
             print('interrupted by user at epoch {}'.format(i))
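
Note the reshape in the feed dict: the model's per-timestep logits are flattened to [batch_size * seqlen, num_classes], so the [batch_size, seqlen] label matrix is flattened to match, one label per timestep. A tiny shape check with assumed dimensions:

import numpy as np

batch_size, seqlen = 4, 8
ys = np.zeros([batch_size, seqlen], dtype=np.int32)
assert ys.reshape([batch_size * seqlen]).shape == (32,)  # one label per timestep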
Example #6
if __name__ == '__main__':

    args = parse_args()
    DATASET = args['dataset']
    process_data(paths[DATASET][0], paths[DATASET][1])
    print(args)
    X_trn, y_trn, X_test, y_test, idx2w, w2idx = data.load_data('data/' + DATASET + '/')
    print("fetched data. trn/test data shape: ", X_trn.shape, y_trn.shape, X_test.shape, y_test.shape)
    
    num_steps_trn_epoch = X_trn.shape[0] // BATCH_SIZE
    num_steps_val_epoch = X_test.shape[0] // BATCH_SIZE
    print("num steps in trn and val epochs", num_steps_trn_epoch, num_steps_val_epoch)

    model = LSTM_rnn(state_size=512, num_classes=len(idx2w), dataset=DATASET,
                     variant=args['variant'], model_name=args['variant'])
    print("created model.")

    if args['train']:
        train_set = utils.rand_batch_gen(X_trn, y_trn, batch_size=BATCH_SIZE)
        val_set = utils.rand_batch_gen(X_test, y_test, batch_size=BATCH_SIZE)
        print("starting to train model!")
        model.train(train_set, val_set, num_steps_trn_epoch, num_steps_val_epoch)

    elif args['generate']:
        text = model.generate(
            idx2w,
            w2idx,
            num_words=args['num_words'] if args['num_words'] else 100,
            separator='')

        print('______Generated Text_______')
        print(text)
        print('___________________________')
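
Example #6 indexes a paths table that is not shown. Presumably it maps each dataset name to a (raw input file, processed output directory) pair consumed by process_data; a hypothetical layout:

# hypothetical layout; the real keys and file names depend on the repository
paths = {
    'paulg': ('data/paulg/input.txt', 'data/paulg/'),
    'shakespeare': ('data/shakespeare/input.txt', 'data/shakespeare/'),
}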
Example #7
 #
 # optimization
 losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=ys_, logits=logits)
 loss = tf.reduce_mean(losses)
 train_op = tf.train.AdamOptimizer(learning_rate=0.1).minimize(loss)
 #
 # to generate or to train - that is the question.
 if args['train']:
     #
     # training
     #  setup batches for training
     epochs = 50
     #
     # set batch size
     batch_size = BATCH_SIZE
     train_set = utils.rand_batch_gen(X, Y, batch_size=batch_size)
     # training session
     with tf.Session() as sess:
         sess.run(tf.global_variables_initializer())
         train_loss = 0
         try:
             for i in range(epochs):
                 for j in range(1000):
                     xs, ys = next(train_set)
                     _, train_loss_ = sess.run(
                         [train_op, loss],
                         feed_dict={
                             xs_: xs,
                             ys_: ys.reshape([batch_size * seqlen]),
                             init_state: np.zeros([batch_size, state_size])
                         })
                     train_loss += train_loss_
                 print('[{}] loss : {}'.format(i, train_loss / 1000))
                 train_loss = 0  # reset the running loss after each report
         except KeyboardInterrupt:
             # assumed ending, mirroring Example #5: the snippet is truncated here
             print('interrupted by user at epoch {}'.format(i))
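
As in Example #5, sparse_softmax_cross_entropy_with_logits takes integer class ids rather than one-hot vectors, which is why ys_ holds plain indices. A minimal check in the TF 1.x style these snippets use:

import tensorflow as tf

logits = tf.constant([[2.0, 0.5, 0.1]])  # unnormalized scores for 3 classes
labels = tf.constant([0])                # integer class id, not one-hot
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)

with tf.Session() as sess:
    print(sess.run(loss))  # ~[0.32], i.e. -log(softmax(logits)[0])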
Example #8
            utils.assert_dir(g['data_path'])
            data.process_data(filename=g['data_file'],
                              path=g['data_path'])
        # check if checkpoint path exists
        utils.assert_dir(g['ckpt_path'])

        try:
            # fetch dataset
            X, Y, idx2ch, ch2idx = data.load_data(path=g['data_path'])
        except:
            print('\n>> Failed to load data from {}'.format(g['data_path']))
            print('>> Is the folder empty?')
            sys.exit()

        # training set batch generator
        trainset = utils.rand_batch_gen(X, Y, batch_size=g['batch_size'])

        # build the model
        num_classes = len(idx2ch)
        net = char_rnn.CharRNN(seqlen=X.shape[-1],
                               num_classes=num_classes,
                               num_layers=g['num_layers'],
                               state_size=g['state_size'],
                               epochs=100000000,
                               learning_rate=g['learning_rate'],
                               batch_size=g['batch_size'],
                               ckpt_path=g['ckpt_path'],
                               model_name=g['model_name'])

        # train on trainset
        net.train(trainset)
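
utils.assert_dir is not shown either; from its use here it presumably creates a directory when it is missing. A plausible implementation under that assumption:

import os

def assert_dir(path):
    # assumed behavior: ensure the directory exists before reading/writing it
    if not os.path.exists(path):
        os.makedirs(path)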