def main(model_type, dataset_path, ptb_path, save_path,
         num_steps, encoder_size, pos_decoder_size, chunk_decoder_size, dropout,
         batch_size, pos_embedding_size, num_shared_layers, num_private_layers,
         chunk_embedding_size, lm_decoder_size, bidirectional, lstm, write_to_file,
         mix_percent, glove_path, max_epoch, embedding=False):

    """Main."""
    config = Config(num_steps, encoder_size, pos_decoder_size, chunk_decoder_size,
                    dropout, batch_size, pos_embedding_size, num_shared_layers,
                    num_private_layers, chunk_embedding_size, lm_decoder_size,
                    bidirectional, lstm, mix_percent, max_epoch)

    raw_data_path = dataset_path + '/data'
    raw_data = reader.raw_x_y_data(
        raw_data_path, config.num_steps, ptb_path + '/data', embedding, glove_path)

    words_t, pos_t, chunk_t, words_v, \
        pos_v, chunk_v, word_to_id, pos_to_id, \
        chunk_to_id, words_test, pos_test, chunk_test, \
        words_c, pos_c, chunk_c, words_ptb, pos_ptb, chunk_ptb, word_embedding = raw_data

    num_pos_tags = len(pos_to_id)
    num_chunk_tags = len(chunk_to_id)
    vocab_size = len(word_to_id)

    if embedding:
        word_embedding = np.float32(word_embedding)
    else:
        # no pretrained embeddings: initialise uniformly at random
        word_embedding = np.float32(
            (np.random.rand(vocab_size, config.word_embedding_size) - 0.5)
            * config.init_scale)

    with tf.Graph().as_default(), tf.Session() as session:
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)
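        # each reuse=True scope below builds an evaluation twin that shares
        # weights with its training counterpart; is_training=False switches
        # off training-only behaviour such as dropout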

        # model used to tune hyper-parameters
        with tf.variable_scope("hyp_model", reuse=None, initializer=initializer):
            m = Shared_Model(is_training=True, config=config,
                             num_pos_tags=num_pos_tags, num_chunk_tags=num_chunk_tags,
                             vocab_size=vocab_size, word_embedding=word_embedding)

        with tf.variable_scope("hyp_model", reuse=True, initializer=initializer):
            mValid = Shared_Model(is_training=False, config=config,
                                  num_pos_tags=num_pos_tags, num_chunk_tags=num_chunk_tags,
                                  vocab_size=vocab_size, word_embedding=word_embedding)

        # model that trains, given the chosen hyper-parameters
        with tf.variable_scope("final_model", reuse=None, initializer=initializer):
            mTrain = Shared_Model(is_training=True, config=config,
                                  num_pos_tags=num_pos_tags, num_chunk_tags=num_chunk_tags,
                                  vocab_size=vocab_size, word_embedding=word_embedding)

        with tf.variable_scope("final_model", reuse=True, initializer=initializer):
            mTest = Shared_Model(is_training=False, config=config,
                                 num_pos_tags=num_pos_tags, num_chunk_tags=num_chunk_tags,
                                 vocab_size=vocab_size, word_embedding=word_embedding)

        tf.initialize_all_variables().run()

        # restore saved weights: assign each pickled array to the graph
        # variable with the matching name (assumes the pickle is keyed by
        # variable name)
        v_dict = saveload.load_np('../../data/outputs/temp/fin-model.pkl', session)
        vars_by_name = {v.name: v for v in tf.all_variables()}
        for key, value in v_dict.items():
            if key in vars_by_name:
                session.run(tf.assign(vars_by_name[key], value))

        print('getting training predictions')
        _, posp_t, chunkp_t, lmp_t, post_t, chunkt_t, lmt_t, _, _, _ = \
            run_epoch(session, mValid, words_t, pos_t, chunk_t,
                      num_pos_tags, num_chunk_tags, vocab_size,
                      verbose=True, valid=True, model_type=model_type)

        print('getting validation predictions')
        valid_loss, posp_v, chunkp_v, lmp_v, post_v, chunkt_v, lmt_v, pos_v_loss, chunk_v_loss, lm_v_loss = \
            run_epoch(session, mValid, words_v, pos_v, chunk_v,
                      num_pos_tags, num_chunk_tags, vocab_size,
                      verbose=True, valid=True, model_type=model_type)


        print('Getting Testing Predictions')
        _, posp_test, chunkp_test, _, _, _, _, _, _, _ = \
            run_epoch(session, mTest,
                      words_test, pos_test, chunk_test,
                      num_pos_tags, num_chunk_tags, vocab_size,
                      verbose=True, valid=True, model_type=model_type)

        print('Getting Combined Predictions')
        # posp_c and chunkp_c are consumed below but were never produced;
        # evaluate the combined train+validation set to define them
        _, posp_c, chunkp_c, _, _, _, _, _, _, _ = \
            run_epoch(session, mValid, words_c, pos_c, chunk_c,
                      num_pos_tags, num_chunk_tags, vocab_size,
                      verbose=True, valid=True, model_type=model_type)


        print('Writing Predictions')


        # get training predictions as list
        posp_t = reader._res_to_list(posp_t, config.batch_size, config.num_steps,
                                     pos_to_id, len(words_t), to_str=True)
        chunkp_t = reader._res_to_list(chunkp_t, config.batch_size,
                                       config.num_steps, chunk_to_id, len(words_t), to_str=True)
        lmp_t = reader._res_to_list(lmp_t, config.batch_size,
                                        config.num_steps, word_to_id, len(words_t), to_str=True)
        post_t = reader._res_to_list(post_t, config.batch_size, config.num_steps,
                                     pos_to_id, len(words_t), to_str=True)
        chunkt_t = reader._res_to_list(chunkt_t, config.batch_size,
                                       config.num_steps, chunk_to_id, len(words_t), to_str=True)
        lmt_t = reader._res_to_list(lmt_t, config.batch_size,
                                        config.num_steps, word_to_id, len(words_t), to_str=True)


        # get validation predictions as list
        posp_v = reader._res_to_list(posp_v, config.batch_size, config.num_steps,
                                     pos_to_id, len(words_v), to_str=True)
        chunkp_v = reader._res_to_list(chunkp_v, config.batch_size,
                                       config.num_steps, chunk_to_id, len(words_v), to_str=True)
        lmp_v = reader._res_to_list(lmp_v, config.batch_size,
                                       config.num_steps, word_to_id, len(words_v), to_str=True)
        chunkt_v = reader._res_to_list(chunkt_v, config.batch_size,
                                       config.num_steps, chunk_to_id, len(words_v), to_str=True)
        post_v = reader._res_to_list(post_v, config.batch_size, config.num_steps,
                                     pos_to_id, len(words_v), to_str=True)
        lmt_v = reader._res_to_list(lmt_v, config.batch_size,
                                       config.num_steps, word_to_id, len(words_v), to_str=True)
        # prediction reshaping
        posp_c = reader._res_to_list(posp_c, config.batch_size, config.num_steps,
                                     pos_to_id, len(words_c),to_str=True)
        posp_test = reader._res_to_list(posp_test, config.batch_size, config.num_steps,
                                        pos_to_id, len(words_test),to_str=True)
        chunkp_c = reader._res_to_list(chunkp_c, config.batch_size,
                                       config.num_steps, chunk_to_id, len(words_c),to_str=True)
        chunkp_test = reader._res_to_list(chunkp_test, config.batch_size, config.num_steps,
                                          chunk_to_id, len(words_test), to_str=True)


        train_custom = reader.read_tokens(raw_data_path + '/train.txt', 0, -1)
        valid_custom = reader.read_tokens(raw_data_path + '/validation.txt', 0, -1)
        combined = reader.read_tokens(raw_data_path + '/train_val_combined.txt', 0, -1)
        test_data = reader.read_tokens(raw_data_path + '/test.txt', 0, -1)

        print('loaded text')
        chunk_pred_train = np.concatenate((np.transpose(train_custom), [str(s).upper() for s in chunkp_t]), axis=1)
        chunk_pred_val = np.concatenate((np.transpose(valid_custom), [str(s).upper() for s in chunkp_v]), axis=1)
        chunk_pred_c = np.concatenate((np.transpose(combined), [str(s).upper() for s in chunkp_c]), axis=1)
        chunk_pred_test = np.concatenate((np.transpose(test_data), [str(s).upper() for s in chunkp_test]), axis=1)
        pos_pred_train = np.concatenate((np.transpose(train_custom), [str(s).upper() for s in posp_t]), axis=1)
        pos_pred_val = np.concatenate((np.transpose(valid_custom), [str(s).upper() for s in posp_v]), axis=1)
        pos_pred_c = np.concatenate((np.transpose(combined), [str(s).upper() for s in posp_c]), axis=1)
        pos_pred_test = np.concatenate((np.transpose(test_data), [str(s).upper() for s in posp_test]), axis=1)

        print('finished concatenating, about to start saving')

        np.savetxt(save_path + '/predictions/chunk_pred_train.txt',
                   chunk_pred_train, fmt='%s')
        print('writing to ' + save_path + '/predictions/chunk_pred_train.txt')
        np.savetxt(save_path + '/predictions/chunk_pred_val.txt',
                   chunk_pred_val, fmt='%s')
        print('writing to ' + save_path + '/predictions/chunk_pred_val.txt')
        np.savetxt(save_path + '/predictions/chunk_pred_combined.txt',
                   chunk_pred_c, fmt='%s')
        print('writing to ' + save_path + '/predictions/chunk_pred_combined.txt')
        np.savetxt(save_path + '/predictions/chunk_pred_test.txt',
                   chunk_pred_test, fmt='%s')
        print('writing to ' + save_path + '/predictions/chunk_pred_test.txt')
        np.savetxt(save_path + '/predictions/pos_pred_train.txt',
                   pos_pred_train, fmt='%s')
        print('writing to ' + save_path + '/predictions/pos_pred_train.txt')
        np.savetxt(save_path + '/predictions/pos_pred_val.txt',
                   pos_pred_val, fmt='%s')
        print('writing to ' + save_path + '/predictions/pos_pred_val.txt')
        np.savetxt(save_path + '/predictions/pos_pred_combined.txt',
                   pos_pred_c, fmt='%s')
        np.savetxt(save_path + '/predictions/pos_pred_test.txt',
                   pos_pred_test, fmt='%s')
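
# reader._res_to_list is a repo-local helper; the sketch below is only an
# assumption about its behaviour, not the repository's implementation: it
# flattens batched [batch_size, num_steps] id predictions into one tag per
# token, trimmed to the true corpus length, optionally as strings.
import numpy as np

def _res_to_list_sketch(preds, batch_size, num_steps, tag_to_id, length, to_str=True):
    # batch_size and num_steps are unused here; kept to mirror the call sites
    id_to_tag = {v: k for k, v in tag_to_id.items()}   # invert the tag vocabulary
    flat = np.asarray(preds).reshape(-1)[:length]      # drop batch padding
    return [id_to_tag[int(i)] for i in flat] if to_str else flat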
Example #2
def main(model_type, dataset_path, save_path):
    """Main"""
    config = Config()
    raw_data = reader.raw_x_y_data(
        dataset_path, config.num_steps)
    words_t, pos_t, chunk_t, words_v, \
    pos_v, chunk_v, word_to_id, pos_to_id, \
    chunk_to_id, words_test, pos_test, chunk_test, \
    words_c, pos_c, chunk_c = raw_data

    config.num_pos_tags = len(pos_to_id)
    config.num_chunk_tags = len(chunk_to_id)

    with tf.Graph().as_default(), tf.Session() as session:
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)

        # model to train hyperparameters on
        with tf.variable_scope("hyp_model", reuse=None, initializer=initializer):
            m = Shared_Model(is_training=True, config=config)
        with tf.variable_scope("hyp_model", reuse=True, initializer=initializer):
            mvalid = Shared_Model(is_training=False, config=config)

        # model that trains, given hyper-parameters
        with tf.variable_scope("final_model", reuse=None, initializer=initializer):
            mTrain = Shared_Model(is_training=True, config=config)
        with tf.variable_scope("final_model", reuse=True, initializer=initializer):
            mTest = Shared_Model(is_training=False, config=config)

        tf.initialize_all_variables().run()

        # Track the best [epoch number, validation loss] seen so far; start
        # with a loss no real epoch will exceed
        best_epoch = [0, 100000]

        print('finding best epoch parameter')
        # ====================================
        # Create vectors for training results
        # ====================================

        # Create empty vectors for loss
        train_loss_stats = np.array([])
        train_pos_loss_stats = np.array([])
        train_chunk_loss_stats = np.array([])
        # Create empty vectors for accuracy
        train_pos_stats = np.array([])
        train_chunk_stats = np.array([])

        # ====================================
        # Create vectors for validation results
        # ====================================
        # Create empty vectors for loss
        valid_loss_stats = np.array([])
        valid_pos_loss_stats = np.array([])
        valid_chunk_loss_stats = np.array([])
        # Create empty vectors for accuracy
        valid_pos_stats = np.array([])
        valid_chunk_stats = np.array([])

        for i in range(config.max_epoch):
            print("Epoch: %d" % (i + 1))
            mean_loss, posp_t, chunkp_t, post_t, chunkt_t, pos_loss, chunk_loss = \
                run_epoch(session, m,
                          words_t, pos_t, chunk_t,
                          config.num_pos_tags, config.num_chunk_tags,
                          verbose=True, model_type=model_type)

            # Save stats for charts
            train_loss_stats = np.append(train_loss_stats, mean_loss)
            train_pos_loss_stats = np.append(train_pos_loss_stats, pos_loss)
            train_chunk_loss_stats = np.append(train_chunk_loss_stats, chunk_loss)

            # get predictions as list
            posp_t = reader.res_to_list(posp_t, config.batch_size, config.num_steps,
                                        pos_to_id, len(words_t))
            chunkp_t = reader.res_to_list(chunkp_t, config.batch_size,
                                          config.num_steps, chunk_to_id, len(words_t))
            post_t = reader.res_to_list(post_t, config.batch_size, config.num_steps,
                                        pos_to_id, len(words_t))
            chunkt_t = reader.res_to_list(chunkt_t, config.batch_size,
                                          config.num_steps, chunk_to_id, len(words_t))

            # find the accuracy
            pos_acc = np.sum(posp_t == post_t) / float(len(posp_t))
            chunk_acc = np.sum(chunkp_t == chunkt_t) / float(len(chunkp_t))

            # add to array
            train_pos_stats = np.append(train_pos_stats, pos_acc)
            train_chunk_stats = np.append(train_chunk_stats, chunk_acc)

            # print for tracking
            print("Pos Training Accuracy After Epoch %d :  %3f" % (i + 1, pos_acc))
            print("Chunk Training Accuracy After Epoch %d : %3f" % (i + 1, chunk_acc))

            valid_loss, posp_v, chunkp_v, post_v, chunkt_v, pos_v_loss, chunk_v_loss = \
                run_epoch(session, mvalid, words_v, pos_v, chunk_v,
                          config.num_pos_tags, config.num_chunk_tags,
                          verbose=True, valid=True, model_type=model_type)

            # Save loss for charts
            valid_loss_stats = np.append(valid_loss_stats, valid_loss)
            valid_pos_loss_stats = np.append(valid_pos_loss_stats, pos_v_loss)
            valid_chunk_loss_stats = np.append(valid_chunk_loss_stats, chunk_v_loss)

            # get predictions as list

            posp_v = reader.res_to_list(posp_v, config.batch_size, config.num_steps,
                                        pos_to_id, len(words_v))
            chunkp_v = reader.res_to_list(chunkp_v, config.batch_size,
                                          config.num_steps, chunk_to_id, len(words_v))
            chunkt_v = reader.res_to_list(chunkt_v, config.batch_size,
                                          config.num_steps, chunk_to_id, len(words_v))
            post_v = reader.res_to_list(post_v, config.batch_size, config.num_steps,
                                        pos_to_id, len(words_v))

            # find accuracy
            pos_acc = np.sum(posp_v == post_v) / float(len(posp_v))
            chunk_acc = np.sum(chunkp_v == chunkt_v) / float(len(chunkp_v))

            print("Pos Validation Accuracy After Epoch %d :  %3f" % (i + 1, pos_acc))
            print("Chunk Validation Accuracy After Epoch %d : %3f" % (i + 1, chunk_acc))

            # add to stats
            valid_pos_stats = np.append(valid_pos_stats, pos_acc)
            valid_chunk_stats = np.append(valid_chunk_stats, chunk_acc)

            # update best parameters
            if (valid_loss < best_epoch[1]):
                best_epoch = [i + 1, valid_loss]

        # Save loss & accuracy plots
        np.savetxt(save_path + '/loss/valid_loss_stats.txt', valid_loss_stats)
        np.savetxt(save_path + '/loss/valid_pos_loss_stats.txt', valid_pos_loss_stats)
        np.savetxt(save_path + '/loss/valid_chunk_loss_stats.txt', valid_chunk_loss_stats)
        np.savetxt(save_path + '/accuracy/valid_pos_stats.txt', valid_pos_stats)
        np.savetxt(save_path + '/accuracy/valid_chunk_stats.txt', valid_chunk_stats)

        np.savetxt(save_path + '/loss/train_loss_stats.txt', train_loss_stats)
        np.savetxt(save_path + '/loss/train_pos_loss_stats.txt', train_pos_loss_stats)
        np.savetxt(save_path + '/loss/train_chunk_loss_stats.txt', train_chunk_loss_stats)
        np.savetxt(save_path + '/accuracy/train_pos_stats.txt', train_pos_stats)
        np.savetxt(save_path + '/accuracy/train_chunk_stats.txt', train_chunk_stats)

        # Train given epoch parameter
        print('Train Given Best Epoch Parameter: ' + str(best_epoch[0]))
        for i in range(best_epoch[0]):
            print("Epoch: %d" % (i + 1))
            _, posp_c, chunkp_c, _, _, _, _ = \
                run_epoch(session, mTrain,
                          words_c, pos_c, chunk_c,
                          config.num_pos_tags, config.num_chunk_tags,
                          verbose=True, model_type=model_type)

        print('Getting Testing Predictions')
        _, posp_test, chunkp_test, _, _, _, _ = \
            run_epoch(session, mTest,
                      words_test, pos_test, chunk_test,
                      config.num_pos_tags, config.num_chunk_tags,
                      verbose=True, valid=True, model_type=model_type)

        print('Writing Predictions')
        # prediction reshaping
        posp_c = reader.res_to_list(posp_c, config.batch_size, config.num_steps,
                                    pos_to_id, len(words_c))
        posp_test = reader.res_to_list(posp_test, config.batch_size, config.num_steps,
                                       pos_to_id, len(words_test))
        chunkp_c = reader.res_to_list(chunkp_c, config.batch_size,
                                      config.num_steps, chunk_to_id, len(words_c))
        chunkp_test = reader.res_to_list(chunkp_test, config.batch_size, config.num_steps,
                                         chunk_to_id, len(words_test))

        # save pickle - save_path + '/saved_variables.pkl'
        print('saving variables (pickling)')
        saveload.save(save_path + '/saved_variables.pkl', session)

        train_custom = reader.read_tokens(dataset_path + '/train.txt', 0)
        valid_custom = reader.read_tokens(dataset_path + '/validation.txt', 0)
        combined = reader.read_tokens(dataset_path + '/train_val_combined.txt', 0)
        test_data = reader.read_tokens(dataset_path + '/test.txt', 0)

        print('loaded text')

        chunk_pred_train = np.concatenate((np.transpose(train_custom), chunkp_t), axis=1)
        chunk_pred_val = np.concatenate((np.transpose(valid_custom), chunkp_v), axis=1)
        chunk_pred_c = np.concatenate((np.transpose(combined), chunkp_c), axis=1)
        chunk_pred_test = np.concatenate((np.transpose(test_data), chunkp_test), axis=1)
        pos_pred_train = np.concatenate((np.transpose(train_custom), posp_t), axis=1)
        pos_pred_val = np.concatenate((np.transpose(valid_custom), posp_v), axis=1)
        pos_pred_c = np.concatenate((np.transpose(combined), posp_c), axis=1)
        pos_pred_test = np.concatenate((np.transpose(test_data), posp_test), axis=1)

        print('finished concatenating, about to start saving')

        np.savetxt(save_path + '/predictions/chunk_pred_train.txt',
                   chunk_pred_train, fmt='%s')
        print('writing to ' + save_path + '/predictions/chunk_pred_train.txt')
        np.savetxt(save_path + '/predictions/chunk_pred_val.txt',
                   chunk_pred_val, fmt='%s')
        print('writing to ' + save_path + '/predictions/chunk_pred_val.txt')
        np.savetxt(save_path + '/predictions/chunk_pred_combined.txt',
                   chunk_pred_c, fmt='%s')
        print('writing to ' + save_path + '/predictions/chunk_pred_combined.txt')
        np.savetxt(save_path + '/predictions/chunk_pred_test.txt',
                   chunk_pred_test, fmt='%s')
        print('writing to ' + save_path + '/predictions/chunk_pred_test.txt')
        np.savetxt(save_path + '/predictions/pos_pred_train.txt',
                   pos_pred_train, fmt='%s')
        print('writing to ' + save_path + '/predictions/pos_pred_train.txt')
        np.savetxt(save_path + '/predictions/pos_pred_val.txt',
                   pos_pred_val, fmt='%s')
        print('writing to ' + save_path + '/predictions/pos_pred_val.txt')
        np.savetxt(save_path + '/predictions/pos_pred_combined.txt',
                   pos_pred_c, fmt='%s')
        np.savetxt(save_path + '/predictions/pos_pred_test.txt',
                   pos_pred_test, fmt='%s')
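
# saveload.save / saveload.load_np are repo-local helpers; this is a minimal
# sketch of what they presumably do (an assumption, not the repo's code):
# pickle every TF variable's value keyed by name, then read the dict of
# numpy arrays back.
import pickle
import tensorflow as tf

def save_sketch(path, session):
    values = {v.name: session.run(v) for v in tf.all_variables()}  # name -> ndarray
    with open(path, 'wb') as f:
        pickle.dump(values, f)

def load_np_sketch(path, session):
    # session is unused; kept only to mirror the call sites above
    with open(path, 'rb') as f:
        return pickle.load(f)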
Example #3
def main(model_type, dataset_path, ptb_path, save_path,
         num_steps, encoder_size, pos_decoder_size, chunk_decoder_size, dropout,
         batch_size, pos_embedding_size, num_shared_layers, num_private_layers,
         chunk_embedding_size, lm_decoder_size, bidirectional, lstm, write_to_file,
         mix_percent, glove_path, max_epoch, num_batches_gold, reg_weight,
         word_embedding_size, projection_size, embedding_trainable, adam,
         embedding=False, test=False):
    """Main."""
    config = Config(num_steps, encoder_size, pos_decoder_size,
                    chunk_decoder_size, dropout, batch_size,
                    pos_embedding_size, num_shared_layers, num_private_layers,
                    chunk_embedding_size, lm_decoder_size, bidirectional, lstm,
                    mix_percent, max_epoch, reg_weight, word_embedding_size,
                    embedding_trainable, adam)

    raw_data_path = dataset_path + '/data'
    raw_data = reader.raw_x_y_data(raw_data_path, ptb_path + '/data',
                                   num_steps, embedding, glove_path)

    words_t, pos_t, chunk_t, words_v, \
        pos_v, chunk_v, word_to_id, pos_to_id, \
        chunk_to_id, words_test, pos_test, chunk_test, \
        words_c, pos_c, chunk_c, words_ptb, pos_ptb, chunk_ptb, word_embedding = raw_data

    config.num_pos_tags = num_pos_tags = len(pos_to_id)
    config.num_chunk_tags = num_chunk_tags = len(chunk_to_id)
    config.vocab_size = vocab_size = len(word_to_id)

    train_lengths = [len(s) for s in words_t]
    validation_lengths = [len(s) for s in words_v]
    test_lengths = [len(s) for s in words_test]
    ptb_lengths = [len(s) for s in words_ptb]
    combined_lengths = [len(s) for s in words_c]

    # Track the best [epoch number, chunk F1]; in test mode, skip the epoch
    # search and train for the full max_epoch
    if not test:
        best_epoch = [0, 0.0]
    else:
        best_epoch = [max_epoch, 0.0]

    print('constructing word embedding')

    if embedding:
        word_embedding = np.float32(word_embedding)
    else:
        word_embedding = np.float32(
            (np.random.rand(vocab_size, config.word_embedding_size) - 0.5) *
            config.init_scale)

    if not test:
        with tf.Graph().as_default(), tf.Session() as session:
            print('building models')
            initializer = tf.random_uniform_initializer(
                -config.init_scale, config.init_scale)

            # model to train hyperparameters on
            with tf.variable_scope("hyp_model",
                                   reuse=None,
                                   initializer=initializer):
                m = Shared_Model(is_training=True,
                                 config=config,
                                 num_pos_tags=num_pos_tags,
                                 num_chunk_tags=num_chunk_tags,
                                 vocab_size=vocab_size,
                                 num_steps=num_steps,
                                 embedding_dim=config.word_embedding_size,
                                 projection_size=projection_size)

            with tf.variable_scope("hyp_model",
                                   reuse=True,
                                   initializer=initializer):
                mValid = Shared_Model(is_training=False,
                                      config=config,
                                      num_pos_tags=num_pos_tags,
                                      num_chunk_tags=num_chunk_tags,
                                      vocab_size=vocab_size,
                                      num_steps=num_steps,
                                      embedding_dim=config.word_embedding_size,
                                      projection_size=projection_size)

            print('initialising most variables')

            tf.initialize_all_variables().run()
            print('initialise word embedding')
            session.run(m.embedding_init,
                        {m.embedding_placeholder: word_embedding})
            session.run(mValid.embedding_init,
                        {mValid.embedding_placeholder: word_embedding})

            print('finding best epoch parameter')
            # ====================================
            # Create vectors for training results
            # ====================================

            # Create empty vectors for loss
            train_loss_stats = np.array([])
            train_pos_loss_stats = np.array([])
            train_chunk_loss_stats = np.array([])
            train_lm_loss_stats = np.array([])

            # Create empty vectors for accuracy
            train_pos_stats = np.array([])
            train_chunk_stats = np.array([])

            # ====================================
            # Create vectors for validation results
            # ====================================
            # Create empty vectors for loss
            valid_loss_stats = np.array([])
            valid_pos_loss_stats = np.array([])
            valid_chunk_loss_stats = np.array([])
            valid_lm_loss_stats = np.array([])

            # Create empty vectors for accuracy
            valid_pos_stats = np.array([])
            valid_chunk_stats = np.array([])

            for i in range(config.max_epoch):
                print(time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()))
                print("Epoch: %d" % (i + 1))

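                # scheduled-sampling-style curriculum: gold embeddings are fed
                # with probability gold_percent, held at 1.0 for the first
                # num_batches_gold epochs and decayed by 0.8 per epoch after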
                if i > num_batches_gold:
                    gold_percent = gold_percent * 0.8
                else:
                    gold_percent = 1
                if np.random.rand(1) < gold_percent:
                    gold_embed = 1
                else:
                    gold_embed = 0
                # feed the sampled gold_embed flag into this epoch's run
                mean_loss, posp_t, chunkp_t, lmp_t, post_t, chunkt_t, lmt_t, pos_loss, chunk_loss, lm_loss = \
                    run_epoch_random.run_epoch(session, m,
                              words_t, words_ptb, pos_t, pos_ptb, chunk_t, chunk_ptb,
                              num_pos_tags, num_chunk_tags, vocab_size, num_steps, gold_embed, config,
                              verbose=True, model_type=model_type)

                print('epoch finished')
                # Save stats for charts
                train_loss_stats = np.append(train_loss_stats, mean_loss)
                train_pos_loss_stats = np.append(train_pos_loss_stats,
                                                 pos_loss)
                train_chunk_loss_stats = np.append(train_chunk_loss_stats,
                                                   chunk_loss)
                train_lm_loss_stats = np.append(train_lm_loss_stats, lm_loss)

                # get training predictions as list
                posp_t = reader._res_to_list(posp_t,
                                             config.batch_size,
                                             pos_to_id,
                                             len(words_t),
                                             train_lengths,
                                             to_str=True)
                chunkp_t = reader._res_to_list(chunkp_t,
                                               config.batch_size,
                                               chunk_to_id,
                                               len(words_t),
                                               train_lengths,
                                               to_str=True)
                lmp_t = reader._res_to_list(lmp_t,
                                            config.batch_size,
                                            word_to_id,
                                            len(words_t),
                                            train_lengths,
                                            to_str=True)
                post_t = reader._res_to_list(post_t,
                                             config.batch_size,
                                             pos_to_id,
                                             len(words_t),
                                             train_lengths,
                                             to_str=True)
                chunkt_t = reader._res_to_list(chunkt_t,
                                               config.batch_size,
                                               chunk_to_id,
                                               len(words_t),
                                               train_lengths,
                                               to_str=True)
                lmt_t = reader._res_to_list(lmt_t,
                                            config.batch_size,
                                            word_to_id,
                                            len(words_t),
                                            train_lengths,
                                            to_str=True)

                # find the accuracy
                print('finding accuracy')
                pos_acc = np.sum(posp_t == post_t) / float(len(posp_t))
                chunk_F1 = sklearn.metrics.f1_score(chunkt_t,
                                                    chunkp_t,
                                                    average="weighted")

                # add to array
                train_pos_stats = np.append(train_pos_stats, pos_acc)
                train_chunk_stats = np.append(train_chunk_stats, chunk_F1)

                # print for tracking
                print("Pos Training Accuracy After Epoch %d :  %3f" %
                      (i + 1, pos_acc))
                print("Chunk Training F1 After Epoch %d : %3f" %
                      (i + 1, chunk_F1))
                print(time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()))

                valid_loss, posp_v, chunkp_v, lmp_v, post_v, chunkt_v, lmt_v, pos_v_loss, chunk_v_loss, lm_v_loss = \
                    run_epoch_random.run_epoch(session, mValid,
                              words_v, words_ptb, pos_v, pos_ptb, chunk_v, chunk_ptb,
                              num_pos_tags, num_chunk_tags, vocab_size, num_steps, num_batches_gold, config,
                              verbose=True,  model_type=model_type, valid=True)

                # Save loss for charts
                valid_loss_stats = np.append(valid_loss_stats, valid_loss)
                valid_pos_loss_stats = np.append(valid_pos_loss_stats,
                                                 pos_v_loss)
                valid_chunk_loss_stats = np.append(valid_chunk_loss_stats,
                                                   chunk_v_loss)
                valid_lm_loss_stats = np.append(valid_lm_loss_stats, lm_v_loss)

                # get predictions as list
                posp_v = reader._res_to_list(posp_v,
                                             config.batch_size,
                                             pos_to_id,
                                             len(words_v),
                                             validation_lengths,
                                             to_str=True)
                chunkp_v = reader._res_to_list(chunkp_v,
                                               config.batch_size,
                                               chunk_to_id,
                                               len(words_v),
                                               validation_lengths,
                                               to_str=True)
                lmp_v = reader._res_to_list(lmp_v,
                                            config.batch_size,
                                            word_to_id,
                                            len(words_v),
                                            validation_lengths,
                                            to_str=True)
                chunkt_v = reader._res_to_list(chunkt_v,
                                               config.batch_size,
                                               chunk_to_id,
                                               len(words_v),
                                               validation_lengths,
                                               to_str=True)
                post_v = reader._res_to_list(post_v,
                                             config.batch_size,
                                             pos_to_id,
                                             len(words_v),
                                             validation_lengths,
                                             to_str=True)
                lmt_v = reader._res_to_list(lmt_v,
                                            config.batch_size,
                                            word_to_id,
                                            len(words_v),
                                            validation_lengths,
                                            to_str=True)

                # find accuracy
                pos_acc = np.sum(posp_v == post_v) / float(len(posp_v))
                chunk_F1 = sklearn.metrics.f1_score(chunkt_v,
                                                    chunkp_v,
                                                    average="weighted")

                print("Pos Validation Accuracy After Epoch %d :  %3f" %
                      (i + 1, pos_acc))
                print("Chunk Validation F1 After Epoch %d : %3f" %
                      (i + 1, chunk_F1))

                # add to stats
                valid_pos_stats = np.append(valid_pos_stats, pos_acc)
                valid_chunk_stats = np.append(valid_chunk_stats, chunk_F1)

                # anneal the learning rate when validation F1 has plateaued
                # (plain SGD only; Adam adapts its own step size)
                if round(chunk_F1, 3) == round(best_epoch[1], 3) and not config.adam:
                    config.learning_rate = 0.8 * config.learning_rate
                    print("learning rate updated: %f" % config.learning_rate)

                # update best parameters
                if (chunk_F1 > best_epoch[1]):
                    best_epoch = [i + 1, chunk_F1]

                    saveload.save(save_path + '/val_model.pkl', session)
                    print("Model saved in file: %s" % save_path)

                    id_to_word = {v: k for k, v in word_to_id.items()}

                    words_t_unrolled = [
                        id_to_word[k] for k in np.concatenate(words_t)
                    ]
                    words_v_unrolled = [
                        id_to_word[k] for k in np.concatenate(words_v)
                    ]

                    # unroll data
                    train_custom = [
                        words_t_unrolled,
                        np.char.upper(post_t),
                        np.char.upper(chunkt_t)
                    ]
                    valid_custom = [
                        words_v_unrolled,
                        np.char.upper(post_v),
                        np.char.upper(chunkt_v)
                    ]
                    chunk_pred_train = np.concatenate(
                        (np.transpose(train_custom),
                         np.char.upper(chunkp_t).reshape(-1, 1)),
                        axis=1)
                    chunk_pred_val = np.concatenate(
                        (np.transpose(valid_custom),
                         np.char.upper(chunkp_v).reshape(-1, 1)),
                        axis=1)
                    pos_pred_train = np.concatenate(
                        (np.transpose(train_custom),
                         np.char.upper(posp_t).reshape(-1, 1)),
                        axis=1)
                    pos_pred_val = np.concatenate(
                        (np.transpose(valid_custom),
                         np.char.upper(posp_v).reshape(-1, 1)),
                        axis=1)

                    # write to file
                    np.savetxt(save_path + '/predictions/chunk_pred_train.txt',
                               chunk_pred_train,
                               fmt='%s')
                    print('writing to ' + save_path +
                          '/predictions/chunk_pred_train.txt')
                    np.savetxt(save_path + '/predictions/chunk_pred_val.txt',
                               chunk_pred_val,
                               fmt='%s')
                    print('writing to ' + save_path +
                          '/predictions/chunk_pred_val.txt')
                    np.savetxt(save_path + '/predictions/pos_pred_train.txt',
                               pos_pred_train,
                               fmt='%s')
                    print('writing to ' + save_path +
                          '/predictions/pos_pred_train.txt')
                    np.savetxt(save_path + '/predictions/pos_pred_val.txt',
                               pos_pred_val,
                               fmt='%s')
                    print('writing to ' + save_path +
                          '/predictions/pos_pred_val.txt')

            print('Getting Testing Predictions (Valid)')

            test_loss, posp_test, chunkp_test, lmp_test, post_test, chunkt_test, lmt_test, pos_test_loss, chunk_test_loss, lm_test_loss = \
                run_epoch_random.run_epoch(session, mValid,
                          words_test, words_ptb, pos_test, pos_ptb, chunk_test, chunk_ptb,
                          num_pos_tags, num_chunk_tags, vocab_size, num_steps, gold_embed, config,
                          verbose=True,  model_type=model_type, valid=True)

            # prediction reshaping
            posp_test = reader._res_to_list(posp_test,
                                            config.batch_size,
                                            pos_to_id,
                                            len(words_test),
                                            test_lengths,
                                            to_str=True)
            chunkp_test = reader._res_to_list(chunkp_test,
                                              config.batch_size,
                                              chunk_to_id,
                                              len(words_test),
                                              test_lengths,
                                              to_str=True)

            post_test = reader._res_to_list(post_test,
                                            config.batch_size,
                                            pos_to_id,
                                            len(words_test),
                                            test_lengths,
                                            to_str=True)
            chunkt_test = reader._res_to_list(chunkt_test,
                                              config.batch_size,
                                              chunk_to_id,
                                              len(words_test),
                                              test_lengths,
                                              to_str=True)

            # find the accuracy
            print('finding test accuracy')
            pos_acc_train = np.sum(posp_test == post_test) / float(
                len(posp_test))
            chunk_F1_train = sklearn.metrics.f1_score(chunkt_test,
                                                      chunkp_test,
                                                      average="weighted")

            print("POS Test Accuracy: " + str(pos_acc_train))
            print("Chunk Test F1: " + str(chunk_F1_train))

            # Save loss & accuracy plots
            np.savetxt(save_path + '/loss/valid_loss_stats.txt',
                       valid_loss_stats)
            np.savetxt(save_path + '/loss/valid_pos_loss_stats.txt',
                       valid_pos_loss_stats)
            np.savetxt(save_path + '/loss/valid_chunk_loss_stats.txt',
                       valid_chunk_loss_stats)
            np.savetxt(save_path + '/accuracy/valid_pos_stats.txt',
                       valid_pos_stats)
            np.savetxt(save_path + '/accuracy/valid_chunk_stats.txt',
                       valid_chunk_stats)

            np.savetxt(save_path + '/loss/train_loss_stats.txt',
                       train_loss_stats)
            np.savetxt(save_path + '/loss/train_pos_loss_stats.txt',
                       train_pos_loss_stats)
            np.savetxt(save_path + '/loss/train_chunk_loss_stats.txt',
                       train_chunk_loss_stats)
            np.savetxt(save_path + '/accuracy/train_pos_stats.txt',
                       train_pos_stats)
            np.savetxt(save_path + '/accuracy/train_chunk_stats.txt',
                       train_chunk_stats)
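    # a fresh graph/session is built below so the final model can be
    # retrained from scratch, for best_epoch[0] epochs, on the combined
    # train+validation data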

    # model that trains, given hyper-parameters
    with tf.Graph().as_default(), tf.Session() as session:
        if write_to_file:
            initializer = tf.random_uniform_initializer(
                -config.init_scale, config.init_scale)

            with tf.variable_scope("final_model",
                                   reuse=None,
                                   initializer=initializer):
                mTrain = Shared_Model(is_training=True,
                                      config=config,
                                      num_pos_tags=num_pos_tags,
                                      num_chunk_tags=num_chunk_tags,
                                      vocab_size=vocab_size,
                                      num_steps=num_steps,
                                      embedding_dim=config.word_embedding_size,
                                      projection_size=projection_size)

            with tf.variable_scope("final_model",
                                   reuse=True,
                                   initializer=initializer):
                mTest = Shared_Model(is_training=False,
                                     config=config,
                                     num_pos_tags=num_pos_tags,
                                     num_chunk_tags=num_chunk_tags,
                                     vocab_size=vocab_size,
                                     num_steps=num_steps,
                                     embedding_dim=config.word_embedding_size,
                                     projection_size=projection_size)

            print('initialising variables')
            tf.initialize_all_variables().run()
            print('initialising word embeddings')
            session.run(mTrain.embedding_init,
                        {mTrain.embedding_placeholder: word_embedding})
            session.run(mTest.embedding_init,
                        {mTest.embedding_placeholder: word_embedding})

            print('Train Given Best Epoch Parameter: ' + str(best_epoch[0]))
            for i in range(best_epoch[0]):
                print("Epoch: %d" % (i + 1))
                if i > num_batches_gold:
                    gold_percent = gold_percent * 0.8
                else:
                    gold_percent = 1
                if np.random.rand(1) < gold_percent:
                    gold_embed = 1
                else:
                    gold_embed = 0
                # as above, pass the sampled gold_embed flag
                _, posp_c, chunkp_c, _, post_c, chunkt_c, _, _, _, _ = \
                    run_epoch_random.run_epoch(session, mTrain,
                              words_c, words_ptb, pos_c, pos_ptb, chunk_c, chunk_ptb,
                              num_pos_tags, num_chunk_tags, vocab_size, num_steps, gold_embed, config,
                              verbose=True, model_type=model_type)

            print('Getting Testing Predictions')
            valid_loss, posp_test, chunkp_test, lmp_test, post_test, chunkt_test, lmt_test, pos_test_loss, chunk_test_loss, lm_test_loss = \
                run_epoch_random.run_epoch(session, mTest,
                          words_test, words_ptb, pos_test, pos_ptb, chunk_test, chunk_ptb,
                          num_pos_tags, num_chunk_tags, vocab_size, num_steps, num_batches_gold, config,
                          verbose=True,  model_type=model_type, valid=True)

            print('Writing Predictions')
            # prediction reshaping
            posp_c = reader._res_to_list(posp_c,
                                         config.batch_size,
                                         pos_to_id,
                                         len(words_c),
                                         combined_lengths,
                                         to_str=True)
            posp_test = reader._res_to_list(posp_test,
                                            config.batch_size,
                                            pos_to_id,
                                            len(words_test),
                                            test_lengths,
                                            to_str=True)
            chunkp_c = reader._res_to_list(chunkp_c,
                                           config.batch_size,
                                           chunk_to_id,
                                           len(words_c),
                                           combined_lengths,
                                           to_str=True)
            chunkp_test = reader._res_to_list(chunkp_test,
                                              config.batch_size,
                                              chunk_to_id,
                                              len(words_test),
                                              test_lengths,
                                              to_str=True)

            post_c = reader._res_to_list(post_c,
                                         config.batch_size,
                                         pos_to_id,
                                         len(words_c),
                                         combined_lengths,
                                         to_str=True)
            post_test = reader._res_to_list(post_test,
                                            config.batch_size,
                                            pos_to_id,
                                            len(words_test),
                                            test_lengths,
                                            to_str=True)
            chunkt_c = reader._res_to_list(chunkt_c,
                                           config.batch_size,
                                           chunk_to_id,
                                           len(words_c),
                                           combined_lengths,
                                           to_str=True)
            chunkt_test = reader._res_to_list(chunkt_test,
                                              config.batch_size,
                                              chunk_to_id,
                                              len(words_test),
                                              test_lengths,
                                              to_str=True)

            # checkpoint the final model's variables
            print('saving checkpoint')
            saveload.save(save_path + '/fin_model.ckpt', session)

            id_to_word = {v: k for k, v in word_to_id.items()}

            words_t = [id_to_word[k] for k in np.concatenate(words_t)]
            words_v = [id_to_word[k] for k in np.concatenate(words_v)]
            words_c = [id_to_word[k] for k in np.concatenate(words_c)]
            words_test = [id_to_word[k] for k in np.concatenate(words_test)]

            # find the accuracy
            print('finding test accuracy')
            pos_acc = np.sum(posp_test == post_test) / float(len(posp_test))
            chunk_F1 = sklearn.metrics.f1_score(chunkt_test,
                                                chunkp_test,
                                                average="weighted")

            print("POS Test Accuracy (Both): " + str(pos_acc))
            print("Chunk Test F1 (Both): " + str(chunk_F1))

            print("POS Test Accuracy (Train): " + str(pos_acc_train))
            print("Chunk Test F1 (Train): " + str(chunk_F1_train))

            if not test:
                train_custom = [
                    words_t,
                    np.char.upper(post_t),
                    np.char.upper(chunkt_t)
                ]
                valid_custom = [
                    words_v,
                    np.char.upper(post_v),
                    np.char.upper(chunkt_v)
                ]
            combined = [
                words_c,
                np.char.upper(post_c),
                np.char.upper(chunkt_c)
            ]
            test_data = [
                words_test,
                np.char.upper(post_test),
                np.char.upper(chunkt_test)
            ]

            print('assembled text')

            if not test:
                chunk_pred_train = np.concatenate(
                    (np.transpose(train_custom),
                     np.char.upper(chunkp_t).reshape(-1, 1)),
                    axis=1)
                chunk_pred_val = np.concatenate(
                    (np.transpose(valid_custom),
                     np.char.upper(chunkp_v).reshape(-1, 1)),
                    axis=1)
            chunk_pred_c = np.concatenate(
                (np.transpose(combined), np.char.upper(chunkp_c).reshape(
                    -1, 1)),
                axis=1)
            chunk_pred_test = np.concatenate(
                (np.transpose(test_data), np.char.upper(chunkp_test).reshape(
                    -1, 1)),
                axis=1)
            if not test:
                pos_pred_train = np.concatenate(
                    (np.transpose(train_custom), np.char.upper(posp_t).reshape(
                        -1, 1)),
                    axis=1)
                pos_pred_val = np.concatenate(
                    (np.transpose(valid_custom), np.char.upper(posp_v).reshape(
                        -1, 1)),
                    axis=1)
            pos_pred_c = np.concatenate(
                (np.transpose(combined), np.char.upper(posp_c).reshape(-1, 1)),
                axis=1)
            pos_pred_test = np.concatenate(
                (np.transpose(test_data), np.char.upper(posp_test).reshape(
                    -1, 1)),
                axis=1)

            print('finished concatenating, about to start saving')

            if not test:
                np.savetxt(save_path + '/predictions/chunk_pred_train.txt',
                           chunk_pred_train,
                           fmt='%s')
                print('writing to ' + save_path +
                      '/predictions/chunk_pred_train.txt')
                np.savetxt(save_path + '/predictions/chunk_pred_val.txt',
                           chunk_pred_val,
                           fmt='%s')
                print('writing to ' + save_path +
                      '/predictions/chunk_pred_val.txt')

            np.savetxt(save_path + '/predictions/chunk_pred_combined.txt',
                       chunk_pred_c,
                       fmt='%s')
            print('writing to ' + save_path +
                  '/predictions/chunk_pred_combined.txt')
            np.savetxt(save_path + '/predictions/chunk_pred_test.txt',
                       chunk_pred_test,
                       fmt='%s')
            print('writing to ' + save_path +
                  '/predictions/chunk_pred_test.txt')

            if not test:
                np.savetxt(save_path + '/predictions/pos_pred_train.txt',
                           pos_pred_train,
                           fmt='%s')
                print('writing to ' + save_path +
                      '/predictions/pos_pred_train.txt')
                np.savetxt(save_path + '/predictions/pos_pred_val.txt',
                           pos_pred_val,
                           fmt='%s')
                print('writing to ' + save_path +
                      '/predictions/pos_pred_val.txt')

            np.savetxt(save_path + '/predictions/pos_pred_combined.txt',
                       pos_pred_c,
                       fmt='%s')
            np.savetxt(save_path + '/predictions/pos_pred_test.txt',
                       pos_pred_test,
                       fmt='%s')

        else:
            print('Best Validation F1 ' + str(best_epoch[1]))
            print('Best Validation Epoch ' + str(best_epoch[0]))
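
# A hypothetical invocation of the example above; every value below is an
# illustrative assumption, not a setting taken from the repository.
if __name__ == "__main__":
    main(model_type='POS', dataset_path='../../data/conll',
         ptb_path='../../data/ptb', save_path='../../data/outputs',
         num_steps=32, encoder_size=200, pos_decoder_size=200,
         chunk_decoder_size=200, dropout=0.5, batch_size=64,
         pos_embedding_size=400, num_shared_layers=1, num_private_layers=1,
         chunk_embedding_size=400, lm_decoder_size=200, bidirectional=True,
         lstm=True, write_to_file=True, mix_percent=0.5,
         glove_path='../../data/glove', max_epoch=30, num_batches_gold=5,
         reg_weight=0.001, word_embedding_size=300, projection_size=100,
         embedding_trainable=False, adam=True, embedding=False, test=False)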