Example #1
def run_epoch(session, model, char_data, tag_data, dict_data, len_data, eval_op, batch_size, verbose=False):
    """Runs the model on the given data."""
    start_time = time.time()
    losses = 0.0
    iters = 0.0

    char_data, tag_data, dict_data, len_data = reader.shuffle(char_data, tag_data, dict_data, len_data)
    xArray, yArray, dArray, lArray = reader.iterator(char_data, tag_data, dict_data, len_data, batch_size)

    for x, y, d, l in zip(xArray, yArray, dArray, lArray):
        fetches = [model.loss, model.logits, eval_op]
        feed_dict = {}
        feed_dict[model.input_data] = x
        feed_dict[model.targets] = y
        feed_dict[model.dicts] = d
        feed_dict[model.seq_len] = l
        loss, logits, _ = session.run(fetches, feed_dict)
        losses += loss
        iters += 1

        if verbose and iters % 50 == 0:
            print("%.3f perplexity: %.3f" %
                  (iters / float(len(xArray)), np.exp(losses / iters)))

    return np.exp(losses / iters)
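
run_epoch accumulates per-batch losses and returns the exponential of their mean. A minimal NumPy-only sketch of that bookkeeping (the batch losses below are made-up numbers, not from any real run):

import numpy as np

def perplexity_from_losses(batch_losses):
    # mean cross-entropy over the epoch, exponentiated to get perplexity
    return np.exp(np.sum(batch_losses) / len(batch_losses))

# made-up per-batch losses collected over one epoch
print(perplexity_from_losses([4.2, 3.9, 3.7, 3.5]))  # ~45.8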
Example #2
def load_data():
    with open('input/data.txt', encoding='utf-8') as file:
        X = []
        y = []
        seq_len = []

        # Alternating lines: even-numbered lines hold sentences, odd-numbered lines hold tag sequences.
        for index, line in enumerate(file.readlines()):
            if index % 2 == 0:
                X.append(line.strip())
            else:
                y.append(line.strip())

        # Shuffle sentences and their tag sequences together once they have been read.
        X, y = shuffle(X, y)
        X = list(X)
        y = list(y)
        word2idx, _ = load_vocab()
        x_index = []
        for sentence in X:
            sentence = list(sentence.replace(' ', ''))
            if len(sentence) < 15:
                seq_len.append(len(sentence))
            else:
                seq_len.append(15)
            # max_len = max(seq_len)
            # unknown characters map to index 1
            sentence_index = [word2idx.get(i, 1) for i in sentence]
            x_index.append(sentence_index)
        X = pad_sequences(x_index, maxlen=15, value=0)

        tag2idx, _ = load_tag()
        tag_indexs = []
        for tag in y:
            tag = tag.split(' ')
            # unknown tags map to index 0 (also the padding value)
            tag_index = [tag2idx.get(i, 0) for i in tag]
            tag_indexs.append(tag_index)
        y = pad_sequences(tag_indexs, maxlen=15, value=0)

        return X, y, seq_len
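
The character-to-index mapping plus fixed-length padding can be reproduced without Keras. Below is a small stand-in sketch with a toy vocabulary and a hand-rolled padding helper; pad_to_length is illustrative, not part of the original code, and it pads at the end, whereas Keras's pad_sequences pads at the front by default.

import numpy as np

def pad_to_length(seqs, maxlen, value=0):
    # truncate or right-pad each index sequence to exactly maxlen entries
    out = np.full((len(seqs), maxlen), value, dtype=np.int32)
    for row, seq in enumerate(seqs):
        trimmed = seq[:maxlen]
        out[row, :len(trimmed)] = trimmed
    return out

word2idx = {'我': 2, '爱': 3, '你': 4}   # toy vocabulary; 0 = padding, 1 = unknown
sentences = ['我爱你', '你爱我吗']
x_index = [[word2idx.get(ch, 1) for ch in s] for s in sentences]
print(pad_to_length(x_index, maxlen=5))
# [[2 3 4 0 0]
#  [4 3 2 1 0]]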
Example #3
def main():
    parser = argparse.ArgumentParser(
        description=
        'Split train.csv into train, dev, and validation splits. Specify dev and validation set sizes with args; the remainder is used for training.'
    )
    parser.add_argument(
        '--dataset-file',
        required=True,
        help='path to the train.csv file containing the quora training data')
    parser.add_argument('--ndev',
                        type=int,
                        default=10000,
                        help='size of dev set to create')
    parser.add_argument('--nvalid',
                        type=int,
                        default=50000,
                        help='size of validation set to create')
    parser.add_argument(
        '--output-dir',
        required=True,
        help='directory to which to write train.csv, dev.csv, and valid.csv')
    parser.add_argument(
        '--seed',
        type=int,
        help='optional random seed for reproducibility across runs of this tool')
    args = parser.parse_args()

    data = du.load_csv(args.dataset_file)
    shuffled = du.shuffle(data, args.seed)

    ntrain = len(data) - args.ndev - args.nvalid
    train, dev, valid = du.split(shuffled, ntrain, args.ndev, args.nvalid)

    du.write_csv(train, os.path.join(args.output_dir, 'train.csv'))
    du.write_csv(dev, os.path.join(args.output_dir, 'dev.csv'))
    du.write_csv(valid, os.path.join(args.output_dir, 'valid.csv'))
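
The du helpers (load_csv, shuffle, split, write_csv) are not shown here. The sketch below illustrates what a seeded shuffle-and-split of that kind could look like using only the standard library; it is an assumption about their contract, not the actual du module.

import random

def shuffle(rows, seed=None):
    # return a shuffled copy; a fixed seed makes the split reproducible
    shuffled = list(rows)
    random.Random(seed).shuffle(shuffled)
    return shuffled

def split(rows, ntrain, ndev, nvalid):
    # consecutive, non-overlapping slices: train, then dev, then valid
    return (rows[:ntrain],
            rows[ntrain:ntrain + ndev],
            rows[ntrain + ndev:ntrain + ndev + nvalid])

rows = list(range(100))
train, dev, valid = split(shuffle(rows, seed=13), ntrain=80, ndev=10, nvalid=10)
print(len(train), len(dev), len(valid))  # 80 10 10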
Example #4
##############################################################################################
# TRAINING and results
##############################################################################################
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())


FG = []
FD = []
F_GEO = []

for epoch in tqdm(range(n_epoch), total=n_epoch):
	
    # accumulators for the samples generated during this epoch
    BHA_gen = np.array([])
    AHB_gen = np.array([])
    # shuffle both datasets in unison
    AH_dataset, BH_dataset = shuffle(AH_dataset, BH_dataset)

    for x, y in iter_data(AH_dataset, BH_dataset, size=batch_size):
        # one discriminator update per batch
        for _ in range(1):
            f_d, _ = sess.run([disc_loss, train_disc_op], feed_dict={x_AH: x, y_BH: y})
        # five generator updates per batch; f_g holds [adv_loss, cost_BH, cost_AH]
        for _ in range(5):
            f_g, _ = sess.run([[adv_loss, cost_BH, cost_AH], train_gen_op], feed_dict={x_AH: x, y_BH: y})
        FG.append(f_g)
        FD.append(f_d)


    for x, y in iter_data(AH_dataset, BH_dataset, size=batch_size):
        # collect the BHA samples generated for this epoch
        temp_BHA_gen = sess.run(BHA, feed_dict={x_AH: x, y_BH: y})
        BHA_gen = np.vstack([BHA_gen, temp_BHA_gen]) if BHA_gen.size else temp_BHA_gen
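
iter_data is not defined in the snippet. Below is a plausible, self-contained sketch of such an aligned minibatch iterator together with the grow-by-vstack accumulation pattern used for BHA_gen; it is illustrative only, not the original implementation.

import numpy as np

def iter_data(a, b, size):
    # yield aligned minibatches of up to `size` rows from two arrays
    for start in range(0, len(a), size):
        yield a[start:start + size], b[start:start + size]

a = np.random.randn(10, 3)
b = np.random.randn(10, 3)

collected = np.array([])
for x, y in iter_data(a, b, size=4):
    # same grow-by-vstack pattern as BHA_gen above
    collected = np.vstack([collected, x]) if collected.size else x
print(collected.shape)  # (10, 3)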
Example #5
#minimize the generators' loss
train_gen_op = opt.minimize(gen_loss, var_list=qvars + pvars)

#minimize the discriminators' loss
train_disc_op = opt.minimize(disc_loss, var_list=dvars_x + dvars_z)
""" training """
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())

FG = []
FD = []

#for each epoch (tqdm shows a progress bar)
for epoch in tqdm(range(n_epoch), total=n_epoch):
    #shuffle both datasets in unison
    X_dataset, Z_dataset = shuffle(X_dataset, Z_dataset)

    #for each x and z in our data
    for xmb, zmb in iter_data(X_dataset, Z_dataset, size=batch_size):

        #minimize our loss functions
        for _ in range(1):
            f_d, _ = sess.run([disc_loss, train_disc_op],
                              feed_dict={
                                  x: xmb,
                                  z: zmb
                              })
        for _ in range(5):
            #3 components that make up generator loss
            f_g, _ = sess.run([[adv_loss, cost_x, cost_z], train_gen_op],
                              feed_dict={