def main():
	"""runs script functionality"""
	configure_usage('text_reconstruction.py')

	opt = cfg.opt
	model = cfg.model
	text = cfg.train
	n_batch = cfg.n_batch
	valid = cfg.valid
	nv_batch = cfg.nv_batch
	test = cfg.test
	nt_batch = cfg.nt_batch
	TIMESTEPS = cfg.seq_length
	lr_scheduler = cfg.lr
	outer_loop = cfg.outer_loop
	saver = cfg.saver
	save_iters = opt.save_iters
	save_epochs = opt.save_epochs
	histories = []
	val_histories = []
	test_histories = []
	if opt.benchmark:
		torch.backends.cudnn.benchmark = True
	logger = cfg.logger

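	# data preparation callbacks: training pass (True) vs. evaluation pass (False) over fixed-length sequences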
	train_fn = make_data_fn(True, TIMESTEPS)
	eval_fn = make_data_fn(False, TIMESTEPS)

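	# iterate over epochs: train, then optionally evaluate on the validation and test sets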
	for e in outer_loop:
		loss_avg = 0.
		history = []
		try:
			if text is not None:
				_, history = run_epoch(model, e, text, train_fn, n_batch, is_training=not opt.should_test,
										inner_lr=cfg.inner_lr, save_every=save_iters,
										saver=lambda ext, x: saver('e'+str(e)+ext, x))
				histories.append(history)
				if save_epochs > 0 and e % save_epochs == 0:
					saver('e'+str(e)+'.pt', history)
				else:
					logger.log_pkl(histories, 'histories', 'e%s.pkl' % (e,), 'wb')
			if valid is not None:
				_, val_history = run_epoch(model, e, valid, eval_fn, nv_batch, is_training=False)
				val_histories.append(val_history)
				logger.log_pkl(val_histories, 'val_history', 'e%s.pkl' % (e,), 'wb')
			if test is not None:
				_, test_history = run_epoch(model, e, test, eval_fn, nt_batch, is_training=False)
				test_histories.append(test_history)
				logger.log_pkl(test_histories, 'test_history', 'e%s.pkl' % (e,), 'wb')

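			# advance the learning-rate schedule once per epoch (the condition below is currently a no-op)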
			if opt.lr_scheduler == 'linear' or opt.no_loss:
				pass
			lr_scheduler.step()
			e += 1

		except Exception as ex:
			saver('e'+str(e)+'.pt', history)
			print('Exiting from training early')
			raise
            overwrite *= -1
        t_g, v_g = generate_text(o, s, embed, rnn, neuron, last,
                                 seq_length - len(t), temperature, overwrite)
        out_text += t_g
        out_values += v_g
        print(''.join(out_text).encode('utf-8'))

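    # build the heatmap output path: the logger's 'heatmaps' directory plus the first 50 generated characters as the filename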
    save_str = ''
    if logger is not None:
        save_str = logger.get_log_dir('heatmaps')
    save_str = os.path.join(save_str, ''.join(out_text[:50]) + '.png')
    plot_neuron_heatmap(out_text, out_values, save_str, negate)


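# entry point for visualize.py: pull the embedder and RNN out of the configured model and run neuron visualization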
if __name__ == '__main__':
    configure_usage('visualize.py')

    opt = cfg.opt
    embed = cfg.model.module.embedder
    rnn = cfg.model.module.rnn
    states = cfg.model.init_state
    logger = cfg.logger
    visualize(text=opt.text,
              embed=embed,
              rnn=rnn,
              init_state=states,
              seq_length=opt.seq_length,
              temperature=opt.temperature,
              overwrite=opt.overwrite,
              neuron=opt.neuron,
              logger=logger)


def main():
    """runs script functionality"""
    configure_usage('text_reconstruction.py')

    opt = cfg.opt
    model = cfg.model
    text = cfg.train
    n_batch = cfg.n_batch
    valid = cfg.valid
    nv_batch = cfg.nv_batch
    test = cfg.test
    nt_batch = cfg.nt_batch
    TIMESTEPS = cfg.seq_length
    lr_scheduler = cfg.lr
    outer_loop = cfg.outer_loop
    saver = cfg.saver
    histories = []
    val_histories = []
    test_histories = []
    if opt.benchmark:
        torch.backends.cudnn.benchmark = True
    logger = cfg.logger

    train_fn = make_data_fn(True, TIMESTEPS)
    eval_fn = make_data_fn(False, TIMESTEPS)

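    # epoch loop: run train/valid/test passes, checkpoint history, then advance the LR schedule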
    for e in outer_loop:
        loss_avg = 0.
        history = []
        try:
            if opt.train != 'None':
                _, history = run_epoch(
                    model,
                    e,
                    text,
                    train_fn,
                    n_batch,
                    is_training=not opt.should_test,
                    inner_lr=cfg.inner_lr,
                    saver=lambda ext, x: saver('e' + str(e) + ext, x))
                histories.append(history)
                # loss_file is assumed to be defined elsewhere in the original script
                with open(loss_file, 'wb') as f:
                    pkl.dump(histories, f)
            if opt.valid != 'None':
                _, val_history = run_epoch(model,
                                           e,
                                           valid,
                                           eval_fn,
                                           nv_batch,
                                           is_training=False)
                val_histories.append(val_history)
                logger.log_pkl(str(np.mean(val_histories)), 'val_history',
                               'e%s.pkl' % (e, ), 'wb')
                # pkl.dump(str(np.mean(val_history)), open(os.path.basename(save_file)+'.'+'val_loss.pkl', 'wb'))
            if opt.test != 'None':
                _, test_history = run_epoch(model,
                                            e,
                                            test,
                                            eval_fn,
                                            nt_batch,
                                            is_training=False)
                test_histories.append(test_history)
                logger.log_pkl(str(np.mean(test_histories)), 'test_history',
                               'e%s.pkl' % (e, ), 'wb')
                # pkl.dump(str(np.mean(test_history)), open(os.path.basename(save_file)+'.'+'test_loss.pkl', 'wb'))

            # save progress
            saver('e' + str(e), history)

            if opt.lr_scheduler == 'linear' or opt.no_loss:
                pass
            lr_scheduler.step()
            e += 1

        except Exception as ex:
            saver('e' + str(e), history)
            print('Exiting from training early')
            raise
	for i, _ in enumerate(top_logits):
		row = []
		row.append(all_proba[i][1])
		row.append(masked_proba[i][1])
		if use_five and five_proba is not None:
			row.append(five_proba[i][1])
		if use_five and five_logits is not None:
			row.extend(list(five_logits[i].squeeze()))
		else:
			row.extend(list(top_logits[i].squeeze()))
		yield row


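# entry point for sentiment_transfer.py: load the data splits from cfg and set up output naming for the transfer task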
if __name__ == '__main__':
	configure_usage('sentiment_transfer.py')

	opt = cfg.opt
	model = cfg.model
	batch_size = cfg.batch_size
	trX = cfg.train
	trY = None
	vaX = cfg.valid
	vaY = None
	teX = cfg.test
	teY = None
	logger = cfg.logger

	# format output logging
	dsname = os.path.basename(os.path.split(opt.train)[0])
	formatted_name = format_task_prefix(opt.load_model, dsname)