Example no. 1
0
# Train an EncoderDecoder RNN on OTU data, saving the model checkpoint
# and training log under `output_dir`.

# Pin computation to the configured GPU when CUDA is available.
use_gpu = torch.cuda.is_available()
if use_gpu:
    torch.cuda.set_device(gpu_to_use)

# Create the output directory up front. `makedirs(..., exist_ok=True)`
# also creates missing parent directories and avoids the check-then-create
# race of the isdir + mkdir pattern.
os.makedirs(output_dir, exist_ok=True)

# (checkpoint path, log path) tuple consumed by do_training.
save_params = (os.path.join(output_dir, model_name),
               os.path.join(output_dir, log_name))

rnn = EncoderDecoder(hidden_dim,
                     otu_handler,
                     num_lstms,
                     use_gpu,
                     LSTM_in_size=num_strains,
                     use_attention=use_attention)

rnn.do_training(inp_slice_len,
                target_slice_len,
                batch_size,
                num_epochs,
                learning_rate,
                samples_per_epoch,
                teacher_force_frac,
                weight_decay,
                save_params=save_params,
                use_early_stopping=use_early_stopping,
                early_stopping_patience=early_stopping_patience,
                inp_slice_incr_frequency=inp_slice_incr_frequency,
                target_slice_incr_frequency=target_slice_incr_frequency)
Example no. 2
0
                    "--vocab",
                    type=str,
                    help="The file with the vocab characters.")
# Train an EncoderDecoder on tweet CSVs: gather the input files, build
# the TweetHandler corpus, then run training with checkpoints/logs
# written under `output_dir`.
args = parser.parse_args()
input_dir = args.data
vocab_file = args.vocab

# Every .csv file in the input directory becomes part of the corpus.
files = [
    os.path.join(input_dir, f) for f in os.listdir(input_dir)
    if f.endswith('.csv')
]

TH = TweetHandler(files, vocab_file)
TH.set_train_split()
TH.remove_urls()

# Create the output directory up front. `makedirs(..., exist_ok=True)`
# also creates missing parents and avoids the check-then-create race of
# the isdir + mkdir pattern.
os.makedirs(output_dir, exist_ok=True)

# (checkpoint path, log path) tuple consumed by do_training.
save_params = (os.path.join(output_dir, model_name),
               os.path.join(output_dir, log_name))

enc = EncoderDecoder(hidden_dim, TH, num_lstms)
enc.do_training(seq_len,
                batch_size,
                num_epochs,
                learning_rate,
                samples_per_epoch,
                teacher_force_frac,
                slice_incr_frequency=slice_incr_frequency,
                save_params=save_params)