Example #1

import torch

# TCNDataset, TCN, MultiStageTCN, ParallelTCNs, collate_fn_padd,
# batch_size, and device are defined elsewhere in the original project.
training_dataset = TCNDataset(training=True)
training_dataloader = torch.utils.data.DataLoader(training_dataset,
                                                  collate_fn=collate_fn_padd,
                                                  batch_size=batch_size,
                                                  shuffle=True,
                                                  drop_last=False)

test_dataset = TCNDataset(training=False)
test_dataloader = torch.utils.data.DataLoader(test_dataset,
                                              collate_fn=collate_fn_padd,
                                              batch_size=batch_size,
                                              shuffle=False,
                                              drop_last=False)
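
# collate_fn_padd is referenced above but not shown in this example. A
# minimal sketch of such a padding collate function (hypothetical: it
# assumes each dataset item is a (features, labels) pair of
# variable-length tensors):
def collate_fn_padd_sketch(batch):
    features, labels = zip(*batch)
    lengths = torch.tensor([f.shape[0] for f in features])
    # zero-pad every sequence to the longest one in the batch
    padded_features = torch.nn.utils.rnn.pad_sequence(list(features),
                                                      batch_first=True)
    # pad labels with -100, the default ignore_index of CrossEntropyLoss
    padded_labels = torch.nn.utils.rnn.pad_sequence(list(labels),
                                                    batch_first=True,
                                                    padding_value=-100)
    return padded_features, padded_labels, lengths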

single_TCN = TCN()
single_TCN = single_TCN.to(device)
single_TCN_optimizer = torch.optim.Adam(single_TCN.parameters(), lr=0.001)

multi_stage_TCN = MultiStageTCN()
multi_stage_TCN = multi_stage_TCN.to(device)
multi_stage_TCN_optimizer = torch.optim.Adam(multi_stage_TCN.parameters(),
                                             lr=0.001)

multi_stage_TCN_video_loss = MultiStageTCN()
multi_stage_TCN_video_loss = multi_stage_TCN_video_loss.to(device)
multi_stage_TCN_video_loss_optimizer = torch.optim.Adam(
    multi_stage_TCN_video_loss.parameters(), lr=0.001)

parallel_TCNs = ParallelTCNs()
parallel_TCNs = parallel_TCNs.to(device)
parallel_TCNs_optimizer = torch.optim.Adam(parallel_TCNs.parameters(),
                                           lr=0.001)
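
# None of the snippets above show the training step itself. A minimal,
# hypothetical sketch, assuming each model maps padded features of shape
# (batch, T, dim) to framewise logits, and labels padded with -100 (the
# default ignore_index of nn.CrossEntropyLoss):
frame_criterion = torch.nn.CrossEntropyLoss()

def train_one_epoch(model, optimizer, dataloader):
    model.train()
    for features, labels, lengths in dataloader:
        features, labels = features.to(device), labels.to(device)
        logits = model(features)  # assumed shape: (batch, T, num_classes)
        loss = frame_criterion(logits.reshape(-1, logits.size(-1)),
                               labels.reshape(-1))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
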
Example #2

import os

import torch
import torch.nn as nn
import torch.optim as optim

# TCN, datasets, symbols, and device are defined elsewhere in the
# original project.

# TCN model
embedding_size = 300  # dimension of character embeddings
dropout_rate = 0.1
emb_dropout_rate = 0.1
levels = 3  # number of TCN levels
nhid = 450  # number of hidden units per layer
num_chans = [nhid] * (levels - 1) + [embedding_size]  # [450, 450, 300] here
model = TCN(vocab_size=datasets['train'].vocab_size,
            embed_size=embedding_size,
            num_channels=num_chans,
            bos_idx=symbols['<bos>'],
            eos_idx=symbols['<eos>'],
            pad_idx=symbols['<pad>'],
            dropout=dropout_rate,
            emb_dropout=emb_dropout_rate)
model = model.to(device)
print(model)

# folder to save model
save_path = 'model'
if not os.path.exists(save_path):
    os.makedirs(save_path)
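
# The folder is created, but this example never shows the save call
# itself; one common pattern (the file name here is hypothetical):
# torch.save(model.state_dict(), os.path.join(save_path, 'model.pt'))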

# objective function
learning_rate = 4
# size_average=False in older PyTorch corresponds to reduction='sum' today
criterion = nn.CrossEntropyLoss(reduction='sum',
                                ignore_index=symbols['<pad>'])
optimizer = optim.SGD(model.parameters(), lr=learning_rate)  # Adam is an alternative


# negative log likelihood
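
# With reduction='sum' the criterion returns the summed cross-entropy, so
# dividing by the number of non-pad tokens gives the per-token negative
# log likelihood, and exp of that gives the perplexity. A minimal,
# hypothetical evaluation sketch (assumes the model returns logits of
# shape (batch, T, vocab_size) and the dataloader yields padded index
# pairs):
import math

def evaluate_nll(model, dataloader):
    model.eval()
    total_loss, total_tokens = 0.0, 0
    with torch.no_grad():
        for inputs, targets in dataloader:
            inputs, targets = inputs.to(device), targets.to(device)
            logits = model(inputs)
            total_loss += criterion(logits.reshape(-1, logits.size(-1)),
                                    targets.reshape(-1)).item()
            total_tokens += (targets != symbols['<pad>']).sum().item()
    nll = total_loss / total_tokens
    return nll, math.exp(nll)  # (per-token NLL, perplexity)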