Code example #1
import itertools

import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler

from convlstm import ConvLSTM  # assumed module path for the ConvLSTM implementation

encoder = ConvLSTM(
    input_size=(7, 7),
    input_dim=1,
    hidden_dim=[8, 16, 32, 64],
    kernel_size=(3, 3),
    num_layers=4,  # one layer per entry in hidden_dim; 3 would mismatch the list above
)

decoder = ConvLSTM(
    input_size=(7, 7),
    input_dim=64,
    hidden_dim=[32, 16, 8, 1],
    kernel_size=(3, 3),
    num_layers=4,  # one layer per entry in hidden_dim
)

encoder.cuda()
decoder.cuda()

crit = nn.MSELoss()
crit.cuda()

threshold = nn.Threshold(0., 0.0)  # clamps values <= 0 to 0 (ReLU-like)
#params = list(encoder.parameters()) + list(decoder.parameters())
params = itertools.chain(encoder.parameters(), decoder.parameters())
optimizer = optim.Adam(params)  #, lr=0.01)#, weight_decay=1e-4)

# Decay LR by a factor of 0.1 every 5 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)
#exp_lr_scheduler = lr_scheduler.ExponentialLR(optimizer, gamma=0.1)  # ExponentialLR takes no step_size

#exp_lr_scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, patience=0, threshold=1e-4, mode='min')
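
# --- hedged usage sketch (not in the original snippet) ---
# One way the encoder/decoder pair above might be driven for a single
# reconstruction step. The forward convention assumed here -- input of shape
# (batch, time, channels, H, W), return value (layer_outputs, last_states) --
# varies between ConvLSTM implementations; adapt it to yours.
seq = torch.randn(4, 10, 1, 7, 7).cuda()   # ten 7x7 single-channel frames
enc_outputs, enc_states = encoder(seq)
dec_outputs, _ = decoder(enc_outputs[-1])  # decode the top encoder layer
loss = crit(dec_outputs[-1], seq)          # reconstruction loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
exp_lr_scheduler.step()                    # advance the LR schedule once per epoch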
Code example #2
import torch
import torch.nn as nn
import torch.optim as optim

from convlstm import ConvLSTM  # assumed module path for the ConvLSTM implementation

# Hypothetical setup -- the original definitions of word_to_ix and embeds
# are not shown in the source.
word_to_ix = {"hello": 0, "world": 1}
embeds = nn.Embedding(len(word_to_ix), 5)
lookup_tensor = torch.tensor([word_to_ix["hello"]], dtype=torch.long)
hello_embed = embeds(lookup_tensor)  # autograd.Variable is deprecated; plain tensors suffice
lookup_tensor = torch.tensor([word_to_ix["world"]], dtype=torch.long)
world_embed = embeds(lookup_tensor)

print("Hello Emb:", hello_embed)

# feat_dim_h, feat_dim_w, feat_dim_chan and hidden_size come from earlier,
# elided parts of the original script.
encoder = ConvLSTM(
    input_size=(feat_dim_h, feat_dim_w),
    input_dim=feat_dim_chan + feat_dim_h,
    hidden_dim=hidden_size,
    kernel_size=(3, 3),
    num_layers=2,
)

encoder.cuda()

crit = nn.MSELoss()  #nn.BCELoss()
crit.cuda()

threshold = nn.Threshold(0., 0.0)  # clamps values <= 0 to 0 (ReLU-like)

params = list(encoder.parameters())
optimizer = optim.Adam(params, lr=0.001)

s = 1
input = None
hidden_states = None
for e in range(5):
    optimizer.zero_grad()
    input = None
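    # --- hedged sketch: the original loop body is truncated in the source ---
    # A plausible continuation for one iteration: build a
    # (batch, time, channels, H, W) input, run the encoder, and step.
    # seq_len and the target below are hypothetical.
    seq_len = 10
    input = torch.randn(1, seq_len, feat_dim_chan + feat_dim_h,
                        feat_dim_h, feat_dim_w).cuda()
    outputs, hidden_states = encoder(input)  # assumed return convention
    target = torch.zeros_like(outputs[-1])   # placeholder; the real target is not shown
    loss = crit(outputs[-1], target)
    loss.backward()
    optimizer.step()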
Code example #3
import csv

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader

from convlstm import ConvLSTM  # assumed module path for the ConvLSTM implementation

# opt (parsed command-line options), test_dataset and test_mse_path come
# from earlier, elided parts of the original script.
test_loader = DataLoader(test_dataset,
                         batch_size=opt.batch_size,
                         shuffle=False,
                         num_workers=opt.n_cpu)

model = ConvLSTM(opt.in_channels,
                 2 * opt.in_channels,
                 opt.kernel_size,
                 opt.num_layers,
                 batch_first=False,
                 bias=True,
                 return_all_layers=False)
#model = PredictorLSTM(opt.input_size, opt.hidden_size, opt.num_layers, opt.out_size)  # 27 *8
use_gpu = torch.cuda.is_available()
if use_gpu:
    model = model.cuda()

criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                 milestones=opt.milestones,
                                                 gamma=0.5)

header = ['epoch/total_epoch', 'test_mse']
with open(test_mse_path, 'w') as testcsvmes:  # open the test-metrics CSV
    writertest = csv.writer(testcsvmes)
    writertest.writerow(header)
    # training
    for epoch in range(1, opt.n_epoch + 1):
        print('\repoch {}'.format(epoch))
        scheduler.step()  # note: since PyTorch 1.1, call this after the epoch's optimizer steps
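        # --- hedged sketch: the evaluation body is truncated in the source ---
        # A plausible continuation: run the test set, average the MSE, and
        # write one CSV row per epoch. Batch unpacking and the ConvLSTM
        # return convention are assumptions; adapt them to test_dataset.
        model.eval()
        total_mse, n_batches = 0.0, 0
        with torch.no_grad():
            for inputs, targets in test_loader:  # assumed batch layout
                if use_gpu:
                    inputs, targets = inputs.cuda(), targets.cuda()
                outputs, _ = model(inputs)
                total_mse += criterion(outputs[-1], targets).item()
                n_batches += 1
        writertest.writerow(['{}/{}'.format(epoch, opt.n_epoch),
                             total_mse / max(n_batches, 1)])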
Code example #4
import torch
import torch.nn as nn
import torch.optim as optim

from convlstm import ConvLSTM  # assumed module path for the ConvLSTM implementation

# cnn_encoder, cnn_decoder, hidden_spt, hidden_dim and lstm_dims come from
# earlier, elided parts of the original script.
# The opening of this call is truncated in the source; the arguments below
# mirror the decoder and are therefore assumptions.
lstm_encoder = ConvLSTM(
                   input_size=(hidden_spt, hidden_spt),
                   input_dim=hidden_dim,   # assumed: mirrors the decoder
                   hidden_dim=lstm_dims,   # assumed: mirrors the decoder
                   kernel_size=(3, 3),
                   num_layers=3,
                   peephole=True,
                   batchnorm=False,
                   batch_first=True,
                   activation=torch.tanh  # F.tanh is deprecated; use torch.tanh
                  )

lstm_decoder = ConvLSTM(
                   input_size=(hidden_spt,hidden_spt),
                   input_dim=hidden_dim,
                   hidden_dim=lstm_dims,
                   kernel_size=(3,3),
                   num_layers=3,
                   peephole=True,
                   batchnorm=False,
                   batch_first=True,
                   activation=torch.tanh  # F.tanh is deprecated; use torch.tanh
                  )

lstm_encoder.cuda()
lstm_decoder.cuda()


sigmoid = nn.Sigmoid()
crit = nn.BCELoss()
crit.cuda()


params = list(cnn_encoder.parameters()) + list(cnn_decoder.parameters()) + \
         list(lstm_encoder.parameters()) + list(lstm_decoder.parameters())

p_optimizer = optim.Adam(params)

#--------train---------
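# --- hedged sketch (not in the original snippet) ---
# One possible training step for the pipeline wired above:
# cnn_encoder -> lstm_encoder -> lstm_decoder -> cnn_decoder -> sigmoid -> BCE.
# Input shape, batch/sequence sizes and the ConvLSTM return convention are
# all assumptions; the real loop is not shown in the source.
batch_size, seq_len = 4, 10                                 # hypothetical sizes
frames = torch.rand(batch_size, seq_len, 1, 64, 64).cuda()  # hypothetical data
latents = torch.stack([cnn_encoder(frames[:, t])            # per-frame features
                       for t in range(seq_len)], dim=1)
enc_out, _ = lstm_encoder(latents)
dec_out, _ = lstm_decoder(enc_out[-1])
recon = torch.stack([sigmoid(cnn_decoder(dec_out[-1][:, t]))
                     for t in range(seq_len)], dim=1)
loss = crit(recon, frames)                                  # BCE vs. input frames
p_optimizer.zero_grad()
loss.backward()
p_optimizer.step()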