# NOTE(review): this chunk was whitespace-mangled in the source (all statements
# collapsed onto one physical line, so everything after the first '#' became a
# comment and the code below was dead). Statement boundaries reconstructed from
# syntax; confirm against the original file.

# Optimizer construction kept commented out, as in the original:
# optimizerLstm = optim.Adam(lstm.parameters(), lr=opt.lr, betas=(0.9, 0.999))

# Loss functions: MSE/L1 for reconstruction-style objectives, cross-entropy
# for identity classification.
mse_loss = nn.MSELoss()
cse_loss = nn.CrossEntropyLoss()
l1_loss = nn.L1Loss()

# Move networks and loss modules to the GPU.
# NOTE(review): l1_loss is not moved in the original; loss modules are
# parameter-free so this is harmless, but flagging the inconsistency.
netE.cuda()
netD.cuda()
lstm.cuda()
mse_loss.cuda()
cse_loss.cuda()

# #################################################################################################################
# DATASET PREPARATION
train_data = CASIAB(is_train_data=True,
                    train_structure=train_structure,
                    test_structure=test_structure,
                    opt=opt)
train_loader = DataLoader(train_data,
                          num_workers=opt.data_threads,
                          batch_size=opt.batch_size,
                          shuffle=True,
                          drop_last=False,
                          pin_memory=True)
training_batch_generator = get_training_batch(train_loader)

test_data = CASIAB(is_train_data=False,
                   train_structure=train_structure,
                   test_structure=test_structure,
                   opt=opt)
# NOTE(review): the source was truncated right after "DataLoader(test_data," —
# the keyword arguments below mirror the train_loader call above; verify
# against the original file (a test loader may have used shuffle=False).
test_loader = DataLoader(test_data,
                         num_workers=opt.data_threads,
                         batch_size=opt.batch_size,
                         shuffle=True,
                         drop_last=False,
                         pin_memory=True)
# NOTE(review): this chunk was whitespace-mangled in the source (multiple
# statements on one physical line — a syntax error as written). Statement
# boundaries reconstructed from syntax; confirm against the original file.

# Decay the LSTM optimizer's learning rate by 0.9 every 500 scheduler steps.
schedulerLSTM = torch.optim.lr_scheduler.StepLR(optimizerLstm, 500, 0.9)

# Loss functions: MSE/L1 for reconstruction-style objectives, cross-entropy
# for identity classification.
mse_loss = nn.MSELoss()
cse_loss = nn.CrossEntropyLoss()
l1_loss = nn.L1Loss()

# Move networks and loss modules to the GPU.
# NOTE(review): l1_loss is not moved in the original; loss modules are
# parameter-free so this is harmless, but flagging the inconsistency.
netE.cuda()
netD.cuda()
lstm.cuda()
mse_loss.cuda()
cse_loss.cuda()

# #################################################################################################################
# DATASET PREPARATION
train_data = CASIAB(is_train_data=True,
                    train_structure=train_structure,
                    test_structure=test_structure,
                    opt=opt)
train_loader = DataLoader(train_data,
                          num_workers=opt.data_threads,
                          batch_size=opt.batch_size,
                          shuffle=True,
                          drop_last=False,
                          pin_memory=True)
training_batch_generator = get_training_batch(train_loader)

test_data = CASIAB(is_train_data=False,
                   train_structure=train_structure,
                   test_structure=test_structure,
                   opt=opt)
# NOTE(review): the source was truncated right after "DataLoader(test_data," —
# the keyword arguments below mirror the train_loader call above; verify
# against the original file (a test loader may have used shuffle=False).
test_loader = DataLoader(test_data,
                         num_workers=opt.data_threads,
                         batch_size=opt.batch_size,
                         shuffle=True,
                         drop_last=False,
                         pin_memory=True)
# NOTE(review): this chunk was whitespace-mangled in the source (statements and
# per-line '#' comments fused onto one physical line). Statement boundaries
# reconstructed from syntax; confirm against the original file.

# Move the triplet loss to the GPU alongside the other modules.
trp_loss.cuda()

# Commented-out L1 weight-regularization experiment, preserved from the
# original (note: size_average= is deprecated in modern torch):
# l1_crit = nn.L1Loss(size_average=False)
# reg_loss = 0
# for param in netE.parameters():
#     reg_loss += l1_crit(param)
# # factor = 0.0005
# loss = factor * reg_loss

# #################################################################################################################
# DATASET PREPARATION
# NOTE(review): mid-file import kept where the original placed it; consider
# moving to the top of the file.
from utils.dataloader import get_training_batch

train_data1 = CASIAB(is_train_data=True,
                     data_root=opt.data_root,
                     clip_len=opt.max_step,
                     im_height=opt.im_height,
                     im_width=opt.im_width,
                     seed=opt.seed)
train_loader = DataLoader(train_data1,
                          num_workers=opt.data_threads,
                          batch_size=opt.batch_size,
                          shuffle=True,
                          drop_last=True,
                          pin_memory=True)
training_batch_generator1 = get_training_batch(train_loader)

# NOTE(review): the source was truncated after "im_height=opt.im_height," —
# the remaining keyword arguments mirror the train_data1 call above; verify
# against the original file.
test_data = CASIAB(is_train_data=False,
                   data_root=opt.data_root,
                   clip_len=opt.max_step,
                   im_height=opt.im_height,
                   im_width=opt.im_width,
                   seed=opt.seed)
# NOTE(review): this chunk was whitespace-mangled in the source (multiple
# statements on one physical line — a syntax error as written). Statement
# boundaries reconstructed from syntax; confirm against the original file.

# Loss functions: MSE/L1 for reconstruction-style objectives, cross-entropy
# for identity classification.
mse_loss = nn.MSELoss()
cse_loss = nn.CrossEntropyLoss()
l1_loss = nn.L1Loss()

# Move networks and loss modules to the GPU.
# NOTE(review): l1_loss is not moved in the original; loss modules are
# parameter-free so this is harmless, but flagging the inconsistency.
netE.cuda()
netD.cuda()
lstm.cuda()
mse_loss.cuda()
cse_loss.cuda()

# #################################################################################################################
# DATASET PREPARATION
train_data = CASIAB(
    is_train_data=True,
    train_structure=train_structure,
    test_structure=test_structure,
    opt=opt
)
train_loader = DataLoader(train_data,
                          num_workers=opt.data_threads,
                          batch_size=opt.batch_size,
                          shuffle=True,
                          drop_last=False,
                          pin_memory=True)
training_batch_generator = get_training_batch(train_loader)

# NOTE(review): the source was truncated after "test_structure=test_structure," —
# the final keyword argument mirrors the train_data call above; verify against
# the original file.
test_data = CASIAB(
    is_train_data=False,
    train_structure=train_structure,
    test_structure=test_structure,
    opt=opt
)