# NOTE(review): this physical line is whitespace-mangled — several original
# source lines were collapsed onto it, so everything from the first '#'
# (the commented-out train_path assignment) through the end of the line is
# now dead comment text, and the leading tokens are not valid standalone
# Python. Restore the original line breaks before treating this as runnable.
#
# The visible tokens contain two fragments:
#   1. The tail of an evaluation function that begins BEFORE this chunk:
#      it closes a `..., dim=0).to(device)` expression (presumably a
#      torch.stack of clip features — confirm against the missing opening),
#      appends f(x).item() scores per clip, stores them in summary[v],
#      converts with tools.clip2frame, and returns
#      evaluate(machine_summary, category_dict, args.topk_mAP).
#   2. An `if __name__ == '__main__':` driver: picks cuda/cpu device, builds
#      a shuffled DataLoader over Pairs(args.train_path, args.domain,
#      args.num_per_group), instantiates model.FNet/HNet/LIMloss on the
#      device, creates an SGD optimizer over f+h parameters (lr/momentum/
#      weight_decay from args), then loops `for epoch_idx in range(args.epoch)`
#      calling train(epoch_idx). The loop is cut off mid-statement at
#      `if epoch_idx % args.interval == 0:` — its body is not visible here.
dim=0).to(device) scores.append(f(x).item()) summary[v] = scores machine_summary = tools.clip2frame(summary) return evaluate(machine_summary, category_dict, args.topk_mAP) if __name__ == '__main__': device = torch.device( "cuda") if torch.cuda.is_available() else torch.device("cpu") loader_dict = dict(shuffle=True, batch_size=args.batch_size, pin_memory=True, num_workers=16) # train_path = os.path.join("/home/xuanteng/Highlight/proDataset/TrainingSet") dataset = Pairs(args.train_path, args.domain, args.num_per_group) train_loader = DataLoader(dataset, **loader_dict) # recoder = Recorder("{}_{}".format(args.dataset, args.domain)) f = model.FNet().to(device) h = model.HNet().to(device) limloss = model.LIMloss().to(device) opt = optim.SGD(list(f.parameters()) + list(h.parameters()), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay) mAPs = [] for epoch_idx in range(args.epoch): # logging.info("in epoch {}:\n".format(epoch_idx)) train(epoch_idx) if epoch_idx % args.interval == 0:
# NOTE(review): whitespace-mangled line — the original line breaks were lost,
# and because the first token is '#', this entire physical line is now one
# giant comment; none of the code buried in it executes. Restore the original
# line breaks before editing. The buried content is:
#   - a (commented-out) nn.DataParallel wrap of limloss;
#   - an SGD optimizer over rnn + f + h parameters using bare lr/momentum/
#     weight_decay names (unlike line 1's args.* — this fragment appears to
#     come from a DIFFERENT version of the script; verify which one is live);
#   - a StepLR scheduler, bound to the misspelled name `scheduder`
#     (pre-existing typo in the buried code);
#   - an unshuffled DataLoader over Pairs(domain, seq_len);
#   - the header and opening statements of `def train(domain):` — sets rnn/f/
#     h/limloss to train mode, creates ./train_results and ./model_params
#     directories if missing, and opens
#     './train_results/{}_train.txt'.format(domain) for writing. The `with`
#     body continues past the end of this chunk and is not visible here.
# limloss = nn.DataParallel(limloss, device_ids=device_ids) opt = optim.SGD(list(rnn.parameters()) + list(f.parameters()) + list(h.parameters()), lr=lr, momentum=momentum, weight_decay=weight_decay) scheduder = lr_scheduler.StepLR(opt, step_size=step_size, gamma=gamma) # dataloader dataloader_args = dict(shuffle=False, batch_size=batch_size, pin_memory=True, num_workers=12) train_loader = DataLoader(Pairs(domain, seq_len), **dataloader_args) def train(domain): rnn.train() f.train() h.train() limloss.train() if not os.path.exists(os.path.join('./', 'train_results')): os.mkdir(os.path.join('./', 'train_results')) if not os.path.exists(os.path.join('./', 'model_params')): os.mkdir(os.path.join('./', 'model_params')) with open( os.path.join('./', 'train_results', '{}_train.txt'.format(domain)), 'w') as file: