def main():
    """Train (or evaluate) a PointerNet summarizer on the SumMe dataset.

    Reads options from the module-level ``parser`` into the ``args`` global,
    optionally restores a checkpoint, then trains with a joint objective:
    a pointer localization NLL loss plus a segment-score regression loss.
    Tracks best train/val loss and top-1 accuracy and saves a checkpoint
    every epoch.

    Side effects: parses CLI args (mutates global ``args``), writes
    checkpoints, prints progress; calls ``sys.exit(0)`` in ``--eval`` mode.
    """
    global args
    args = parser.parse_args()
    use_cuda = cuda_model.ifUseCuda(args.gpu_id, args.multiGpu)
    model = PointerNet(args.input_size, args.hidden_size, args.nlayers,
                       args.dropout, args.bidir)

    # Optionally resume: load weights (non-strict) and the starting epoch.
    if args.resume is not None:
        assert os.path.isfile(args.resume), 'Error: no checkpoint directory found!'
        # map_location forces a CPU load regardless of where it was saved.
        checkpoint = torch.load(args.resume,
                                map_location=lambda storage, loc: storage)
        args.start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        print("=> loading checkpoint '{:s}', epoch: {:d}\n".format(
            args.resume, args.start_epoch))

    model = cuda_model.convertModel2Cuda(model, gpu_id=args.gpu_id,
                                         multiGpu=args.multiGpu)
    val_evaluator = Evaluator.Evaluator(dataset_name='SumMe', split='val',
                                        clip_size=100)

    # Evaluation-only mode: score the val split once and exit.
    if args.eval:
        model.eval()
        val_F1_score = val_evaluator.Evaluate(model, use_cuda)
        # FIX: was a Python 2 `print` statement — a SyntaxError on Python 3.
        print("Val F1 Score: {:f}".format(val_F1_score))
        sys.exit(0)

    train_dataset = LocalDataLoader.Dataset(dataset_name='SumMe',
                                            split='train',
                                            clip_size=args.clip_size,
                                            output_score=True,
                                            sample_rates=[1, 5, 10])
    val_dataset = LocalDataLoader.Dataset(dataset_name='SumMe',
                                          split='val',
                                          clip_size=args.clip_size,
                                          output_score=True)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=4)
    val_dataloader = DataLoader(val_dataset,
                                batch_size=args.batch_size,
                                shuffle=True,
                                num_workers=4)

    model_optim = optim.Adam(filter(lambda p: p.requires_grad,
                                    model.parameters()),
                             lr=args.lr)
    NLL = torch.nn.NLLLoss()

    # Best-so-far metrics across epochs; isBest_status flags which of them
    # improved this epoch (reset to 0 at the end of every epoch).
    best_status = {
        'train_accuracy': 0,
        'val_accuracy': 0,
        'train_loss': float('inf'),
        'val_loss': float('inf')
    }
    isBest_status = {
        'train_accuracy': 0,
        'val_accuracy': 0,
        'train_loss': 0,
        'val_loss': 0
    }

    for epoch in range(args.start_epoch, args.nof_epoch):
        # ---- training pass ----
        total_losses = AverageMeter()
        loc_losses = AverageMeter()
        cls_losses = AverageMeter()
        Accuracy_Top1 = AverageMeter()
        Accuracy_Top3 = AverageMeter()
        F1_Top1 = AverageMeter()
        F1_Top3 = AverageMeter()
        model.train()
        pbar = progressbar.ProgressBar(max_value=len(train_dataloader))
        for i_batch, sample_batched in enumerate(train_dataloader):
            pbar.update(i_batch)
            # Batch layout: (features, gt indices, gt scores) — assumed from
            # the dataset's output_score=True mode; TODO confirm in loader.
            feature_batch = Variable(sample_batched[0])
            gt_index_batch = Variable(sample_batched[1])
            score_batch = Variable(sample_batched[2])
            if use_cuda:
                feature_batch = feature_batch.cuda()
                gt_index_batch = gt_index_batch.cuda()
                score_batch = score_batch.cuda()

            index_vector, segment_score = model(feature_batch)
            segment_indices = loss_transforms.torchVT_scores2indices(index_vector)
            # IoU between predicted and ground-truth segments drives the
            # regression (cls) loss; it is a constant w.r.t. the graph.
            overlap = loss_transforms.IoU_Overlaps(segment_indices.data,
                                                   gt_index_batch.data)
            overlap = Variable(overlap, requires_grad=False)
            if use_cuda:
                overlap = overlap.cuda()
            cls_loss = losses.ClsLocLoss_Regression(segment_score, score_batch,
                                                    overlap, thres=0.5)

            # Flatten to (n, clip_size) rows for NLL over clip positions.
            index_vector = index_vector.contiguous().view(-1, args.clip_size)
            gt_index_batch = gt_index_batch.view(-1)
            accuracy = Metrics.accuracy_topN(index_vector.data,
                                             gt_index_batch.data, topk=[1, 3])
            F1 = Metrics.accuracy_F1(index_vector.data, gt_index_batch.data,
                                     topk=[1, 3])
            # index_vector holds probabilities, so log() + NLLLoss ~= CE.
            loc_loss = NLL(torch.log(index_vector), gt_index_batch)
            total_loss = cls_loss + loc_loss

            model_optim.zero_grad()
            total_loss.backward()
            model_optim.step()

            total_losses.update(total_loss.data[0], feature_batch.size(0))
            cls_losses.update(cls_loss.data[0], feature_batch.size(0))
            loc_losses.update(loc_loss.data[0], feature_batch.size(0))
            Accuracy_Top1.update(accuracy[0][0], feature_batch.size(0))
            Accuracy_Top3.update(accuracy[1][0], feature_batch.size(0))
            F1_Top1.update(F1[0], feature_batch.size(0))
            F1_Top3.update(F1[1], feature_batch.size(0))

        print(
            "Train -- Epoch :{:06d}, LR: {:.6f},\tloc-loss={:.4f}\tcls-loss={:.4f}\ttop1={:.4f}\ttop3={:.4f}\tF1_1={:.4f}\tF1_3={:.4f}"
            .format(epoch, model_optim.param_groups[0]['lr'], loc_losses.avg,
                    cls_losses.avg, Accuracy_Top1.avg, Accuracy_Top3.avg,
                    F1_Top1.avg, F1_Top3.avg))
        if best_status['train_loss'] > total_losses.avg:
            best_status['train_loss'] = total_losses.avg
            isBest_status['train_loss'] = 1
        if best_status['train_accuracy'] < Accuracy_Top1.avg:
            best_status['train_accuracy'] = Accuracy_Top1.avg
            isBest_status['train_accuracy'] = 1

        # ---- validation pass (same pipeline, no optimizer step) ----
        model.eval()
        total_losses = AverageMeter()
        loc_losses = AverageMeter()
        cls_losses = AverageMeter()
        Accuracy_Top1 = AverageMeter()
        Accuracy_Top3 = AverageMeter()
        F1_Top1 = AverageMeter()
        F1_Top3 = AverageMeter()
        pbar = progressbar.ProgressBar(max_value=len(val_dataloader))
        for i_batch, sample_batched in enumerate(val_dataloader):
            pbar.update(i_batch)
            feature_batch = Variable(sample_batched[0])
            gt_index_batch = Variable(sample_batched[1])
            score_batch = Variable(sample_batched[2])
            if use_cuda:
                feature_batch = feature_batch.cuda()
                gt_index_batch = gt_index_batch.cuda()
                score_batch = score_batch.cuda()

            index_vector, segment_score = model(feature_batch)
            segment_indices = loss_transforms.torchVT_scores2indices(index_vector)
            overlap = loss_transforms.IoU_Overlaps(segment_indices.data,
                                                   gt_index_batch.data)
            overlap = Variable(overlap, requires_grad=False)
            if use_cuda:
                overlap = overlap.cuda()
            cls_loss = losses.ClsLocLoss_Regression(segment_score, score_batch,
                                                    overlap, thres=0.5)

            index_vector = index_vector.contiguous().view(-1, args.clip_size)
            gt_index_batch = gt_index_batch.view(-1)
            accuracy = Metrics.accuracy_topN(index_vector.data,
                                             gt_index_batch.data, topk=[1, 3])
            F1 = Metrics.accuracy_F1(index_vector.data, gt_index_batch.data,
                                     topk=[1, 3])
            loc_loss = NLL(torch.log(index_vector), gt_index_batch)
            total_loss = cls_loss + loc_loss

            cls_losses.update(cls_loss.data[0], feature_batch.size(0))
            loc_losses.update(loc_loss.data[0], feature_batch.size(0))
            total_losses.update(total_loss.data[0], feature_batch.size(0))
            Accuracy_Top1.update(accuracy[0][0], feature_batch.size(0))
            Accuracy_Top3.update(accuracy[1][0], feature_batch.size(0))
            F1_Top1.update(F1[0], feature_batch.size(0))
            F1_Top3.update(F1[1], feature_batch.size(0))

        print(
            "Test -- Epoch :{:06d}, LR: {:.6f},\tloc-loss={:.4f}\tcls-loss={:.4f}\ttop1={:.4f}\ttop3={:.4f}\tF1_1={:.4f}\tF1_3={:.4f}"
            .format(epoch, model_optim.param_groups[0]['lr'], loc_losses.avg,
                    cls_losses.avg, Accuracy_Top1.avg, Accuracy_Top3.avg,
                    F1_Top1.avg, F1_Top3.avg))

        if best_status['val_loss'] > total_losses.avg:
            best_status['val_loss'] = total_losses.avg
            isBest_status['val_loss'] = 1
        if best_status['val_accuracy'] < Accuracy_Top1.avg:
            best_status['val_accuracy'] = Accuracy_Top1.avg
            isBest_status['val_accuracy'] = 1

        # NOTE: 'file_direcotry' [sic] matches save_checkpoint's keyword.
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'val_loss': best_status['val_loss'],
                'val_accuracy': best_status['val_accuracy'],
                'train_loss': best_status['train_loss'],
                'train_accuracy': best_status['train_accuracy']
            },
            isBest_status,
            file_direcotry='vsPtrDep_Combine')
        # Reset the per-epoch "improved" flags.
        for item in isBest_status:
            isBest_status[item] = 0
def main():
    """Train a PointerNetwork summarizer on SumMe or TVSum with 5-fold splits.

    Builds a run-specific checkpoint/log directory, optionally resumes from a
    checkpoint, derives deterministic train/val/test video permutations from
    ``args.split``, and trains with a weighted classification loss plus a
    head/tail pointer localization loss (EMD-L2 or cross-entropy, chosen by
    ``args.EMD``). Evaluates F1 on val and test every epoch and checkpoints.

    Side effects: parses CLI args (mutates global ``args``), creates
    directories, writes logs and checkpoints.
    """
    global args
    args = (parser.parse_args())
    use_cuda = cuda_model.ifUseCuda(args.gpu_id, args.multiGpu)

    script_name_stem = dir_utils.get_stem(__file__)
    # Encode the full hyper-parameter configuration into the ckpt dir name.
    save_directory = dir_utils.get_dir(
        os.path.join(
            project_root, 'ckpts',
            '{:s}-{:s}-{:s}-split-{:d}-claweight-{:s}-{:.1f}-assgin{:.2f}-alpha{:.4f}-dim{:d}-dropout{:.4f}-seqlen{:d}-samplerate-{:d}-{:s}-{:s}'
            .format(script_name_stem, args.dataset, args.eval_metrics,
                    args.split, str(args.set_cls_weight), args.cls_pos_weight,
                    args.hassign_thres, args.alpha, args.hidden_dim,
                    args.dropout, args.seq_len, args.sample_rate,
                    loss_type[args.EMD], match_type[args.hmatch])))
    log_file = os.path.join(save_directory,
                            'log-{:s}.txt'.format(dir_utils.get_date_str()))
    logger = log_utils.get_logger(log_file)
    log_utils.print_config(vars(args), logger)

    model = PointerNetwork(input_dim=args.input_dim,
                           embedding_dim=args.embedding_dim,
                           hidden_dim=args.hidden_dim,
                           max_decoding_len=args.net_outputs,
                           dropout=args.dropout,
                           n_enc_layers=2,
                           output_classes=2)
    hassign_thres = args.hassign_thres
    logger.info("Number of Params\t{:d}".format(
        sum([p.data.nelement() for p in model.parameters()])))
    logger.info('Saving logs to {:s}'.format(log_file))

    if args.resume is not None:
        # NOTE: resume index is hard-coded; args.resume is a format template.
        ckpt_idx = 48
        ckpt_filename = args.resume.format(ckpt_idx)
        assert os.path.isfile(ckpt_filename), 'Error: no checkpoint directory found!'
        checkpoint = torch.load(ckpt_filename,
                                map_location=lambda storage, loc: storage)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        train_iou = checkpoint['IoU']
        args.start_epoch = checkpoint['epoch']
        logger.info("=> loading checkpoint '{}', current iou: {:.04f}".format(
            ckpt_filename, train_iou))

    model = cuda_model.convertModel2Cuda(model, gpu_id=args.gpu_id,
                                         multiGpu=args.multiGpu)

    # get train/val split: SumMe has 25 videos, TVSum has 50.
    if args.dataset == 'SumMe':
        train_val_test_perms = np.arange(25)
    elif args.dataset == 'TVSum':
        train_val_test_perms = np.arange(50)
    else:
        # FIX: previously fell through and crashed below with an opaque
        # NameError on train_val_test_perms for any other dataset name.
        raise ValueError('Unsupported dataset: {:s}'.format(args.dataset))
    # fixed permutation (seeded) so splits are reproducible across runs
    random.Random(0).shuffle(train_val_test_perms)
    train_val_test_perms = train_val_test_perms.reshape([5, -1])
    # Fold `args.split` is the test set; the rest is train+val.
    train_val_perms = np.delete(train_val_test_perms, args.split,
                                0).reshape([-1])
    # NOTE(review): the 17/remainder train/val cut is hard-coded and assumes
    # a SumMe-sized (20-video) train+val pool — confirm intent for TVSum.
    train_perms = train_val_perms[:17]
    val_perms = train_val_perms[17:]
    test_perms = train_val_test_perms[args.split]
    logger.info(" training split: " + str(train_perms))
    logger.info(" val split: " + str(val_perms))
    logger.info(" test split: " + str(test_perms))

    if args.location == 'home':
        data_path = os.path.join(os.path.expanduser('~'), 'datasets')
    else:
        data_path = os.path.join('/nfs/%s/boyu/SDN' % (args.location),
                                 'datasets')

    train_dataset = vsSumLoader3_c3dd.cDataset(dataset_name=args.dataset,
                                               split='train',
                                               seq_length=args.seq_len,
                                               overlap=0.9,
                                               sample_rate=[args.sample_rate],
                                               train_val_perms=train_perms,
                                               data_path=data_path)
    val_evaluator = Evaluator.Evaluator(dataset_name=args.dataset,
                                        split='val',
                                        seq_length=args.seq_len,
                                        overlap=0.9,
                                        sample_rate=[args.sample_rate],
                                        sum_budget=0.15,
                                        train_val_perms=val_perms,
                                        eval_metrics=args.eval_metrics,
                                        data_path=data_path)
    test_evaluator = Evaluator.Evaluator(dataset_name=args.dataset,
                                         split='test',
                                         seq_length=args.seq_len,
                                         overlap=0.9,
                                         sample_rate=[args.sample_rate],
                                         sum_budget=0.15,
                                         train_val_perms=test_perms,
                                         eval_metrics=args.eval_metrics,
                                         data_path=data_path)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=4)
    # Validation goes through val_evaluator (below), not a DataLoader.

    model_optim = optim.Adam(filter(lambda p: p.requires_grad,
                                    model.parameters()),
                             lr=float(args.lr))
    optim_scheduler = optim.lr_scheduler.ReduceLROnPlateau(model_optim, 'min',
                                                           patience=10)
    alpha = args.alpha

    # Class weights for (background, positive); either derived from the
    # train-set positive rate or uniform.
    if args.set_cls_weight:
        cls_weights = torch.FloatTensor([
            1. * train_dataset.n_positive_train_samples /
            train_dataset.n_total_train_samples, args.cls_pos_weight
        ]).cuda()
    else:
        cls_weights = torch.FloatTensor([0.5, 0.5]).cuda()
    logger.info(" total: {:d}, total pos: {:d}".format(
        train_dataset.n_total_train_samples,
        train_dataset.n_positive_train_samples))
    logger.info(" classify weight: " + str(cls_weights[0]) +
                str(cls_weights[1]))

    for epoch in range(args.start_epoch, args.nof_epoch + args.start_epoch):
        total_losses = AverageMeter()
        loc_losses = AverageMeter()
        cls_losses = AverageMeter()
        Accuracy = AverageMeter()  # NOTE: never updated below; logs as 0.
        IOU = AverageMeter()
        ordered_IOU = AverageMeter()  # NOTE: never updated below; logs as 0.
        model.train()
        pbar = progressbar.ProgressBar(max_value=len(train_dataloader))
        for i_batch, sample_batch in enumerate(train_dataloader):
            pbar.update(i_batch)
            # Batch layout: (features, start idx, end idx, #valid gt segments)
            # — assumed from the loader; TODO confirm.
            feature_batch = Variable(sample_batch[0])
            start_indices = Variable(sample_batch[1])
            end_indices = Variable(sample_batch[2])
            gt_valids = Variable(sample_batch[3])
            if use_cuda:
                feature_batch = feature_batch.cuda()
                start_indices = start_indices.cuda()
                end_indices = end_indices.cuda()

            gt_positions = torch.stack([start_indices, end_indices], dim=-1)
            head_pointer_probs, head_positions, tail_pointer_probs, tail_positions, cls_scores, _ = model(
                feature_batch)
            pred_positions = torch.stack([head_positions, tail_positions],
                                         dim=-1)

            # Match predictions to ground truth (Hungarian vs greedy); either
            # way the IoU statistics come from the Hungarian matcher.
            if args.hmatch:
                assigned_scores, assigned_locations, total_valid, total_iou = h_match.Assign_Batch_v2(
                    gt_positions, pred_positions, gt_valids,
                    thres=hassign_thres)
            else:
                assigned_scores, assigned_locations = f_match.Assign_Batch(
                    gt_positions, pred_positions, gt_valids,
                    thres=hassign_thres)
                _, _, total_valid, total_iou = h_match.Assign_Batch_v2(
                    gt_positions, pred_positions, gt_valids,
                    thres=hassign_thres)
            if total_valid > 0:
                IOU.update(total_iou / total_valid, total_valid)

            assigned_scores = Variable(torch.LongTensor(assigned_scores),
                                       requires_grad=False)
            assigned_locations = Variable(torch.LongTensor(assigned_locations),
                                          requires_grad=False)
            if use_cuda:
                assigned_scores = assigned_scores.cuda()
                assigned_locations = assigned_locations.cuda()

            # Classification loss over all decoder slots.
            cls_scores = cls_scores.contiguous().view(-1,
                                                      cls_scores.size()[-1])
            assigned_scores = assigned_scores.contiguous().view(-1)
            cls_loss = F.cross_entropy(cls_scores, assigned_scores,
                                       weight=cls_weights)

            if total_valid > 0:
                # Localization loss only on slots matched to a gt segment.
                assigned_head_positions = assigned_locations[:, :, 0]
                assigned_head_positions = assigned_head_positions.contiguous(
                ).view(-1)
                assigned_tail_positions = assigned_locations[:, :, 1]
                assigned_tail_positions = assigned_tail_positions.contiguous(
                ).view(-1)

                head_pointer_probs = head_pointer_probs.contiguous().view(
                    -1, head_pointer_probs.size()[-1])
                tail_pointer_probs = tail_pointer_probs.contiguous().view(
                    -1, tail_pointer_probs.size()[-1])

                # Keep only matched slots (assigned_scores nonzero).
                assigned_head_positions = torch.masked_select(
                    assigned_head_positions, assigned_scores.byte())
                assigned_tail_positions = torch.masked_select(
                    assigned_tail_positions, assigned_scores.byte())
                head_pointer_probs = torch.index_select(
                    head_pointer_probs,
                    dim=0,
                    index=assigned_scores.nonzero().squeeze(1))
                tail_pointer_probs = torch.index_select(
                    tail_pointer_probs,
                    dim=0,
                    index=assigned_scores.nonzero().squeeze(1))

                if args.EMD:
                    # Earth-mover-style L2 between prob vectors and one-hot
                    # targets.
                    assigned_head_positions = to_one_hot(
                        assigned_head_positions, args.seq_len)
                    assigned_tail_positions = to_one_hot(
                        assigned_tail_positions, args.seq_len)
                    prediction_head_loss = EMD_L2(head_pointer_probs,
                                                  assigned_head_positions,
                                                  needSoftMax=True)
                    prediction_tail_loss = EMD_L2(tail_pointer_probs,
                                                  assigned_tail_positions,
                                                  needSoftMax=True)
                else:
                    prediction_head_loss = F.cross_entropy(
                        head_pointer_probs, assigned_head_positions)
                    prediction_tail_loss = F.cross_entropy(
                        tail_pointer_probs, assigned_tail_positions)
                loc_losses.update(
                    prediction_head_loss.data.item() +
                    prediction_tail_loss.data.item(), feature_batch.size(0))
                total_loss = alpha * (prediction_head_loss +
                                      prediction_tail_loss) + cls_loss
            else:
                total_loss = cls_loss

            model_optim.zero_grad()
            total_loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.)
            model_optim.step()
            cls_losses.update(cls_loss.data.item(), feature_batch.size(0))
            total_losses.update(total_loss.item(), feature_batch.size(0))

        logger.info(
            "Train -- Epoch :{:06d}, LR: {:.6f},\tloss={:.4f}, \t c-loss:{:.4f}, \tloc-loss:{:.4f}\tcls-Accuracy:{:.4f}\tloc-Avg-IOU:{:.4f}\t topIOU:{:.4f}"
            .format(epoch, model_optim.param_groups[0]['lr'],
                    total_losses.avg, cls_losses.avg, loc_losses.avg,
                    Accuracy.avg, IOU.avg, ordered_IOU.avg))

        optim_scheduler.step(total_losses.avg)

        # Evaluation: delegate F1 computation to the evaluators.
        model.eval()
        val_F1s = val_evaluator.Evaluate(model)
        test_F1s = test_evaluator.Evaluate(model)
        logger.info("Val -- Epoch :{:06d}, LR: {:.6f},\tF1s:{:.4f}".format(
            epoch, model_optim.param_groups[0]['lr'], val_F1s))
        logger.info("Test -- Epoch :{:06d},\tF1s:{:.4f}".format(
            epoch, test_F1s))

        if epoch % 1 == 0:
            # NOTE: 'file_direcotry' [sic] matches save_checkpoint's keyword.
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'loss': total_losses.avg,
                    'cls_loss': cls_losses.avg,
                    'loc_loss': loc_losses.avg,
                    'IoU': IOU.avg,
                    'val_F1s': val_F1s,
                    'test_F1s': test_F1s
                }, (epoch + 1),
                file_direcotry=save_directory)
def main():
    """Train a PointerNetwork segment detector with L2 pointer losses.

    Resumes into (and logs/checkpoints to) ``args.resume`` when given,
    otherwise creates a config-named directory under ``project_root/ckpts``.
    Trains on ``cDataset`` with a fixed sample rate of 4, using a weighted
    classification loss plus Simple_L2 head/tail pointer losses on matched
    slots, and reports matched-IoU on the val split each epoch.

    Side effects: parses CLI args (mutates global ``args``), creates
    directories, writes logs and checkpoints.
    """
    global args
    args = (parser.parse_args())
    use_cuda = cuda_model.ifUseCuda(args.gpu_id, args.multiGpu)
    script_name_stem = dir_utils.get_stem(__file__)
    # Fresh run: encode the configuration into the ckpt dir name.
    # Resumed run: reuse the resume directory for logs and new checkpoints.
    if args.resume is None:
        save_directory = dir_utils.get_dir(
            os.path.join(
                project_root, 'ckpts', '{:s}'.format(args.dataset),
                '{:s}-{:s}-assgin{:.2f}-alpha{:.4f}-dim{:d}-dropout{:.4f}-seqlen{:d}-{:s}-{:s}'.
                format(script_name_stem, args.sufix, args.hassign_thres,
                       args.alpha, args.hidden_dim, args.dropout,
                       args.seq_len, 'L2', match_type[args.hmatch])))
    else:
        save_directory = args.resume
    log_file = os.path.join(save_directory,
                            'log-{:s}.txt'.format(dir_utils.get_date_str()))
    logger = log_utils.get_logger(log_file)
    log_utils.print_config(vars(args), logger)

    model = PointerNetwork(input_dim=args.input_dim,
                           embedding_dim=args.embedding_dim,
                           hidden_dim=args.hidden_dim,
                           max_decoding_len=args.net_outputs,
                           dropout=args.dropout,
                           n_enc_layers=2,
                           output_classes=2)
    logger.info("Number of Params\t{:d}".format(
        sum([p.data.nelement() for p in model.parameters()])))
    logger.info('Saving logs to {:s}'.format(log_file))

    # Optionally resume weights and epoch from checkpoint_{fileid:04d}.pth.tar.
    if args.resume is not None:
        ckpt_idx = args.fileid
        ckpt_filename = os.path.join(
            args.resume, 'checkpoint_{:04d}.pth.tar'.format(ckpt_idx))
        assert os.path.isfile(ckpt_filename), 'Error: no checkpoint directory found!'
        # map_location forces a CPU load regardless of where it was saved.
        checkpoint = torch.load(ckpt_filename,
                                map_location=lambda storage, loc: storage)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        train_iou = checkpoint['IoU']
        args.start_epoch = checkpoint['epoch']
        logger.info("=> loading checkpoint '{}', current iou: {:.04f}".format(
            ckpt_filename, train_iou))

    model = cuda_model.convertModel2Cuda(model,
                                         gpu_id=args.gpu_id,
                                         multiGpu=args.multiGpu)

    # Train uses random offset/drop augmentation; val is deterministic.
    train_dataset = cDataset(dataset_split='train',
                             seq_length=args.seq_len,
                             sample_rate=[4],
                             rdOffset=True,
                             rdDrop=True)
    val_dataset = cDataset(dataset_split='val',
                           seq_length=args.seq_len,
                           sample_rate=[4],
                           rdDrop=False,
                           rdOffset=False)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=4)
    val_dataloader = DataLoader(val_dataset,
                                batch_size=args.batch_size,
                                shuffle=False,
                                num_workers=4)

    model_optim = optim.Adam(filter(lambda p: p.requires_grad,
                                    model.parameters()),
                             lr=float(args.lr))
    optim_scheduler = optim.lr_scheduler.ReduceLROnPlateau(model_optim, 'min',
                                                           patience=10)
    # Hard-coded (background, positive) class weights for cross-entropy.
    cls_weights = torch.FloatTensor([0.05, 1.0]).cuda()
    # cls_weights = None

    # Progress-bar widget list; the FormatLabel slots at indices -8/-6/-4 are
    # rewritten in the training loop with live loss/IoU values.
    widgets = ['Test: ', ' -- [ ', progressbar.Counter(), '|',
               str(len(train_dataloader)), ' ] ',
               progressbar.Bar(),
               ' cls loss: ', progressbar.FormatLabel(''),
               ' loc loss: ', progressbar.FormatLabel(''),
               ' IoU : ', progressbar.FormatLabel(''),
               ' (', progressbar.ETA(), ' ) ']
    # bar = progressbar.ProgressBar(max_value=step_per_epoch, widgets=widgets)
    # bar.start()

    for epoch in range(args.start_epoch, args.nof_epoch + args.start_epoch):
        total_losses = AverageMeter()
        loc_losses = AverageMeter()
        cls_losses = AverageMeter()
        matched_IOU = AverageMeter()
        true_IOU = AverageMeter()
        model.train()
        pbar = progressbar.ProgressBar(max_value=len(train_dataloader),
                                       widgets=widgets)
        pbar.start()
        for i_batch, sample_batch in enumerate(train_dataloader):
            # pbar.update(i_batch)
            # Batch layout: (features, start idx, end idx, #valid gt segments)
            # — assumed from the loader; TODO confirm.
            feature_batch = Variable(sample_batch[0])
            start_indices = Variable(sample_batch[1])
            end_indices = Variable(sample_batch[2])
            gt_valids = Variable(sample_batch[3])
            # gt_overlaps = Variable(sample_batch[4])
            # seq_labels = Variable(sample_batch[3])
            if use_cuda:
                feature_batch = feature_batch.cuda()
                start_indices = start_indices.cuda()
                end_indices = end_indices.cuda()

            gt_positions = torch.stack([start_indices, end_indices], dim=-1)
            head_pointer_probs, head_positions, tail_pointer_probs, tail_positions, cls_scores, _ = model(feature_batch)
            pred_positions = torch.stack([head_positions, tail_positions],
                                         dim=-1)
            # pred_scores = F.sigmoid(cls_scores)

            # Match predicted slots to ground-truth segments.
            if args.hmatch:
                assigned_scores, assigned_locations, total_valid, total_iou = h_match.Assign_Batch_v2(
                    gt_positions, pred_positions, gt_valids,
                    thres=args.hassign_thres)
            else:
                #FIXME: do it later!
                assigned_scores, assigned_locations, total_valid, total_iou = f_match.Assign_Batch_v2(
                    gt_positions, pred_positions, gt_valids,
                    thres=args.hassign_thres)
            # _, _, total_valid, total_iou = h_match.Assign_Batch_v2(gt_positions, pred_positions, gt_valids, thres=args.hassign_thres)

            # Sanity check: greedy total match must agree with the assigner's
            # valid count.
            true_valid, true_iou = h_match.totalMatch_Batch(
                gt_positions, pred_positions, gt_valids)
            assert true_valid == total_valid, 'WRONG'
            if total_valid > 0:
                matched_IOU.update(total_iou / total_valid, total_valid)
                true_IOU.update(true_iou / total_valid, total_valid)

            assigned_scores = Variable(torch.LongTensor(assigned_scores),
                                       requires_grad=False)
            # assigned_overlaps = Variable(torch.FloatTensor(assigned_overlaps), requires_grad=False)
            assigned_locations = Variable(torch.LongTensor(assigned_locations),
                                          requires_grad=False)
            if use_cuda:
                assigned_scores = assigned_scores.cuda()
                assigned_locations = assigned_locations.cuda()
                # assigned_overlaps = assigned_overlaps.cuda()

            # pred_scores = pred_scores.contiguous().view(-1)
            # assigned_scores = assigned_scores.contiguous().view(-1)
            # assigned_overlaps = assigned_overlaps.contiguous().view(-1)
            # cls_loss = ClsLocLoss2_OneClsRegression(pred_scores, assigned_scores, assigned_overlaps)

            # Classification loss over all decoder slots.
            cls_scores = cls_scores.contiguous().view(-1,
                                                      cls_scores.size()[-1])
            assigned_scores = assigned_scores.contiguous().view(-1)
            cls_loss = F.cross_entropy(cls_scores, assigned_scores,
                                       weight=cls_weights)

            if total_valid > 0:
                # Localization loss only on slots matched to a gt segment
                # (assigned_scores nonzero).
                assigned_head_positions = assigned_locations[:, :, 0]
                assigned_head_positions = assigned_head_positions.contiguous().view(-1)
                assigned_tail_positions = assigned_locations[:, :, 1]
                assigned_tail_positions = assigned_tail_positions.contiguous().view(-1)

                head_pointer_probs = head_pointer_probs.contiguous().view(
                    -1, head_pointer_probs.size()[-1])
                tail_pointer_probs = tail_pointer_probs.contiguous().view(
                    -1, tail_pointer_probs.size()[-1])

                assigned_head_positions = torch.masked_select(
                    assigned_head_positions, assigned_scores.byte())
                assigned_tail_positions = torch.masked_select(
                    assigned_tail_positions, assigned_scores.byte())
                head_pointer_probs = torch.index_select(
                    head_pointer_probs, dim=0,
                    index=assigned_scores.nonzero().squeeze(1))
                tail_pointer_probs = torch.index_select(
                    tail_pointer_probs, dim=0,
                    index=assigned_scores.nonzero().squeeze(1))

                # L2 against one-hot targets (EMD branch kept unconditionally
                # here, unlike the sibling scripts).
                # if args.EMD:
                assigned_head_positions = to_one_hot(assigned_head_positions,
                                                     args.seq_len)
                assigned_tail_positions = to_one_hot(assigned_tail_positions,
                                                     args.seq_len)
                prediction_head_loss = Simple_L2(head_pointer_probs,
                                                 assigned_head_positions,
                                                 needSoftMax=True)
                prediction_tail_loss = Simple_L2(tail_pointer_probs,
                                                 assigned_tail_positions,
                                                 needSoftMax=True)
                # else:
                #     prediction_head_loss = F.cross_entropy(head_pointer_probs, assigned_head_positions)
                #     prediction_tail_loss = F.cross_entropy(tail_pointer_probs, assigned_tail_positions)
                loc_losses.update(
                    prediction_head_loss.data.item() +
                    prediction_tail_loss.data.item(), total_valid)  # FIXME
                total_loss = args.alpha * (prediction_head_loss +
                                           prediction_tail_loss) + cls_loss
            else:
                total_loss = cls_loss

            model_optim.zero_grad()
            total_loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.)
            model_optim.step()
            cls_losses.update(cls_loss.data.item(), feature_batch.size(0))
            total_losses.update(total_loss.item(), feature_batch.size(0))
            # Refresh the live loss/IoU labels in the progress bar.
            widgets[-8] = progressbar.FormatLabel('{:04.4f}'.format(cls_losses.avg))
            widgets[-6] = progressbar.FormatLabel('{:04.4f}'.format(loc_losses.avg))
            widgets[-4] = progressbar.FormatLabel('{:01.4f}'.format(matched_IOU.avg))
            pbar.update(i_batch)

        logger.info(
            "Train -- Epoch :{:06d}, LR: {:.6f},\tloss={:.4f}, \t c-loss:{:.4f}, \tloc-loss:{:.4f}\tAvg-matched_IOU:{:.4f}\t Avg-true-IOU:{:.4f}".format(
                epoch, model_optim.param_groups[0]['lr'], total_losses.avg,
                cls_losses.avg, loc_losses.avg, matched_IOU.avg,
                true_IOU.avg))
        train_iou = matched_IOU.avg
        optim_scheduler.step(total_losses.avg)

        # ---- validation: matched-IoU only, no losses ----
        model.eval()
        matched_IOU = AverageMeter()
        pbar = progressbar.ProgressBar(max_value=len(val_dataloader))
        for i_batch, sample_batch in enumerate(val_dataloader):
            pbar.update(i_batch)
            feature_batch = Variable(sample_batch[0])
            start_indices = Variable(sample_batch[1])
            end_indices = Variable(sample_batch[2])
            gt_valids = Variable(sample_batch[3])
            # valid_indices = Variable(sample_batch[3])
            if use_cuda:
                feature_batch = feature_batch.cuda()
                start_indices = start_indices.cuda()
                end_indices = end_indices.cuda()
            gt_positions = torch.stack([start_indices, end_indices], dim=-1)
            head_pointer_probs, head_positions, tail_pointer_probs, tail_positions, cls_scores, _ = model(
                feature_batch)
            pred_positions = torch.stack([head_positions, tail_positions],
                                         dim=-1)
            # assigned_scores, assigned_locations, total_valid, total_iou = h_match.Assign_Batch_eval(gt_positions, pred_positions, gt_valids, thres=args.hassign_thres) #FIXME
            matched_valid, matched_iou = h_match.totalMatch_Batch(
                gt_positions, pred_positions, gt_valids)
            if matched_valid > 0:
                matched_IOU.update(matched_iou / matched_valid, matched_valid)

        logger.info(
            "Val -- Epoch :{:06d}, LR: {:.6f},\tloc-Avg-matched_IOU:{:.4f}".format(
                epoch, model_optim.param_groups[0]['lr'], matched_IOU.avg,))

        if epoch % 1 == 0:
            # NOTE: 'file_direcotry' [sic] matches save_checkpoint's keyword.
            save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'loss': total_losses.avg,
                'cls_loss': cls_losses.avg,
                'loc_loss': loc_losses.avg,
                'train-IOU': train_iou,
                'IoU': matched_IOU.avg}, (epoch + 1),
                file_direcotry=save_directory)
def main():
    """Train a PointerNetwork segment detector on cDataset and validate once per epoch.

    Configuration comes from the module-level argparse ``parser`` (stored in the
    global ``args``). Optionally resumes from a checkpoint template in
    ``args.resume``, then runs per-epoch train + validation passes and saves a
    checkpoint every epoch.
    """
    global args
    args = (parser.parse_args())
    use_cuda = cuda_model.ifUseCuda(args.gpu_id, args.multiGpu)
    # Pretty print the run args
    pp.pprint(vars(args))
    model = PointerNetwork(input_dim=args.input_dim,
                           embedding_dim=args.embedding_dim,
                           hidden_dim=args.hidden_dim,
                           max_decoding_len=args.net_outputs,
                           dropout=args.dropout,
                           n_enc_layers=2)
    hassign_thres = args.hassign_thres
    print("Number of Params\t{:d}".format(sum([p.data.nelement() for p in model.parameters()])))
    script_name_stem = dir_utils.get_stem(__file__)
    # Checkpoint directory name encodes the main hyper-parameters of this run.
    save_directory = os.path.join(project_root, 'ckpts',
                                  '{:s}-assgin{:.2f}-alpha{:.4f}-dim{:d}-dropout{:.4f}-seqlen{:d}-ckpt'.format(
                                      script_name_stem, hassign_thres, args.alpha, args.hidden_dim,
                                      args.dropout, args.seq_len))
    print("Save ckpt to {:s}".format(save_directory))
    if args.resume is not None:
        # ``args.resume`` is a filename template; ckpt_idx picks the epoch to load.
        ckpt_idx = 3
        ckpt_filename = args.resume.format(ckpt_idx)
        assert os.path.isfile(ckpt_filename), 'Error: no checkpoint directory found!'
        checkpoint = torch.load(ckpt_filename, map_location=lambda storage, loc: storage)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        train_iou = checkpoint['IoU']
        args.start_epoch = checkpoint['epoch']
        print("=> loading checkpoint '{}', current iou: {:.04f}".format(ckpt_filename, train_iou))
    model = cuda_model.convertModel2Cuda(model, gpu_id=args.gpu_id, multiGpu=args.multiGpu)
    # Random drop/offset augmentation is enabled only for the training split.
    train_dataset = cDataset(seq_length=args.seq_len, overlap=0.9, sample_rate=[4],
                             dataset_split='train', rdDrop=True, rdOffset=True)
    val_dataset = cDataset(seq_length=args.seq_len, overlap=0.9, sample_rate=[4],
                           dataset_split='val', rdDrop=False, rdOffset=False)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=4)
    val_dataloader = DataLoader(val_dataset,
                                batch_size=args.batch_size,
                                shuffle=False,
                                num_workers=4)
    model_optim = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=float(args.lr))
    optim_scheduler = optim.lr_scheduler.ReduceLROnPlateau(model_optim, 'min', patience=20)
    alpha = args.alpha
    # Cross-entropy class weights: background (class 0) heavily down-weighted
    # relative to matched predictions (class 1).
    cls_weights = torch.FloatTensor([0.05, 1.0]).cuda()
    for epoch in range(args.start_epoch, args.nof_epoch + args.start_epoch):
        # Fresh meters for the training pass of this epoch.
        total_losses = AverageMeter()
        loc_losses = AverageMeter()
        cls_losses = AverageMeter()
        Accuracy = AverageMeter()
        IOU = AverageMeter()
        ordered_IOU = AverageMeter()
        model.train()
        pbar = progressbar.ProgressBar(max_value=len(train_dataloader))
        for i_batch, sample_batch in enumerate(train_dataloader):
            pbar.update(i_batch)
            feature_batch = Variable(sample_batch[0])
            start_indices = Variable(sample_batch[1])
            end_indices = Variable(sample_batch[2])
            gt_valids = Variable(sample_batch[3])
            if use_cuda:
                feature_batch = feature_batch.cuda()
                start_indices = start_indices.cuda()
                end_indices = end_indices.cuda()
            # Ground-truth and predicted segments packed as (start, end) pairs.
            gt_positions = torch.stack([start_indices, end_indices], dim=-1)
            head_pointer_probs, head_positions, tail_pointer_probs, tail_positions, cls_scores, _ = model(feature_batch)
            pred_positions = torch.stack([head_positions, tail_positions], dim=-1)
            # Assign predictions to ground-truth segments above the IoU threshold.
            assigned_scores, assigned_locations = h_assign.Assign_Batch(gt_positions, pred_positions, gt_valids, thres=hassign_thres)
            # if np.sum(assigned_scores) > 1:
            #     print("DEBUG")
            # correct_predictions = np.sum(assigned_scores[:,:args.n_outputs])
            # cls_rate = correct_predictions*1./np.sum(assigned_scores)
            if np.sum(assigned_scores) >= 1:
                # Average IoU is tracked over positively-assigned predictions only.
                iou_rate, effective_positives = Metrics.get_avg_iou2(np.reshape(pred_positions.data.cpu().numpy(), (-1, 2)),
                                                                     np.reshape(assigned_locations, (-1, 2)),
                                                                     np.reshape(assigned_scores,
                                                                                assigned_scores.shape[0] * assigned_scores.shape[1]))
                IOU.update(iou_rate / (effective_positives), effective_positives)
                # ordered_IOU.update(ordered_iou_rate/(args.batch_size*args.n_outputs),args.batch_size*args.n_outputs)
                # n_effective_batches += 1
            assigned_scores = Variable(torch.LongTensor(assigned_scores), requires_grad=False)
            assigned_locations = Variable(torch.LongTensor(assigned_locations), requires_grad=False)
            if use_cuda:
                assigned_scores = assigned_scores.cuda()
                assigned_locations = assigned_locations.cuda()
            # Flatten batch x decoding-step dimensions for the classification loss.
            cls_scores = cls_scores.contiguous().view(-1, cls_scores.size()[-1])
            assigned_scores = assigned_scores.contiguous().view(-1)
            cls_loss = F.cross_entropy(cls_scores, assigned_scores, weight=cls_weights)
            if torch.sum(assigned_scores) > 0:
                # Localization loss is only computed when at least one prediction matched.
                assigned_head_positions = assigned_locations[:, :, 0]
                assigned_head_positions = assigned_head_positions.contiguous().view(-1)
                assigned_tail_positions = assigned_locations[:, :, 1]
                assigned_tail_positions = assigned_tail_positions.contiguous().view(-1)
                head_pointer_probs = head_pointer_probs.contiguous().view(-1, head_pointer_probs.size()[-1])
                tail_pointer_probs = tail_pointer_probs.contiguous().view(-1, tail_pointer_probs.size()[-1])
                # Keep only the rows whose assigned score is nonzero (matched predictions).
                assigned_head_positions = torch.masked_select(assigned_head_positions, assigned_scores.byte())
                assigned_tail_positions = torch.masked_select(assigned_tail_positions, assigned_scores.byte())
                head_pointer_probs = torch.index_select(head_pointer_probs, dim=0, index=assigned_scores.nonzero().squeeze(1))
                tail_pointer_probs = torch.index_select(tail_pointer_probs, dim=0, index=assigned_scores.nonzero().squeeze(1))
                # EMD-style regression between pointer distributions and one-hot targets.
                assigned_head_positions = to_one_hot(assigned_head_positions, args.seq_len)
                assigned_tail_positions = to_one_hot(assigned_tail_positions, args.seq_len)
                prediction_head_loss = EMD_L2(head_pointer_probs, assigned_head_positions, needSoftMax=True)
                prediction_tail_loss = EMD_L2(tail_pointer_probs, assigned_tail_positions, needSoftMax=True)
                loc_losses.update(prediction_head_loss.data.item() + prediction_tail_loss.data.item(),
                                  feature_batch.size(0))
                total_loss = alpha * (prediction_head_loss + prediction_tail_loss) + cls_loss
            else:
                total_loss = cls_loss
            model_optim.zero_grad()
            total_loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.)
            model_optim.step()
            cls_losses.update(cls_loss.data.item(), feature_batch.size(0))
            total_losses.update(total_loss.item(), feature_batch.size(0))
        print(
            "Train -- Epoch :{:06d}, LR: {:.6f},\tloss={:.4f}, \t c-loss:{:.4f}, \tloc-loss:{:.4f}\tcls-Accuracy:{:.4f}\tloc-Avg-IOU:{:.4f}\t topIOU:{:.4f}".format(
                epoch, model_optim.param_groups[0]['lr'], total_losses.avg, cls_losses.avg,
                loc_losses.avg, Accuracy.avg, IOU.avg, ordered_IOU.avg))
        # Scheduler watches the epoch-average training loss.
        optim_scheduler.step(total_losses.avg)
        model.eval()
        # Reset meters for the validation pass.
        # NOTE(review): validation runs without torch.no_grad(); losses below build
        # graphs that are never back-propagated — confirm this is intentional.
        total_losses = AverageMeter()
        loc_losses = AverageMeter()
        cls_losses = AverageMeter()
        Accuracy = AverageMeter()
        IOU = AverageMeter()
        ordered_IOU = AverageMeter()
        pbar = progressbar.ProgressBar(max_value=len(val_dataloader))
        for i_batch, sample_batch in enumerate(val_dataloader):
            pbar.update(i_batch)
            feature_batch = Variable(sample_batch[0])
            start_indices = Variable(sample_batch[1])
            end_indices = Variable(sample_batch[2])
            gt_valids = Variable(sample_batch[3])
            # valid_indices = Variable(sample_batch[3])
            if use_cuda:
                feature_batch = feature_batch.cuda()
                start_indices = start_indices.cuda()
                end_indices = end_indices.cuda()
            gt_positions = torch.stack([start_indices, end_indices], dim=-1)
            head_pointer_probs, head_positions, tail_pointer_probs, tail_positions, cls_scores, _ = model(
                feature_batch)
            pred_positions = torch.stack([head_positions, tail_positions], dim=-1)
            assigned_scores, assigned_locations = h_assign.Assign_Batch(gt_positions, pred_positions, gt_valids, thres=hassign_thres)
            # if np.sum(assigned_scores) > 1:
            #     print("DEBUG")
            # correct_predictions = np.sum(assigned_scores[:,:args.n_outputs])
            # cls_rate = correct_predictions*1./np.sum(assigned_scores)
            if np.sum(assigned_scores) >= 1:
                iou_rate, effective_positives = Metrics.get_avg_iou2(
                    np.reshape(pred_positions.data.cpu().numpy(), (-1, 2)),
                    np.reshape(assigned_locations, (-1, 2)),
                    np.reshape(assigned_scores,
                               assigned_scores.shape[0] * assigned_scores.shape[1]))
                IOU.update(iou_rate / (effective_positives), effective_positives)
            assigned_scores = Variable(torch.LongTensor(assigned_scores), requires_grad=False)
            assigned_locations = Variable(torch.LongTensor(assigned_locations), requires_grad=False)
            if use_cuda:
                assigned_scores = assigned_scores.cuda()
                assigned_locations = assigned_locations.cuda()
            cls_scores = cls_scores.contiguous().view(-1, cls_scores.size()[-1])
            assigned_scores = assigned_scores.contiguous().view(-1)
            cls_loss = F.cross_entropy(cls_scores, assigned_scores, weight=cls_weights)
            if torch.sum(assigned_scores) > 0:
                # Same matched-only localization loss as in the training pass.
                assigned_head_positions = assigned_locations[:, :, 0]
                assigned_head_positions = assigned_head_positions.contiguous().view(-1)
                assigned_tail_positions = assigned_locations[:, :, 1]
                assigned_tail_positions = assigned_tail_positions.contiguous().view(-1)
                head_pointer_probs = head_pointer_probs.contiguous().view(-1, head_pointer_probs.size()[-1])
                tail_pointer_probs = tail_pointer_probs.contiguous().view(-1, tail_pointer_probs.size()[-1])
                assigned_head_positions = torch.masked_select(assigned_head_positions, assigned_scores.byte())
                assigned_tail_positions = torch.masked_select(assigned_tail_positions, assigned_scores.byte())
                head_pointer_probs = torch.index_select(head_pointer_probs, dim=0, index=assigned_scores.nonzero().squeeze(1))
                tail_pointer_probs = torch.index_select(tail_pointer_probs, dim=0, index=assigned_scores.nonzero().squeeze(1))
                assigned_head_positions = to_one_hot(assigned_head_positions, args.seq_len)
                assigned_tail_positions = to_one_hot(assigned_tail_positions, args.seq_len)
                prediction_head_loss = EMD_L2(head_pointer_probs, assigned_head_positions, needSoftMax=True)
                prediction_tail_loss = EMD_L2(tail_pointer_probs, assigned_tail_positions, needSoftMax=True)
                loc_losses.update(prediction_head_loss.data.item() + prediction_tail_loss.data.item(),
                                  feature_batch.size(0))
                total_loss = alpha * (prediction_head_loss + prediction_tail_loss) + cls_loss
            else:
                total_loss = cls_loss
            cls_losses.update(cls_loss.data.item(), feature_batch.size(0))
            total_losses.update(total_loss.item(), feature_batch.size(0))
        print(
            "Val -- Epoch :{:06d}, LR: {:.6f},\tloss={:.4f}, \t c-loss:{:.4f}, \tloc-loss:{:.4f}\tcls-Accuracy:{:.4f}\tloc-Avg-IOU:{:.4f}\t topIOU:{:.4f}".format(
                epoch, model_optim.param_groups[0]['lr'], total_losses.avg, cls_losses.avg,
                loc_losses.avg, Accuracy.avg, IOU.avg, ordered_IOU.avg))
        # Save every epoch (``epoch % 1`` is always 0; kept for easy interval changes).
        if epoch % 1 == 0:
            save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'loss': total_losses.avg,
                'cls_loss': cls_losses.avg,
                'loc_loss': loc_losses.avg,
                'IoU': IOU.avg}, (epoch + 1), file_direcotry=save_directory)
def main():
    """Load a PointerNetwork checkpoint and print its predictions on the THUMOST14
    validation split for manual inspection.

    Configuration comes from the module-level argparse ``parser`` (stored in the
    global ``args``). No training happens here: each epoch just iterates the
    validation loader and prints ground-truth vs. predicted segments for batches
    that produced at least one assignment.
    """
    global args
    args = (parser.parse_args())
    use_cuda = cuda_model.ifUseCuda(args.gpu_id, args.multiGpu)
    # Pretty print the run args
    pp.pprint(vars(args))
    model = PointerNetwork(input_dim=args.input_dim,
                           embedding_dim=args.embedding_dim,
                           hidden_dim=args.hidden_dim,
                           max_decoding_len=args.net_outputs,
                           dropout=args.dropout,
                           n_enc_layers=2)
    hassign_thres = args.hassign_thres
    print("Number of Params\t{:d}".format(
        sum([p.data.nelement() for p in model.parameters()])))
    script_name_stem = dir_utils.get_stem(__file__)
    save_directory = '{:s}-assgin{:.2f}-alpha{:.4f}-dim{:d}-dropout{:.4f}-ckpt'.format(
        script_name_stem, hassign_thres, args.alpha, args.hidden_dim, args.dropout)
    print("Save ckpt to {:s}".format(save_directory))
    if args.resume is not None:
        # ``args.resume`` is a filename template; ckpt_idx picks the epoch to load.
        ckpt_idx = 7
        ckpt_filename = args.resume.format(ckpt_idx)
        assert os.path.isfile(
            ckpt_filename), 'Error: no checkpoint directory found!'
        checkpoint = torch.load(ckpt_filename,
                                map_location=lambda storage, loc: storage)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        train_iou = checkpoint['IoU']
        args.start_epoch = checkpoint['epoch']
        print("=> loading checkpoint '{}', current iou: {:.04f}".format(
            ckpt_filename, train_iou))
    model = cuda_model.convertModel2Cuda(model,
                                         gpu_id=args.gpu_id,
                                         multiGpu=args.multiGpu)
    # train_dataset = THUMOST14(seq_length=args.seq_len, overlap=0.9, sample_rate=[4], dataset_split='train',rdDrop=True,rdOffset=True)
    val_dataset = THUMOST14(seq_length=args.seq_len, overlap=0.9, sample_rate=[4],
                            dataset_split='val', rdDrop=False, rdOffset=False)
    # train_dataloader = DataLoader(train_dataset,
    #                               batch_size=args.batch_size,
    #                               shuffle=True,
    #                               num_workers=4)
    val_dataloader = DataLoader(val_dataset,
                                batch_size=args.batch_size,
                                shuffle=False,
                                num_workers=4)
    model_optim = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                             lr=float(args.lr))
    optim_scheduler = optim.lr_scheduler.ReduceLROnPlateau(model_optim, 'min', patience=20)
    alpha = args.alpha
    cls_weights = torch.FloatTensor([0.05, 1.0]).cuda()
    for epoch in range(args.start_epoch, args.nof_epoch + args.start_epoch):
        total_losses = AverageMeter()
        loc_losses = AverageMeter()
        cls_losses = AverageMeter()
        Accuracy = AverageMeter()
        IOU = AverageMeter()
        ordered_IOU = AverageMeter()
        # NOTE(review): model is put in train mode although the loop below only
        # inspects validation batches — presumably a leftover; confirm whether
        # model.eval() was intended.
        model.train()
        pbar = progressbar.ProgressBar(max_value=len(val_dataloader))
        for i_batch, sample_batch in enumerate(val_dataloader):
            pbar.update(i_batch)
            feature_batch = Variable(sample_batch[0])
            start_indices = Variable(sample_batch[1])
            end_indices = Variable(sample_batch[2])
            gt_valids = Variable(sample_batch[3])
            if use_cuda:
                feature_batch = feature_batch.cuda()
                start_indices = start_indices.cuda()
                end_indices = end_indices.cuda()
            # Ground-truth and predicted segments packed as (start, end) pairs.
            gt_positions = torch.stack([start_indices, end_indices], dim=-1)
            head_pointer_probs, head_positions, tail_pointer_probs, tail_positions, cls_scores, _ = model(
                feature_batch)
            pred_positions = torch.stack([head_positions, tail_positions], dim=-1)
            assigned_scores, assigned_locations = h_assign.Assign_Batch(
                gt_positions, pred_positions, gt_valids, thres=hassign_thres)
            if np.sum(assigned_scores) > 0:
                # Fixed: these three prints were Python-2 print *statements*
                # ('print "..."'), a syntax error under Python 3; the rest of the
                # block already uses the function form.
                print("Output at {:d}".format(i_batch))
                # n_valid = valid_indices.data[0, 0]
                # view_idx = valid_indices.nonzero()[0][0].item()
                # n_valid = valid_indices[view_idx, 0].item()
                print("GT:")
                print(assigned_locations[0])
                print("Pred")
                print(pred_positions[0])
                # Sorted pointer distributions for the first sample, kept for
                # debugger inspection (torch sort is not in-place).
                _, head_sort = head_pointer_probs[0, 0, :].sort()
                _, tail_sort = tail_pointer_probs[0, 0, :].sort()
                print("END of {:d}".format(i_batch))
def main():
    """Train a PointerNetwork with configurable matching (Hungarian vs. fixed) and
    localization loss (EMD vs. cross-entropy), logging to a per-run file.

    Configuration comes from the module-level argparse ``parser`` (stored in the
    global ``args``). Each epoch: one training pass, then an IoU-only validation
    pass, then a checkpoint save.
    """
    global args
    args = (parser.parse_args())
    use_cuda = cuda_model.ifUseCuda(args.gpu_id, args.multiGpu)
    script_name_stem = dir_utils.get_stem(__file__)
    # Run directory name encodes hyper-parameters plus the chosen loss/match types.
    save_directory = dir_utils.get_dir(
        os.path.join(
            project_root, 'ckpts',
            'Delete-{:s}-assgin{:.2f}-alpha{:.4f}-dim{:d}-dropout{:.4f}-seqlen{:d}-{:s}-{:s}'
            .format(script_name_stem, args.hassign_thres, args.alpha,
                    args.hidden_dim, args.dropout, args.seq_len,
                    loss_type[args.EMD], match_type[args.hmatch])))
    log_file = os.path.join(save_directory,
                            'log-{:s}.txt'.format(dir_utils.get_date_str()))
    logger = chinese_utils.get_logger(log_file)
    chinese_utils.print_config(vars(args), logger)
    model = PointerNetwork(input_dim=args.input_dim,
                           embedding_dim=args.embedding_dim,
                           hidden_dim=args.hidden_dim,
                           max_decoding_len=args.net_outputs,
                           dropout=args.dropout,
                           n_enc_layers=2)
    hassign_thres = args.hassign_thres
    logger.info("Number of Params\t{:d}".format(
        sum([p.data.nelement() for p in model.parameters()])))
    logger.info('Saving logs to {:s}'.format(log_file))
    if args.resume is not None:
        # ``args.resume`` is a filename template; ckpt_idx picks the epoch to load.
        ckpt_idx = 48
        ckpt_filename = args.resume.format(ckpt_idx)
        assert os.path.isfile(
            ckpt_filename), 'Error: no checkpoint directory found!'
        checkpoint = torch.load(ckpt_filename,
                                map_location=lambda storage, loc: storage)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        train_iou = checkpoint['IoU']
        args.start_epoch = checkpoint['epoch']
        logger.info("=> loading checkpoint '{}', current iou: {:.04f}".format(
            ckpt_filename, train_iou))
    model = cuda_model.convertModel2Cuda(model,
                                         gpu_id=args.gpu_id,
                                         multiGpu=args.multiGpu)
    train_dataset = cDataset(dataset_split='train')
    val_dataset = cDataset(dataset_split='val')
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=4)
    val_dataloader = DataLoader(val_dataset,
                                batch_size=args.batch_size,
                                shuffle=False,
                                num_workers=4)
    model_optim = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                             lr=float(args.lr))
    optim_scheduler = optim.lr_scheduler.ReduceLROnPlateau(model_optim, 'min', patience=10)
    alpha = args.alpha
    # cls_weights = torch.FloatTensor([0.05, 1.0]).cuda()
    for epoch in range(args.start_epoch, args.nof_epoch + args.start_epoch):
        # Fresh meters for the training pass of this epoch.
        total_losses = AverageMeter()
        loc_losses = AverageMeter()
        cls_losses = AverageMeter()
        Accuracy = AverageMeter()
        IOU = AverageMeter()
        ordered_IOU = AverageMeter()
        model.train()
        pbar = progressbar.ProgressBar(max_value=len(train_dataloader))
        for i_batch, sample_batch in enumerate(train_dataloader):
            pbar.update(i_batch)
            feature_batch = Variable(sample_batch[0])
            start_indices = Variable(sample_batch[1])
            end_indices = Variable(sample_batch[2])
            gt_valids = Variable(sample_batch[3])
            # seq_labels = Variable(sample_batch[4])
            if use_cuda:
                feature_batch = feature_batch.cuda()
                start_indices = start_indices.cuda()
                end_indices = end_indices.cuda()
            # Ground-truth and predicted segments packed as (start, end) pairs.
            gt_positions = torch.stack([start_indices, end_indices], dim=-1)
            head_pointer_probs, head_positions, tail_pointer_probs, tail_positions, cls_scores, _ = model(
                feature_batch)
            pred_positions = torch.stack([head_positions, tail_positions],
                                         dim=-1)
            if args.hmatch:
                # Hungarian matching provides both the assignment and the IoU stats.
                assigned_scores, assigned_locations, total_valid, total_iou = h_match.Assign_Batch_v2(
                    gt_positions, pred_positions, gt_valids, thres=hassign_thres)
                IOU.update(total_iou / total_valid, total_valid)
            else:
                # Fixed-order matching for the assignment; Hungarian matching is
                # still run to report comparable IoU statistics.
                assigned_scores, assigned_locations = f_match.Assign_Batch(
                    gt_positions, pred_positions, gt_valids, thres=hassign_thres)
                _, _, total_valid, total_iou = h_match.Assign_Batch_v2(
                    gt_positions, pred_positions, gt_valids, thres=hassign_thres)
                IOU.update(total_iou / total_valid, total_valid)
            assigned_scores = Variable(torch.LongTensor(assigned_scores),
                                       requires_grad=False)
            assigned_locations = Variable(torch.LongTensor(assigned_locations),
                                          requires_grad=False)
            if use_cuda:
                assigned_scores = assigned_scores.cuda()
                assigned_locations = assigned_locations.cuda()
            # Flatten batch x decoding-step dimensions for the classification loss.
            cls_scores = cls_scores.contiguous().view(-1,
                                                      cls_scores.size()[-1])
            assigned_scores = assigned_scores.contiguous().view(-1)
            cls_loss = F.cross_entropy(cls_scores, assigned_scores)
            if total_valid > 0:
                # Localization loss only over matched predictions.
                assigned_head_positions = assigned_locations[:, :, 0]
                assigned_head_positions = assigned_head_positions.contiguous(
                ).view(-1)
                assigned_tail_positions = assigned_locations[:, :, 1]
                assigned_tail_positions = assigned_tail_positions.contiguous(
                ).view(-1)
                head_pointer_probs = head_pointer_probs.contiguous().view(
                    -1, head_pointer_probs.size()[-1])
                tail_pointer_probs = tail_pointer_probs.contiguous().view(
                    -1, tail_pointer_probs.size()[-1])
                # Keep only rows whose assigned score is nonzero (matched predictions).
                assigned_head_positions = torch.masked_select(
                    assigned_head_positions, assigned_scores.byte())
                assigned_tail_positions = torch.masked_select(
                    assigned_tail_positions, assigned_scores.byte())
                head_pointer_probs = torch.index_select(
                    head_pointer_probs,
                    dim=0,
                    index=assigned_scores.nonzero().squeeze(1))
                tail_pointer_probs = torch.index_select(
                    tail_pointer_probs,
                    dim=0,
                    index=assigned_scores.nonzero().squeeze(1))
                if args.EMD:
                    # EMD regression against one-hot position targets.
                    assigned_head_positions = to_one_hot(
                        assigned_head_positions, args.seq_len)
                    assigned_tail_positions = to_one_hot(
                        assigned_tail_positions, args.seq_len)
                    prediction_head_loss = EMD_L2(head_pointer_probs,
                                                  assigned_head_positions,
                                                  needSoftMax=True)
                    prediction_tail_loss = EMD_L2(tail_pointer_probs,
                                                  assigned_tail_positions,
                                                  needSoftMax=True)
                else:
                    # Plain cross-entropy on position indices.
                    prediction_head_loss = F.cross_entropy(
                        head_pointer_probs, assigned_head_positions)
                    prediction_tail_loss = F.cross_entropy(
                        tail_pointer_probs, assigned_tail_positions)
                loc_losses.update(
                    prediction_head_loss.data.item() +
                    prediction_tail_loss.data.item(), feature_batch.size(0))
                total_loss = alpha * (prediction_head_loss +
                                      prediction_tail_loss) + cls_loss
            else:
                total_loss = cls_loss
            model_optim.zero_grad()
            total_loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.)
            model_optim.step()
            cls_losses.update(cls_loss.data.item(), feature_batch.size(0))
            total_losses.update(total_loss.item(), feature_batch.size(0))
        logger.info(
            "Train -- Epoch :{:06d}, LR: {:.6f},\tloss={:.4f}, \t c-loss:{:.4f}, \tloc-loss:{:.4f}\tcls-Accuracy:{:.4f}\tloc-Avg-IOU:{:.4f}\t topIOU:{:.4f}"
            .format(epoch, model_optim.param_groups[0]['lr'],
                    total_losses.avg, cls_losses.avg, loc_losses.avg,
                    Accuracy.avg, IOU.avg, ordered_IOU.avg))
        # Scheduler watches the epoch-average training loss.
        optim_scheduler.step(total_losses.avg)
        model.eval()
        # Validation only tracks IoU of the evaluation-time matching.
        IOU = AverageMeter()
        pbar = progressbar.ProgressBar(max_value=len(val_dataloader))
        for i_batch, sample_batch in enumerate(val_dataloader):
            pbar.update(i_batch)
            feature_batch = Variable(sample_batch[0])
            start_indices = Variable(sample_batch[1])
            end_indices = Variable(sample_batch[2])
            gt_valids = Variable(sample_batch[3])
            # valid_indices = Variable(sample_batch[4])
            if use_cuda:
                feature_batch = feature_batch.cuda()
                start_indices = start_indices.cuda()
                end_indices = end_indices.cuda()
            gt_positions = torch.stack([start_indices, end_indices], dim=-1)
            head_pointer_probs, head_positions, tail_pointer_probs, tail_positions, cls_scores, _ = model(
                feature_batch
            )  #Update: compared to the previous version, we now update the matching rules
            pred_positions = torch.stack([head_positions, tail_positions],
                                         dim=-1)
            #TODO: should NOT change here for evaluation!
            assigned_scores, assigned_locations, total_valid, total_iou = h_match.Assign_Batch_eval(
                gt_positions, pred_positions, gt_valids, thres=hassign_thres)
            # NOTE(review): divides by total_valid with no zero guard — presumably
            # every val batch has at least one valid ground-truth segment; confirm.
            IOU.update(total_iou / total_valid, total_valid)
        logger.info(
            "Val -- Epoch :{:06d}, LR: {:.6f},\tloc-Avg-IOU:{:.4f}".format(
                epoch,
                model_optim.param_groups[0]['lr'],
                IOU.avg,
            ))
        # Save every epoch (``epoch % 1`` is always 0; kept for easy interval changes).
        if epoch % 1 == 0:
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'loss': total_losses.avg,
                    'cls_loss': cls_losses.avg,
                    'loc_loss': loc_losses.avg,
                    'IoU': IOU.avg
                }, (epoch + 1),
                file_direcotry=save_directory)
def main():
    """Train a BaseLSTMNetwork per-step binary classifier on the MNIST-style
    sequence dataset, validating each epoch.

    Configuration comes from the module-level argparse ``parser`` (stored in the
    global ``args``). The per-epoch "IOU" meter here is the fraction of
    positive labels recovered (see the hedged note at the computation).
    """
    global args
    args = (parser.parse_args())
    use_cuda = cuda_model.ifUseCuda(args.gpu_id, args.multiGpu)
    script_name_stem = dir_utils.get_stem(__file__)
    # Run directory name encodes hyper-parameters plus the chosen loss/match types.
    save_directory = dir_utils.get_dir(os.path.join(project_root, 'ckpts',
                                                    '{:s}-assgin{:.2f}-alpha{:.4f}-dim{:d}-dropout{:.4f}-seqlen{:d}-{:s}-{:s}'.format(
                                                        script_name_stem, args.hassign_thres, args.alpha,
                                                        args.hidden_dim, args.dropout, args.seq_len,
                                                        loss_type[args.EMD], match_type[args.hmatch])))
    log_file = os.path.join(save_directory, 'log-{:s}.txt'.format(dir_utils.get_date_str()))
    logger = chinese_utils.get_logger(log_file)
    chinese_utils.print_config(vars(args), logger)
    model = BaseLSTMNetwork(input_dim=args.input_dim,
                            embedding_dim=args.embedding_dim,
                            hidden_dim=args.hidden_dim,
                            max_decoding_len=args.net_outputs,
                            dropout=args.dropout,
                            n_enc_layers=2)
    hassign_thres = args.hassign_thres
    logger.info("Number of Params\t{:d}".format(sum([p.data.nelement() for p in model.parameters()])))
    logger.info('Saving logs to {:s}'.format(log_file))
    if args.resume is not None:
        # ``args.resume`` is a filename template; ckpt_idx picks the epoch to load.
        ckpt_idx = 48
        ckpt_filename = args.resume.format(ckpt_idx)
        assert os.path.isfile(ckpt_filename), 'Error: no checkpoint directory found!'
        checkpoint = torch.load(ckpt_filename, map_location=lambda storage, loc: storage)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        train_iou = checkpoint['IoU']
        args.start_epoch = checkpoint['epoch']
        logger.info("=> loading checkpoint '{}', current iou: {:.04f}".format(ckpt_filename, train_iou))
    model = cuda_model.convertModel2Cuda(model, gpu_id=args.gpu_id, multiGpu=args.multiGpu)
    train_dataset = MNIST(dataset_split='train')
    val_dataset = MNIST(dataset_split='val')
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=4)
    val_dataloader = DataLoader(val_dataset,
                                batch_size=args.batch_size,
                                shuffle=False,
                                num_workers=4)
    model_optim = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=float(args.lr))
    optim_scheduler = optim.lr_scheduler.ReduceLROnPlateau(model_optim, 'min', patience=10)
    alpha = args.alpha
    # cls_weights = torch.FloatTensor([0.05, 1.0]).cuda()
    for epoch in range(args.start_epoch, args.nof_epoch + args.start_epoch):
        # Fresh meters for the training pass of this epoch (only total_losses and
        # IOU are actually updated in this variant).
        total_losses = AverageMeter()
        loc_losses = AverageMeter()
        cls_losses = AverageMeter()
        Accuracy = AverageMeter()
        IOU = AverageMeter()
        ordered_IOU = AverageMeter()
        model.train()
        pbar = progressbar.ProgressBar(max_value=len(train_dataloader))
        for i_batch, sample_batch in enumerate(train_dataloader):
            pbar.update(i_batch)
            feature_batch = Variable(sample_batch[0])
            labels = Variable(sample_batch[1])
            if use_cuda:
                feature_batch = feature_batch.cuda()
                labels = labels.cuda()
                # end_indices = end_indices.cuda()
            pred_labels = model(feature_batch)
            # Flatten batch x step dims; pred_labels becomes (N, n_classes).
            labels = labels.contiguous().view(-1)
            pred_labels = pred_labels.contiguous().view(-1, pred_labels.size()[-1])
            # Probability of the positive class, thresholded at 0.5; negatives are
            # mapped to -1 so that only positive predictions can match labels==1.
            pred_probs = F.softmax(pred_labels, dim=1)[:, 1]
            pred_probs[pred_probs > 0.5] = 1
            pred_probs[pred_probs <= 0.5] = -1
            n_positives = torch.sum(labels).item()
            # NOTE(review): this "iou" is true-positives / n_positives (recall of
            # the positive class) and has no guard for n_positives == 0 — presumably
            # each batch contains positive labels; confirm.
            iou = torch.sum(pred_probs == labels.float()).item() * 1. / n_positives
            IOU.update(iou, 1.)
            total_loss = F.cross_entropy(pred_labels, labels)
            model_optim.zero_grad()
            total_loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.)
            model_optim.step()
            # cls_losses.update(cls_loss.data.item(), feature_batch.size(0))
            total_losses.update(total_loss.item(), feature_batch.size(0))
        logger.info(
            "Train -- Epoch :{:06d}, LR: {:.6f},\tloss={:.4f}, \t c-loss:{:.4f}, \tloc-loss:{:.4f}\tcls-Accuracy:{:.4f}\tloc-Avg-IOU:{:.4f}\t topIOU:{:.4f}".format(
                epoch, model_optim.param_groups[0]['lr'], total_losses.avg, cls_losses.avg,
                loc_losses.avg, Accuracy.avg, IOU.avg, ordered_IOU.avg))
        # Scheduler watches the epoch-average training loss.
        optim_scheduler.step(total_losses.avg)
        model.eval()
        # Validation tracks the same recall-style metric only.
        IOU = AverageMeter()
        pbar = progressbar.ProgressBar(max_value=len(val_dataloader))
        for i_batch, sample_batch in enumerate(val_dataloader):
            pbar.update(i_batch)
            feature_batch = Variable(sample_batch[0])
            labels = Variable(sample_batch[1])
            if use_cuda:
                feature_batch = feature_batch.cuda()
                labels = labels.cuda()
            labels = labels.contiguous().view(-1)
            pred_labels = model(feature_batch)
            pred_labels = pred_labels.contiguous().view(-1, pred_labels.size()[-1])
            pred_probs = F.softmax(pred_labels, dim=1)[:, 1]
            n_positives = torch.sum(labels).item()
            pred_probs[pred_probs > 0.5] = 1
            pred_probs[pred_probs <= 0.5] = -1
            iou = torch.sum(pred_probs == labels.float()).item() * 1. / n_positives
            IOU.update(iou, 1.)
        logger.info(
            "Val -- Epoch :{:06d}, LR: {:.6f},\tloc-Avg-IOU:{:.4f}".format(
                epoch, model_optim.param_groups[0]['lr'],
                IOU.avg,
            ))
        # Save every epoch (``epoch % 1`` is always 0; kept for easy interval changes).
        if epoch % 1 == 0:
            save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'loss': total_losses.avg,
                'cls_loss': cls_losses.avg,
                'loc_loss': loc_losses.avg,
                'IoU': IOU.avg}, (epoch + 1), file_direcotry=save_directory)
def main(): # load data sets global args args = parser.parse_args() pp.pprint(vars(args)) running_name = 'X' use_cuda = cuda_model.ifUseCuda(args.gpu_id, args.multiGpu) # use_cuda = False train_file = 'data/example.train' dev_file = 'data/example.dev' test_file = 'data/example.test' embedding_file = 'data/vec.txt' map_file = 'map.pkl' config_file = 'config_file_pytorch' tag_file = 'tag.pkl' embedding_easy_file = 'data/easy_embedding.npy' train_sentences = load_sentences(train_file) dev_sentences = load_sentences(dev_file) test_sentences = load_sentences(test_file) # train_sentences = dev_sentences update_tag_scheme(train_sentences, args.tag_schema) update_tag_scheme(test_sentences, args.tag_schema) update_tag_scheme(dev_sentences, args.tag_schema) if not os.path.isfile(tag_file): _, tag_to_id, id_to_tag = tag_mapping(train_sentences) with open(tag_file, "wb") as f: pickle.dump([tag_to_id, id_to_tag], f) else: with open(tag_file, 'rb') as t: tag_to_id, id_to_tag = pickle.load(t) if not os.path.isfile(map_file): # create dictionary for word dico_chars_train = char_mapping(train_sentences)[0] dico_chars, char_to_id, id_to_char = augment_with_pretrained( dico_chars_train.copy(), embedding_file, list( itertools.chain.from_iterable([[w[0] for w in s] for s in test_sentences]))) # _, tag_to_id, id_to_tag = tag_mapping(train_sentences) with open(map_file, "wb") as f: pickle.dump([char_to_id, id_to_char], f) else: with open(map_file, "rb") as f: char_to_id, id_to_char = pickle.load(f) # prepare data, get a collection of list containing index train_data = prepare_dataset(train_sentences, char_to_id, tag_to_id) dev_data = prepare_dataset(dev_sentences, char_to_id, tag_to_id) test_data = prepare_dataset(test_sentences, char_to_id, tag_to_id) print("%i / %i / %i sentences in train / dev / test." 
% (len(train_data), len(dev_data), len(test_data))) train_manager = BatchManager(train_data, args.batch_size) dev_manager = BatchManager(dev_data, 50) test_manager = BatchManager(test_data, 50) # make path for store log and model if not exist # make_path(FLAGS) if os.path.isfile(config_file): config = load_config(config_file) else: config = config_model(char_to_id, tag_to_id, args) save_config(config, config_file) # make_path(running_name) save_places = dir_utils.save_places(running_name) # log_path = os.path.join("log", FLAGS.log_file) logger = get_logger( os.path.join(save_places.log_save_dir, '{:s}.txt'.format(dir_utils.get_date_str()))) print_config(config, logger) logger.info("start training") # loss = [] #Update: create model and embedding! model = NERModel.CNERPointer(char_dim=args.char_dim, seg_dim=args.seg_dim, hidden_dim=args.hidden_dim, max_length=15, embedding_path=embedding_file, id_to_word=id_to_char, easy_load=embedding_easy_file) print("Number of Params\t{:d}".format( sum([p.data.nelement() for p in model.parameters()]))) #Update: this won't work! 
# model = cuda_model.convertModel2Cuda(model, gpu_id=args.gpu_id, multiGpu=args.multiGpu) if use_cuda: model = model.cuda() model_optim = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=float(args.lr)) optim_scheduler = optim.lr_scheduler.ReduceLROnPlateau(model_optim, 'min', patience=10) for epoch in range(args.start_epoch, args.nof_epoch + args.start_epoch): total_losses = AverageMeter() loc_losses = AverageMeter() cls_losses = AverageMeter() Accuracy = AverageMeter() IOU = AverageMeter() ordered_IOU = AverageMeter() model.train() pbar = progressbar.ProgressBar(max_value=train_manager.len_data) for batch_idx, batch in enumerate( train_manager.iter_batch(shuffle=True)): pbar.update(batch_idx) word_vectors = torch.LongTensor(batch[1]) seg_vectors = torch.LongTensor(batch[2]) batch_size = word_vectors.shape[0] input_length = word_vectors.shape[1] word_input = Variable(word_vectors) seg_input = Variable(seg_vectors) if use_cuda: word_input = word_input.cuda() seg_input = seg_input.cuda() tagging_BIOUS = batch[3] segments, max_len = convertBIOU2SegmentsBatch( tagging_BIOUS, id_to_tag) gt_positions, gt_valids = createPytorchLabels(segments, max_len) head_pointer_probs, head_positions, tail_pointer_probs, tail_positions, cls_scores, _ = model( word_input, seg_input, max_len) pred_positions = torch.stack([head_positions, tail_positions], dim=-1) assigned_scores, assigned_locations = h_assign.Assign_Batch( gt_positions, pred_positions, gt_valids, thres=args.hassign_thres) if np.sum(assigned_scores) >= 1: iou_rate, effective_positives = Metrics.get_avg_iou2( np.reshape(pred_positions.data.cpu().numpy(), (-1, 2)), np.reshape(assigned_locations, (-1, 2)), np.reshape( assigned_scores, assigned_scores.shape[0] * assigned_scores.shape[1])) IOU.update(iou_rate / (effective_positives), effective_positives) # ordered_IOU.update(ordered_iou_rate/(args.batch_size*args.n_outputs),args.batch_size*args.n_outputs) # n_effective_batches += 1 assigned_scores = 
Variable(torch.LongTensor(assigned_scores), requires_grad=False) assigned_locations = Variable(torch.LongTensor(assigned_locations), requires_grad=False) if use_cuda: assigned_scores = assigned_scores.cuda() assigned_locations = assigned_locations.cuda() cls_scores = cls_scores.contiguous().view(-1, cls_scores.size()[-1]) assigned_scores = assigned_scores.contiguous().view(-1) cls_loss = F.cross_entropy(cls_scores, assigned_scores) if torch.sum(assigned_scores) > 0: # print("HAHA") assigned_head_positions = assigned_locations[:, :, 0] assigned_head_positions = assigned_head_positions.contiguous( ).view(-1) # assigned_tail_positions = assigned_locations[:, :, 1] assigned_tail_positions = assigned_tail_positions.contiguous( ).view(-1) head_pointer_probs = head_pointer_probs.contiguous().view( -1, head_pointer_probs.size()[-1]) tail_pointer_probs = tail_pointer_probs.contiguous().view( -1, tail_pointer_probs.size()[-1]) # mask here: if there is non in assigned scores, no need to compute ... 
assigned_head_positions = torch.masked_select( assigned_head_positions, assigned_scores.byte()) assigned_tail_positions = torch.masked_select( assigned_tail_positions, assigned_scores.byte()) head_pointer_probs = torch.index_select( head_pointer_probs, dim=0, index=assigned_scores.nonzero().squeeze(1)) tail_pointer_probs = torch.index_select( tail_pointer_probs, dim=0, index=assigned_scores.nonzero().squeeze(1)) assigned_head_positions = to_one_hot(assigned_head_positions, input_length) assigned_tail_positions = to_one_hot(assigned_tail_positions, input_length) prediction_head_loss = EMD_L2(head_pointer_probs, assigned_head_positions, needSoftMax=True) prediction_tail_loss = EMD_L2(tail_pointer_probs, assigned_tail_positions, needSoftMax=True) loc_losses.update( prediction_head_loss.data.item() + prediction_tail_loss.data.item(), batch_size) total_loss = args.alpha * (prediction_head_loss + prediction_tail_loss) + cls_loss else: total_loss = cls_loss model_optim.zero_grad() total_loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), 1.) 
model_optim.step() cls_losses.update(cls_loss.data.item(), batch_size) total_losses.update(total_loss.item(), batch_size) logger.info( "Train -- Epoch :{:06d}, LR: {:.6f},\tloss={:.4f}, \t c-loss:{:.4f}, \tloc-loss:{:.4f}\tcls-Accuracy:{:.4f}\tloc-Avg-IOU:{:.4f}\t topIOU:{:.4f}" .format(epoch, model_optim.param_groups[0]['lr'], total_losses.avg, cls_losses.avg, loc_losses.avg, Accuracy.avg, IOU.avg, ordered_IOU.avg)) optim_scheduler.step(total_losses.avg) total_losses = AverageMeter() loc_losses = AverageMeter() cls_losses = AverageMeter() Accuracy = AverageMeter() IOU = AverageMeter() ordered_IOU = AverageMeter() model.eval() pbar = progressbar.ProgressBar(max_value=dev_manager.len_data) for batch_idx, batch in enumerate( dev_manager.iter_batch(shuffle=True)): pbar.update(batch_idx) word_vectors = torch.LongTensor(batch[1]) seg_vectors = torch.LongTensor(batch[2]) batch_size = word_vectors.shape[0] input_length = word_vectors.shape[1] word_input = Variable(word_vectors) seg_input = Variable(seg_vectors) if use_cuda: word_input = word_input.cuda() seg_input = seg_input.cuda() tagging_BIOUS = batch[3] segments, max_len = convertBIOU2SegmentsBatch( tagging_BIOUS, id_to_tag) head_pointer_probs, head_positions, tail_pointer_probs, tail_positions, cls_scores, _ = model( word_input, seg_input, max_len) pred_positions = torch.stack([head_positions, tail_positions], dim=-1) gt_positions, gt_valids = createPytorchLabels(segments, max_len) assigned_scores, assigned_locations = h_assign.Assign_Batch( gt_positions, pred_positions, gt_valids, thres=args.hassign_thres) if np.sum(assigned_scores) >= 1: iou_rate, effective_positives = Metrics.get_avg_iou2( np.reshape(pred_positions.data.cpu().numpy(), (-1, 2)), np.reshape(assigned_locations, (-1, 2)), np.reshape( assigned_scores, assigned_scores.shape[0] * assigned_scores.shape[1])) IOU.update(iou_rate / (effective_positives), effective_positives) # 
ordered_IOU.update(ordered_iou_rate/(args.batch_size*args.n_outputs),args.batch_size*args.n_outputs) # n_effective_batches += 1 assigned_scores = Variable(torch.LongTensor(assigned_scores), requires_grad=False) assigned_locations = Variable(torch.LongTensor(assigned_locations), requires_grad=False) if use_cuda: assigned_scores = assigned_scores.cuda() assigned_locations = assigned_locations.cuda() cls_scores = cls_scores.contiguous().view(-1, cls_scores.size()[-1]) assigned_scores = assigned_scores.contiguous().view(-1) cls_loss = F.cross_entropy(cls_scores, assigned_scores) if torch.sum(assigned_scores) > 0: # print("HAHA") assigned_head_positions = assigned_locations[:, :, 0] assigned_head_positions = assigned_head_positions.contiguous( ).view(-1) # assigned_tail_positions = assigned_locations[:, :, 1] assigned_tail_positions = assigned_tail_positions.contiguous( ).view(-1) head_pointer_probs = head_pointer_probs.contiguous().view( -1, head_pointer_probs.size()[-1]) tail_pointer_probs = tail_pointer_probs.contiguous().view( -1, tail_pointer_probs.size()[-1]) # mask here: if there is non in assigned scores, no need to compute ... 
assigned_head_positions = torch.masked_select( assigned_head_positions, assigned_scores.byte()) assigned_tail_positions = torch.masked_select( assigned_tail_positions, assigned_scores.byte()) head_pointer_probs = torch.index_select( head_pointer_probs, dim=0, index=assigned_scores.nonzero().squeeze(1)) tail_pointer_probs = torch.index_select( tail_pointer_probs, dim=0, index=assigned_scores.nonzero().squeeze(1)) assigned_head_positions = to_one_hot(assigned_head_positions, input_length) assigned_tail_positions = to_one_hot(assigned_tail_positions, input_length) prediction_head_loss = EMD_L2(head_pointer_probs, assigned_head_positions, needSoftMax=True) prediction_tail_loss = EMD_L2(tail_pointer_probs, assigned_tail_positions, needSoftMax=True) loc_losses.update( prediction_head_loss.data.item() + prediction_tail_loss.data.item(), batch_size) total_loss = args.alpha * (prediction_head_loss + prediction_tail_loss) + cls_loss else: total_loss = cls_loss # model_optim.zero_grad() # total_loss.backward() # torch.nn.utils.clip_grad_norm_(model.parameters(), 1.) # model_optim.step() cls_losses.update(cls_loss.data.item(), batch_size) total_losses.update(total_loss.item(), batch_size) logger.info( "Val -- Epoch :{:06d}, LR: {:.6f},\tloss={:.4f}, \t c-loss:{:.4f}, \tloc-loss:{:.4f}\tcls-Accuracy:{:.4f}\tloc-Avg-IOU:{:.4f}\t topIOU:{:.4f}" .format(epoch, model_optim.param_groups[0]['lr'], total_losses.avg, cls_losses.avg, loc_losses.avg, Accuracy.avg, IOU.avg, ordered_IOU.avg)) if epoch % 1 == 0: save_checkpoint( { 'epoch': epoch + 1, 'state_dict': model.state_dict(), 'loss': total_losses.avg, 'cls_loss': cls_losses.avg, 'loc_loss': loc_losses.avg, 'IoU': IOU.avg }, (epoch + 1), file_direcotry=save_places.model_save_dir)
def main():
    """Train and validate a PointerNetwork on THUMOST14 pointer targets.

    Parses CLI args (module-level ``parser``), optionally resumes from a
    checkpoint template, then alternates one training pass and one validation
    pass per epoch, checkpointing every epoch and stepping a
    ReduceLROnPlateau scheduler on the average training loss.
    """
    global args
    args = (parser.parse_args())
    use_cuda = cuda_model.ifUseCuda(args.gpu_id, args.multiGpu)
    # Pretty print the run args
    pp.pprint(vars(args))
    model = PointerNetwork(input_dim=args.input_dim,
                           embedding_dim=args.embedding_dim,
                           max_decoding_len=args.net_outputs,
                           dropout=0.5)
    print("Number of Params\t{:d}".format(
        sum([p.data.nelement() for p in model.parameters()])))
    save_directory = 'gru1head_s1_ckpt'
    if args.resume is not None:
        # NOTE(review): checkpoint index is hard-coded; args.resume is treated
        # as a filename template filled with this index.
        ckpt_idx = 9
        ckpt_filename = args.resume.format(ckpt_idx)
        assert os.path.isfile(
            ckpt_filename), 'Error: no checkpoint directory found!'
        # map_location keeps loaded tensors on CPU regardless of save device.
        checkpoint = torch.load(ckpt_filename,
                                map_location=lambda storage, loc: storage)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        train_iou = checkpoint['IoU']
        args.start_epoch = checkpoint['epoch']
        print("=> loading checkpoint '{}', current iou: {:.04f}".format(
            ckpt_filename, train_iou))
    model = cuda_model.convertModel2Cuda(model,
                                         gpu_id=args.gpu_id,
                                         multiGpu=args.multiGpu)
    train_dataset = THUMOST14(seq_length=args.seq_len,
                              overlap=3,
                              sample_rate=1,
                              dataset_split='train')
    val_dataset = THUMOST14(seq_length=args.seq_len,
                            overlap=3,
                            sample_rate=1,
                            dataset_split='val')
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=4)
    val_dataloader = DataLoader(val_dataset,
                                batch_size=args.batch_size,
                                shuffle=True,
                                num_workers=4)
    # Optimize only parameters that require gradients (frozen ones skipped).
    model_optim = optim.Adam(filter(lambda p: p.requires_grad,
                                    model.parameters()),
                             lr=float(args.lr))
    optim_scheduler = optim.lr_scheduler.ReduceLROnPlateau(model_optim, 'min')
    # Weight of the pointer (localization) loss relative to the cls loss.
    alpha = 0.1
    for epoch in range(args.start_epoch, args.nof_epoch + args.start_epoch):
        total_losses = AverageMeter()
        loc_losses = AverageMeter()
        cls_losses = AverageMeter()
        # NOTE(review): Accuracy, IOU and ordered_IOU are never updated in
        # this script, so the printed fields below stay at 0.
        Accuracy = AverageMeter()
        IOU = AverageMeter()
        ordered_IOU = AverageMeter()
        model.train()
        pbar = progressbar.ProgressBar(max_value=len(train_dataloader))
        for i_batch, sample_batch in enumerate(train_dataloader):
            pbar.update(i_batch)
            feature_batch = Variable(sample_batch[0])
            cur_batch_size = feature_batch.shape[0]
            pointer_positions = sample_batch[1]
            valid_indices = sample_batch[2]
            # Build dense targets: valid_indicators marks which of the
            # args.net_outputs decoder slots carry a ground-truth pointer;
            # assigned_positions holds the pointer targets for those slots.
            valid_indicators = torch.zeros([cur_batch_size,
                                            args.net_outputs]).long()
            assigned_positions = torch.zeros(
                [cur_batch_size, args.net_outputs]).long()
            for batch_idx in range(cur_batch_size):
                # assumes valid_indices[:, 0] is the per-sample count of
                # valid pointers -- TODO confirm against the dataset class.
                bounded_valid_idx = min(valid_indices[batch_idx, 0],
                                        args.net_outputs)
                valid_indicators[batch_idx, :bounded_valid_idx] = 1
                assigned_positions[
                    batch_idx, :bounded_valid_idx] = pointer_positions[
                        batch_idx, :bounded_valid_idx]
            if use_cuda:
                feature_batch = feature_batch.cuda()
                assigned_positions = assigned_positions.cuda()
                valid_indicators = valid_indicators.cuda()
            pred_pointer_probs, pred_positions, cls_scores = model(
                feature_batch)
            # Flatten (batch, slots) -> (batch*slots,) for the losses below.
            valid_indicators = valid_indicators.contiguous().view(-1)
            assigned_positions = assigned_positions.contiguous().view(-1)
            cls_scores = cls_scores.contiguous().view(-1,
                                                      cls_scores.size()[-1])
            # Classification loss: does each decoder slot hold a valid output?
            cls_loss = F.cross_entropy(cls_scores, valid_indicators)
            if torch.sum(valid_indicators) > 0:
                pred_pointer_probs = pred_pointer_probs.contiguous().view(
                    -1, pred_pointer_probs.size()[-1])
                # Keep only the slots that actually have a target.
                assigned_positions = torch.masked_select(
                    assigned_positions, valid_indicators.byte())
                pred_pointer_probs = torch.index_select(
                    pred_pointer_probs,
                    dim=0,
                    index=valid_indicators.nonzero().squeeze(1))
                prediction_head_loss = F.cross_entropy((pred_pointer_probs),
                                                       assigned_positions)
                loc_losses.update(prediction_head_loss.data.item(),
                                  feature_batch.size(0))
                total_loss = alpha * (prediction_head_loss) + cls_loss
            else:
                # No valid targets in this batch: train on cls loss alone.
                total_loss = cls_loss
            model_optim.zero_grad()
            total_loss.backward()
            # Clip gradient norm to stabilize pointer-network training.
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.)
            model_optim.step()
            cls_losses.update(cls_loss.data.item(), feature_batch.size(0))
            total_losses.update(total_loss.data.item(),
                                feature_batch.size(0))
        print(
            "Train -- Epoch :{:06d}, LR: {:.6f},\tloss={:.4f}, \t c-loss:{:.4f}, \tloc-loss:{:.4f}\tcls-Accuracy:{:.4f}\tloc-Avg-IOU:{:.4f}\t topIOU:{:.4f}"
            .format(epoch, model_optim.param_groups[0]['lr'],
                    total_losses.avg, cls_losses.avg, loc_losses.avg,
                    Accuracy.avg, IOU.avg, ordered_IOU.avg))
        # Checkpoint every epoch ('file_direcotry' typo is the helper's actual
        # keyword name; kept for compatibility).
        if epoch % 1 == 0:
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'loss': total_losses.avg,
                    'cls_loss': cls_losses.avg,
                    'loc_loss': loc_losses.avg,
                    'IoU': IOU.avg
                }, (epoch + 1),
                file_direcotry=save_directory)
        # Scheduler keyed on the average *training* loss.
        optim_scheduler.step(total_losses.avg)
        model.eval()
        # Fresh meters for the validation pass.
        total_losses = AverageMeter()
        loc_losses = AverageMeter()
        cls_losses = AverageMeter()
        Accuracy = AverageMeter()
        IOU = AverageMeter()
        ordered_IOU = AverageMeter()
        pbar = progressbar.ProgressBar(max_value=len(val_dataloader))
        # Validation: same target construction and losses, no optimizer step.
        # NOTE(review): not wrapped in torch.no_grad(), so autograd graphs
        # are still built here -- confirm whether that is intentional.
        for i_batch, sample_batch in enumerate(val_dataloader):
            pbar.update(i_batch)
            feature_batch = Variable(sample_batch[0])
            cur_batch_size = feature_batch.shape[0]
            pointer_positions = sample_batch[1]
            valid_indices = sample_batch[2]
            valid_indicators = torch.zeros([cur_batch_size,
                                            args.net_outputs]).long()
            assigned_positions = torch.zeros(
                [cur_batch_size, args.net_outputs]).long()
            for batch_idx in range(cur_batch_size):
                bounded_valid_idx = min(valid_indices[batch_idx, 0],
                                        args.net_outputs)
                valid_indicators[batch_idx, :bounded_valid_idx] = 1
                assigned_positions[
                    batch_idx, :bounded_valid_idx] = pointer_positions[
                        batch_idx, :bounded_valid_idx]
            if use_cuda:
                feature_batch = feature_batch.cuda()
                assigned_positions = assigned_positions.cuda()
                valid_indicators = valid_indicators.cuda()
            pred_pointer_probs, pred_positions, cls_scores = model(
                feature_batch)
            valid_indicators = valid_indicators.contiguous().view(-1)
            assigned_positions = assigned_positions.contiguous().view(-1)
            cls_scores = cls_scores.contiguous().view(-1,
                                                      cls_scores.size()[-1])
            cls_loss = F.cross_entropy(cls_scores, valid_indicators)
            if torch.sum(valid_indicators) > 0:
                pred_pointer_probs = pred_pointer_probs.contiguous().view(
                    -1, pred_pointer_probs.size()[-1])
                assigned_positions = torch.masked_select(
                    assigned_positions, valid_indicators.byte())
                pred_pointer_probs = torch.index_select(
                    pred_pointer_probs,
                    dim=0,
                    index=valid_indicators.nonzero().squeeze(1))
                prediction_head_loss = F.cross_entropy((pred_pointer_probs),
                                                       assigned_positions)
                loc_losses.update(prediction_head_loss.data.item(),
                                  feature_batch.size(0))
                total_loss = alpha * (prediction_head_loss) + cls_loss
            else:
                total_loss = cls_loss
            cls_losses.update(cls_loss.data.item(), feature_batch.size(0))
            total_losses.update(total_loss.data.item(),
                                feature_batch.size(0))
        print(
            "Val -- Epoch :{:06d}, LR: {:.6f},\tloss={:.4f}, \t c-loss:{:.4f}, \tloc-loss:{:.4f}\tcls-Accuracy:{:.4f}\tloc-Avg-IOU:{:.4f}\t topIOU:{:.4f}"
            .format(epoch, model_optim.param_groups[0]['lr'],
                    total_losses.avg, cls_losses.avg, loc_losses.avg,
                    Accuracy.avg, IOU.avg, ordered_IOU.avg))
def main():
    """Train (or, with --eval, evaluate) a TURN model on THUMOS14 clips.

    Builds foreground/background training clips and a sliding-window val set,
    then runs a joint classification + localization-regression training loop,
    checkpointing after every epoch.
    """
    global args
    args = parser.parse_args()
    use_cuda = cuda_model.ifUseCuda(args.gpu_id, args.multiGpu)
    model = network.TURN(feature_size=args.input_size,
                         mid_layer_size=args.hidden_size,
                         drop=args.dropout)
    print("Number of Params \t{:d}".format(
        sum([p.data.nelement() for p in model.parameters()])))
    if args.resume is not None:
        assert os.path.isfile(
            args.resume), 'Error: no checkpoint directory found!'
        # map_location keeps loaded tensors on CPU regardless of save device.
        checkpoint = torch.load(args.resume,
                                map_location=lambda storage, loc: storage)
        args.start_epoch = checkpoint['epoch']
        # args.start_epoch = 0
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        print("=> loading checkpoint '{:s}', epoch: {:d}\n".format(
            args.resume, args.start_epoch))
    else:
        # NOTE(review): 'srcatch' is a typo for 'scratch' in this runtime
        # message (left untouched here -- changing it alters output).
        print("Training from srcatch")
    model = cuda_model.convertModel2Cuda(model,
                                         gpu_id=args.gpu_id,
                                         multiGpu=args.multiGpu)
    # Hard-coded machine-specific data locations.
    feature_directory = '/home/zwei/datasets/THUMOS14/features/denseflow'
    train_clip_foreground_path = '/home/zwei/Dev/TURN_TAP_ICCV17/turn_codes/val_training_samples.txt'
    train_clip_background_path = '/home/zwei/Dev/TURN_TAP_ICCV17/turn_codes/background_samples.txt'
    val_clip_path = '/home/zwei/Dev/TURN_TAP_ICCV17/turn_codes/test_swin.txt'
    train_dataset = thumos14_iccv17.TrainDataSet(
        feature_directory=feature_directory,
        foreground_path=train_clip_foreground_path,
        background_path=train_clip_background_path,
        n_ctx=4,
        feature_size=args.input_size)
    val_dataset = thumos14_iccv17.EvaluateDataset(
        feature_directory=feature_directory,
        clip_path=val_clip_path,
        n_ctx=4,
        unit_size=16.,
        feature_size=args.input_size)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=4)
    # Val loader is deliberately unshuffled for stable evaluation order.
    val_dataloader = DataLoader(val_dataset,
                                batch_size=args.batch_size,
                                shuffle=False,
                                num_workers=4)
    if args.eval:
        # Evaluation-only mode: run the evaluator on the resumed model and
        # exit without training.
        evaluator = evaluation.Evaluator(dataloader=val_dataloader,
                                         save_directory=args.branch,
                                         savename=os.path.basename(
                                             args.resume))
        evaluator.evaluate(model, use_cuda=use_cuda)
        sys.exit(0)
    # Optimize only parameters that require gradients.
    model_optim = optim.Adam(filter(lambda p: p.requires_grad,
                                    model.parameters()),
                             lr=args.lr)
    # Best-so-far trackers, plus one-hot flags consumed by save_checkpoint.
    best_status = {
        'train_accuracy': 0,
        'val_accuracy': 0,
        'train_loss': float('inf'),
        'val_loss': float('inf')
    }
    isBest_status = {
        'train_accuracy': 0,
        'val_accuracy': 0,
        'train_loss': 0,
        'val_loss': 0
    }
    for epoch in range(args.start_epoch, args.nof_epoch):
        total_losses = AverageMeter()
        loc_losses = AverageMeter()
        cls_losses = AverageMeter()
        Accuracy_cls = AverageMeter()
        Accuracy_loc = AverageMeter()
        model.train()
        pbar = progressbar.ProgressBar(max_value=len(train_dataloader))
        for i_batch, sample_batched in enumerate(train_dataloader):
            pbar.update(i_batch)
            feature_batch = Variable(sample_batched[0])
            offset_batch = Variable(sample_batched[1])
            label_batch = Variable(sample_batched[2])
            # clip_batch stays on CPU: it is consumed as numpy below.
            clip_batch = (sample_batched[3])
            if use_cuda:
                feature_batch = feature_batch.cuda()
                offset_batch = offset_batch.cuda()
                label_batch = label_batch.cuda()
                # clip_batch = clip_batch.cuda()
            if args.normalize > 0:
                # L2-normalize features along the feature dimension.
                feature_batch = F.normalize(feature_batch, p=2, dim=1)
            output_v = model(feature_batch)
            # Split the raw network output into cls / loc heads.
            cls_logits, loc_logits, _, _ = network.extract_outputs(output_v)
            cls_loss = network.cls_loss(cls_logits, label_batch.long())
            loc_loss = network.loc_loss(loc_logits, offset_batch, label_batch)
            cls_accuracy = Metrics.accuracy_topN(cls_logits.data,
                                                 label_batch.long().data)
            loc_accuracy, n_valid = Metrics.IoU(clip_batch.numpy(),
                                                loc_logits.data.cpu().numpy(),
                                                label_batch.data.cpu().numpy())
            # args.plambda weights the localization term.
            total_loss = cls_loss + args.plambda * loc_loss
            model_optim.zero_grad()
            total_loss.backward()
            model_optim.step()
            # NOTE(review): `.data[0]` is legacy (pre-0.4) PyTorch tensor
            # indexing; modern versions require `.item()`.
            total_losses.update(total_loss.data[0], feature_batch.size(0))
            cls_losses.update(cls_loss.data[0], feature_batch.size(0))
            loc_losses.update(loc_loss.data[0], feature_batch.size(0))
            Accuracy_cls.update(cls_accuracy[0][0], feature_batch.size(0))
            Accuracy_loc.update(loc_accuracy, n_valid)
        print(
            "Train -- Epoch :{:06d}, LR: {:.6f},\tloc-loss={:.4f}\tcls-loss={:.4f}\tCls-Accuracy={:.4f}\tIoU={:.4f}"
            .format(epoch, model_optim.param_groups[0]['lr'],
                    loc_losses.avg, cls_losses.avg, Accuracy_cls.avg,
                    Accuracy_loc.avg))
        # Track bests and raise the corresponding is-best flags.
        if best_status['train_loss'] > total_losses.avg:
            best_status['train_loss'] = total_losses.avg
            isBest_status['train_loss'] = 1
        if best_status['train_accuracy'] < Accuracy_cls.avg:
            best_status['train_accuracy'] = Accuracy_cls.avg
            isBest_status['train_accuracy'] = 1
        # 'file_direcotry' typo is the helper's actual keyword name.
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'val_loss': best_status['val_loss'],
                'val_accuracy': best_status['val_accuracy'],
                'train_loss': best_status['train_loss'],
                'train_accuracy': best_status['train_accuracy']
            }, isBest_status,
            file_direcotry=args.branch)
        # Reset is-best flags for the next epoch.
        for item in isBest_status.keys():
            isBest_status[item] = 0
def main():
    """Debug/inspection run of a two-head PointerNetwork on THUMOST14.

    Loads a checkpoint, runs the model in eval mode over one pass of the
    (unshuffled) data, prints ground-truth vs. predicted segment positions
    for batches that contain valid segments, accumulates losses/IoU for a
    summary line, and stops after the first epoch (``break``).

    Fixes vs. original: Python-2 ``print`` statements converted to the
    ``print()`` function (syntax error under Python 3; single-argument calls
    are behavior-identical under Python 2), and the unused
    ``head_sort``/``tail_sort`` locals (sorted-but-never-read tensors)
    removed.
    """
    global args
    args = (parser.parse_args())
    use_cuda = cuda_model.ifUseCuda(args.gpu_id, args.multiGpu)
    pp.pprint(vars(args))
    model = PointerNetwork(input_dim=args.input_dim,
                           embedding_dim=args.embedding_dim,
                           hidden_dim=args.hidden_dim,
                           max_decoding_len=args.net_outputs,
                           dropout=0.5)
    print("Number of Params\t{:d}".format(
        sum([p.data.nelement() for p in model.parameters()])))
    # save_directory = 'gru2heads_proposal_s4-2_ckpt'
    if args.resume is not None:
        # Hard-coded checkpoint index; args.resume is a filename template.
        ckpt_idx = 2
        ckpt_filename = args.resume.format(ckpt_idx)
        assert os.path.isfile(
            ckpt_filename), 'Error: no checkpoint directory found!'
        # map_location keeps loaded tensors on CPU regardless of save device.
        checkpoint = torch.load(ckpt_filename,
                                map_location=lambda storage, loc: storage)
        model.load_state_dict(checkpoint['state_dict'], strict=True)
        args.start_epoch = checkpoint['epoch']
        train_iou = checkpoint['IoU']
        train_tloss = checkpoint['loss']
        train_cls_loss = checkpoint['cls_loss']
        train_loc_loss = checkpoint['loc_loss']
        print(
            "=> loading checkpoint '{}', total loss: {:.04f},\t cls_loss: {:.04f},\t loc_loss: {:.04f},"
            " \tcurrent iou: {:.04f}".format(ckpt_filename, train_tloss,
                                             train_cls_loss, train_loc_loss,
                                             train_iou))
    model = cuda_model.convertModel2Cuda(model,
                                        gpu_id=args.gpu_id,
                                        multiGpu=args.multiGpu)
    # NOTE: inspects the 'val' split, unshuffled, despite the variable names.
    train_dataset = THUMOST14(seq_length=args.seq_len,
                              overlap=0.9,
                              sample_rate=[1, 2, 4],
                              dataset_split='val')
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=False,
                                  num_workers=4)
    # Optimizer/scheduler are set up but never stepped in this debug script.
    model_optim = optim.Adam(filter(lambda p: p.requires_grad,
                                    model.parameters()),
                             lr=float(args.lr))
    optim_scheduler = optim.lr_scheduler.ReduceLROnPlateau(model_optim, 'min')
    # Weight of the pointer (localization) losses relative to the cls loss.
    alpha = 0.1
    for epoch in range(args.start_epoch, args.nof_epoch + args.start_epoch):
        total_losses = AverageMeter()
        loc_losses = AverageMeter()
        cls_losses = AverageMeter()
        Accuracy = AverageMeter()
        IOU = AverageMeter()
        ordered_IOU = AverageMeter()
        # Inspection only: model stays in eval mode and no gradients are
        # applied (optimizer calls below are commented out).
        model.eval()
        pbar = progressbar.ProgressBar(max_value=len(train_dataloader))
        for i_batch, sample_batch in enumerate(train_dataloader):
            # pbar.update(i_batch)
            feature_batch = Variable(sample_batch[0])
            start_indices = Variable(sample_batch[1])
            end_indices = Variable(sample_batch[2])
            valid_indices = Variable(sample_batch[3])
            if use_cuda:
                feature_batch = feature_batch.cuda()
                start_indices = start_indices.cuda()
                end_indices = end_indices.cuda()
            # Ground-truth segments as (start, end) pairs per slot.
            gt_positions = torch.stack([start_indices, end_indices], dim=-1)
            head_pointer_probs, head_positions, tail_pointer_probs, tail_positions, cls_scores, _ = model(
                feature_batch)
            pred_positions = torch.stack([head_positions, tail_positions],
                                         dim=-1)
            # Hungarian-style matching of predictions to ground truth.
            assigned_scores, assigned_locations = h_match.Assign_Batch(
                gt_positions, pred_positions, valid_indices, thres=0.5)
            if valid_indices.byte().any() > 0:
                # Dump GT vs. predictions for the first sample in the batch
                # that has valid segments.
                print("Output at {:d}".format(i_batch))
                # n_valid = valid_indices.data[0, 0]
                view_idx = valid_indices.nonzero()[0][0].item()
                n_valid = valid_indices[view_idx, 0].item()
                print("GT:")
                print(gt_positions[view_idx, :n_valid, :])
                print("Pred")
                print(pred_positions[view_idx])
                print("END of {:d}".format(i_batch))
                iou_rate, effective_positives = Losses.Metrics.get_avg_iou2(
                    np.reshape(pred_positions.data.cpu().numpy(), (-1, 2)),
                    np.reshape(assigned_locations, (-1, 2)),
                    np.reshape(
                        assigned_scores,
                        assigned_scores.shape[0] * assigned_scores.shape[1]))
                IOU.update(iou_rate / (effective_positives),
                           effective_positives)
            assigned_scores = Variable(torch.LongTensor(assigned_scores),
                                       requires_grad=False)
            assigned_locations = Variable(
                torch.LongTensor(assigned_locations), requires_grad=False)
            if use_cuda:
                assigned_scores = assigned_scores.cuda()
                assigned_locations = assigned_locations.cuda()
            # Flatten (batch, slots) -> (batch*slots,) for the losses below.
            cls_scores = cls_scores.contiguous().view(
                -1, cls_scores.size()[-1])
            assigned_scores = assigned_scores.contiguous().view(-1)
            cls_loss = F.cross_entropy(cls_scores, assigned_scores)
            assigned_head_positions = assigned_locations[:, :, 0]
            assigned_head_positions = assigned_head_positions.contiguous(
            ).view(-1)
            assigned_tail_positions = assigned_locations[:, :, 1]
            assigned_tail_positions = assigned_tail_positions.contiguous(
            ).view(-1)
            head_pointer_probs = head_pointer_probs.contiguous().view(
                -1, head_pointer_probs.size()[-1])
            tail_pointer_probs = tail_pointer_probs.contiguous().view(
                -1, tail_pointer_probs.size()[-1])
            # Per-slot CE, masked by assigned_scores so unmatched slots
            # contribute zero to the mean.
            prediction_head_loss = F.cross_entropy((head_pointer_probs),
                                                   assigned_head_positions,
                                                   reduce=False)
            prediction_head_loss = torch.mean(prediction_head_loss *
                                              assigned_scores.float())
            prediction_tail_loss = F.cross_entropy((tail_pointer_probs),
                                                   assigned_tail_positions,
                                                   reduce=False)
            prediction_tail_loss = torch.mean(prediction_tail_loss *
                                              assigned_scores.float())
            total_loss = alpha * (prediction_head_loss +
                                  prediction_tail_loss) + cls_loss
            # model_optim.zero_grad()
            # total_loss.backward()
            # torch.nn.utils.clip_grad_norm(model.parameters(), 1.)
            # model_optim.step()
            cls_losses.update(cls_loss.data.item(), feature_batch.size(0))
            loc_losses.update(
                prediction_head_loss.data.item() +
                prediction_tail_loss.data.item(), feature_batch.size(0))
            total_losses.update(total_loss.data.item(),
                                feature_batch.size(0))
        print(
            "Train -- Epoch :{:06d}, LR: {:.6f},\tloss={:.4f}, \t c-loss:{:.4f}, \tloc-loss:{:.4f}\tcls-Accuracy:{:.4f}\tloc-Avg-IOU:{:.4f}\t topIOU:{:.4f}"
            .format(epoch, model_optim.param_groups[0]['lr'],
                    total_losses.avg, cls_losses.avg, loc_losses.avg,
                    Accuracy.avg, IOU.avg, ordered_IOU.avg))
        # Single inspection pass only.
        break
def main():
    """Train/evaluate PointerNet segment-score regression on SumMe clips.

    Trains only the classification/regression head against IoU-thresholded
    overlaps of randomly sampled segments, then runs the same loss on the
    val split each epoch and checkpoints with best-so-far bookkeeping.
    """
    global args
    args = parser.parse_args()
    use_cuda = cuda_model.ifUseCuda(args.gpu_id, args.multiGpu)
    model = PointerNet(args.input_size, args.hidden_size, args.nlayers,
                       args.dropout, args.bidir)
    if args.resume is not None:
        assert os.path.isfile(
            args.resume), 'Error: no checkpoint directory found!'
        # map_location keeps loaded tensors on CPU regardless of save device.
        checkpoint = torch.load(args.resume,
                                map_location=lambda storage, loc: storage)
        args.start_epoch = checkpoint['epoch']
        # NOTE(review): the resumed epoch is immediately discarded here, so
        # training always restarts from epoch 0 -- confirm this is intended.
        args.start_epoch = 0
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        print("=> loading checkpoint '{:s}', epoch: {:d}".format(
            args.resume, args.start_epoch))
    model = cuda_model.convertModel2Cuda(model,
                                         gpu_id=args.gpu_id,
                                         multiGpu=args.multiGpu)
    # output_rdIdx makes the dataset also yield random segment indices
    # (sample_batched[3]) used as model decoding anchors below.
    train_dataset = LocalDataLoader.Dataset(dataset_name="SumMe",
                                            split='train',
                                            clip_size=args.clip_size,
                                            output_score=True,
                                            output_rdIdx=True,
                                            sample_rates=[1, 5, 10])
    val_dataset = LocalDataLoader.Dataset(dataset_name="SumMe",
                                          split='val',
                                          clip_size=args.clip_size,
                                          output_score=True,
                                          output_rdIdx=True)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=4)
    val_dataloader = DataLoader(val_dataset,
                                batch_size=args.batch_size,
                                shuffle=True,
                                num_workers=4)
    # Optimize only parameters that require gradients.
    model_optim = optim.Adam(filter(lambda p: p.requires_grad,
                                    model.parameters()),
                             lr=args.lr)
    # Best-so-far trackers, plus one-hot flags consumed by save_checkpoint.
    best_status = {
        'train_accuracy': 0,
        'val_accuracy': 0,
        'train_loss': float('inf'),
        'val_loss': float('inf')
    }
    isBest_status = {
        'train_accuracy': 0,
        'val_accuracy': 0,
        'train_loss': 0,
        'val_loss': 0
    }
    for epoch in range(args.start_epoch, args.nof_epoch):
        total_losses = AverageMeter()
        loc_losses = AverageMeter()
        cls_losses = AverageMeter()
        # NOTE(review): the accuracy/F1 meters are never updated in this
        # script, so the 'accuracy' best-status entries stay at 0.
        Accuracy_Top1 = AverageMeter()
        Accuracy_Top3 = AverageMeter()
        F1_Top1 = AverageMeter()
        F1_Top3 = AverageMeter()
        model.train()
        pbar = progressbar.ProgressBar(max_value=len(train_dataloader))
        for i_batch, sample_batched in enumerate(train_dataloader):
            pbar.update(i_batch)
            feature_batch = Variable(sample_batched[0])
            gt_index_batch = Variable(sample_batched[1])
            score_batch = Variable(sample_batched[2])
            rd_index_batch = Variable(sample_batched[3])
            if use_cuda:
                feature_batch = feature_batch.cuda()
                rd_index_batch = rd_index_batch.cuda()
                gt_index_batch = gt_index_batch.cuda()
                score_batch = score_batch.cuda()
            # Random segment (start, end) used to anchor the decoder.
            rd_starting_index_batch = rd_index_batch[:, 0]
            rd_ending_index_batch = rd_index_batch[:, 1]
            _, segment_score = model(feature_batch,
                                     starting_idx=rd_starting_index_batch,
                                     ending_idx=rd_ending_index_batch)
            # IoU of the random segment vs. ground truth, used as the
            # regression target gate (thres=0.5).
            overlap = loss_transforms.IoU_Overlaps(rd_index_batch.data,
                                                   gt_index_batch.data)
            overlap = Variable(overlap, requires_grad=False)
            if use_cuda:
                overlap = overlap.cuda()
            cls_loss = losses.ClsLocLoss_Regression(segment_score,
                                                    score_batch,
                                                    overlap,
                                                    thres=0.5)
            total_loss = cls_loss
            model_optim.zero_grad()
            total_loss.backward()
            model_optim.step()
            # NOTE(review): `.data[0]` is legacy (pre-0.4) PyTorch tensor
            # indexing; modern versions require `.item()`.
            total_losses.update(total_loss.data[0], feature_batch.size(0))
        print("Train -- Epoch :{:06d}, LR: {:.6f},\tcls-loss={:.4f}".format(
            epoch, model_optim.param_groups[0]['lr'], total_losses.avg))
        if best_status['train_loss'] > total_losses.avg:
            best_status['train_loss'] = total_losses.avg
            isBest_status['train_loss'] = 1
        if best_status['train_accuracy'] < Accuracy_Top1.avg:
            best_status['train_accuracy'] = Accuracy_Top1.avg
            isBest_status['train_accuracy'] = 1
        model.eval()
        # Reuse the meters for the validation pass.
        total_losses.reset()
        loc_losses.reset()
        cls_losses.reset()
        Accuracy_Top1.reset()
        Accuracy_Top3.reset()
        F1_Top1.reset()
        F1_Top3.reset()
        pbar = progressbar.ProgressBar(max_value=len(val_dataloader))
        # Validation: same loss computation, no optimizer step.
        for i_batch, sample_batched in enumerate(val_dataloader):
            pbar.update(i_batch)
            feature_batch = Variable(sample_batched[0])
            gt_index_batch = Variable(sample_batched[1])
            score_batch = Variable(sample_batched[2])
            rd_index_batch = Variable(sample_batched[3])
            if use_cuda:
                feature_batch = feature_batch.cuda()
                rd_index_batch = rd_index_batch.cuda()
                gt_index_batch = gt_index_batch.cuda()
                score_batch = score_batch.cuda()
            rd_starting_index_batch = rd_index_batch[:, 0]
            rd_ending_index_batch = rd_index_batch[:, 1]
            _, segment_score = model(feature_batch,
                                     starting_idx=rd_starting_index_batch,
                                     ending_idx=rd_ending_index_batch)
            overlap = loss_transforms.IoU_Overlaps(rd_index_batch.data,
                                                   gt_index_batch.data)
            overlap = Variable(overlap, requires_grad=False)
            if use_cuda:
                overlap = overlap.cuda()
            cls_loss = losses.ClsLocLoss_Regression(segment_score,
                                                    score_batch,
                                                    overlap,
                                                    thres=0.5)
            total_loss = cls_loss
            # Here adjust the ratio based on loss values...
            total_losses.update(total_loss.data[0], feature_batch.size(0))
        print("Test -- Epoch :{:06d}, LR: {:.6f},\tcls-loss={:.4f}".format(
            epoch,
            model_optim.param_groups[0]['lr'],
            total_losses.avg,
        ))
        # val_F1_score = val_evaluator.EvaluateTop1(model, use_cuda)
        # print "Val F1 Score: {:f}".format(val_F1_score)
        if best_status['val_loss'] > total_losses.avg:
            best_status['val_loss'] = total_losses.avg
            isBest_status['val_loss'] = 1
        if best_status['val_accuracy'] < Accuracy_Top1.avg:
            best_status['val_accuracy'] = Accuracy_Top1.avg
            isBest_status['val_accuracy'] = 1
        # 'file_direcotry' typo is the helper's actual keyword name.
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'val_loss': best_status['val_loss'],
                'val_accuracy': best_status['val_accuracy'],
                'train_loss': best_status['train_loss'],
                'train_accuracy': best_status['train_accuracy']
            }, isBest_status,
            file_direcotry='vsPtrDep_Classification')
        # Reset is-best flags for the next epoch.
        for item in isBest_status.keys():
            isBest_status[item] = 0
def main():
    """Train and validate a PointerNetwork summarizer on SumMe/TVSum features.

    Reads configuration from the module-level ``parser`` (argparse), builds a
    reproducible 5-fold train/val split, trains with a joint pointer
    (cross-entropy) + score-regression (MSE) loss, evaluates F1 with the
    project Evaluator every epoch, and checkpoints every epoch.
    """
    global args
    args = (parser.parse_args())
    use_cuda = cuda_model.ifUseCuda(args.gpu_id, args.multiGpu)
    script_name_stem = dir_utils.get_stem(__file__)
    # Checkpoint directory name encodes the full hyper-parameter setting so
    # runs with different configs never collide on disk.
    save_directory = dir_utils.get_dir(
        os.path.join(
            project_root, 'ckpts',
            '{:s}-{:s}-{:s}-split-{:d}-decoderatio-{:.2f}-alpha{:.4f}-dim{:d}-dropout{:.4f}'
            .format(script_name_stem, args.dataset, args.eval_metrics,
                    args.split, args.decode_ratio, args.alpha,
                    args.hidden_dim, args.dropout)))
    log_file = os.path.join(save_directory,
                            'log-{:s}.txt'.format(dir_utils.get_date_str()))
    logger = log_utils.get_logger(log_file)
    log_utils.print_config(vars(args), logger)

    # get train/val split
    # NOTE(review): no else-branch -- any other args.dataset value leaves
    # train_val_perms undefined and raises NameError just below; confirm
    # the argparse choices restrict the dataset to these two.
    if args.dataset == 'SumMe':
        train_val_perms = np.arange(25)
    elif args.dataset == 'TVSum':
        train_val_perms = np.arange(50)
    # fixed permutation: seeding with 0 makes the 5-fold split reproducible
    random.Random(0).shuffle(train_val_perms)
    train_val_perms = train_val_perms.reshape([5, -1])
    # fold `args.split` is held out for validation; the other 4 folds train
    train_perms = np.delete(train_val_perms, args.split, 0).reshape([-1])
    val_perms = train_val_perms[args.split]
    logger.info(" training split: " + str(train_perms))
    logger.info(" val split: " + str(val_perms))
    if args.location == 'home':
        data_path = os.path.join(os.path.expanduser('~'), 'datasets')
    else:
        data_path = os.path.join('/nfs/%s/boyu/SDN' % (args.location),
                                 'datasets')
    train_dataset = vsTVSum_Loader3_c3dd_segment.cDataset(
        dataset_name=args.dataset,
        split='train',
        decode_ratio=args.decode_ratio,
        train_val_perms=train_perms,
        data_path=data_path)
    max_input_len = train_dataset.max_input_len
    # The decoder emits a fixed fraction of the maximum input length.
    maximum_outputs = int(args.decode_ratio * max_input_len)
    val_dataset = vsTVSum_Loader3_c3dd_segment.cDataset(
        dataset_name=args.dataset,
        split='val',
        decode_ratio=args.decode_ratio,
        train_val_perms=val_perms,
        data_path=data_path)
    train_evaluator = Evaluator.Evaluator(dataset_name=args.dataset,
                                          split='tr',
                                          max_input_len=max_input_len,
                                          maximum_outputs=maximum_outputs,
                                          sum_budget=0.15,
                                          train_val_perms=train_perms,
                                          eval_metrics=args.eval_metrics,
                                          data_path=data_path)
    val_evaluator = Evaluator.Evaluator(dataset_name=args.dataset,
                                        split='val',
                                        max_input_len=max_input_len,
                                        maximum_outputs=maximum_outputs,
                                        sum_budget=0.15,
                                        train_val_perms=val_perms,
                                        eval_metrics=args.eval_metrics,
                                        data_path=data_path)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=4)
    # validation order is kept deterministic (shuffle=False)
    val_dataloader = DataLoader(val_dataset,
                                batch_size=args.batch_size,
                                shuffle=False,
                                num_workers=4)
    model = PointerNetwork(input_dim=args.input_dim,
                           embedding_dim=args.embedding_dim,
                           hidden_dim=args.hidden_dim,
                           max_decoding_len=maximum_outputs,
                           dropout=args.dropout,
                           n_enc_layers=2,
                           output_classes=1)
    # hassign_thres = args.hassign_thres
    logger.info("Number of Params\t{:d}".format(
        sum([p.data.nelement() for p in model.parameters()])))
    logger.info('Saving logs to {:s}'.format(log_file))
    if args.resume is not None:
        # NOTE(review): checkpoint index is hard-coded; args.resume is
        # presumably a path template like 'ckpt_{:d}.pth.tar' -- confirm.
        ckpt_idx = 48
        ckpt_filename = args.resume.format(ckpt_idx)
        assert os.path.isfile(
            ckpt_filename), 'Error: no checkpoint directory found!'
        # map_location keeps loading CPU-safe regardless of the saving device
        checkpoint = torch.load(ckpt_filename,
                                map_location=lambda storage, loc: storage)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        train_iou = checkpoint['IoU']
        args.start_epoch = checkpoint['epoch']
        logger.info("=> loading checkpoint '{}', current iou: {:.04f}".format(
            ckpt_filename, train_iou))
    model = cuda_model.convertModel2Cuda(model,
                                         gpu_id=args.gpu_id,
                                         multiGpu=args.multiGpu)
    # Only optimize trainable parameters (frozen ones are filtered out).
    model_optim = optim.Adam(filter(lambda p: p.requires_grad,
                                    model.parameters()),
                             lr=float(args.lr))
    optim_scheduler = optim.lr_scheduler.ReduceLROnPlateau(model_optim,
                                                           'min',
                                                           patience=10)
    # Weight of the pointer loss relative to the score-regression loss.
    alpha = args.alpha
    # cls_weights = torch.FloatTensor([0.2, 1.0]).cuda()
    # if args.set_cls_weight:
    #     cls_weights = torch.FloatTensor([1.*train_dataset.n_positive_train_samples/train_dataset.n_total_train_samples, args.cls_pos_weight]).cuda()
    # else:
    #     cls_weights = torch.FloatTensor([0.5, 0.5]).cuda()
    # logger.info(" total: {:d}, total pos: {:d}".format(train_dataset.n_total_train_samples, train_dataset.n_positive_train_samples))
    # logger.info(" classify weight: " + str(cls_weights[0]) + str(cls_weights[1]))
    for epoch in range(args.start_epoch, args.nof_epoch + args.start_epoch):
        total_losses = AverageMeter()
        # loc_losses = AverageMeter()
        pointer_losses = AverageMeter()
        rgs_losses = AverageMeter()
        Accuracy = AverageMeter()
        # IOU = AverageMeter()
        # ordered_IOU = AverageMeter()
        model.train()
        pbar = progressbar.ProgressBar(max_value=len(train_dataloader))
        for i_batch, sample_batch in enumerate(train_dataloader):
            pbar.update(i_batch)
            feature_batch = Variable(sample_batch[0])
            pointer_indices = Variable(sample_batch[1])
            pointer_scores = Variable(sample_batch[2])
            gt_valids = Variable(sample_batch[3])
            # seq_labels = Variable(sample_batch[3])
            if use_cuda:
                feature_batch = feature_batch.cuda()
                pointer_indices = pointer_indices.cuda()
                pointer_scores = pointer_scores.cuda()
            gt_positions = pointer_indices
            gt_scores = pointer_scores
            pointer_probs, pointer_positions, cls_scores, _ = model(
                feature_batch)
            pred_positions = pointer_positions
            cls_scores = cls_scores.contiguous().squeeze(2)
            # print(pointer_probs.size())
            # print(gt_positions.size())
            # F.cross_entropy expects (N, C, d1): move class axis to dim 1
            pointer_loss = F.cross_entropy(pointer_probs.permute(0, 2, 1),
                                           gt_positions)
            rgs_loss = F.mse_loss(cls_scores, gt_scores)
            total_loss = alpha * pointer_loss + rgs_loss
            model_optim.zero_grad()
            total_loss.backward()
            # clip gradients to guard against exploding RNN gradients
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.)
            model_optim.step()
            pointer_losses.update(pointer_loss.data.item(),
                                  feature_batch.size(0))
            rgs_losses.update(rgs_loss.data.item(), feature_batch.size(0))
            total_losses.update(total_loss.item(), feature_batch.size(0))
        logger.info(
            "Train -- Epoch :{:06d}, LR: {:.6f},\tloss={:.4f}, \t pointer-loss:{:.4f}, \tregress-loss:{:.4f}\tcls-Accuracy:{:.4f}"
            .format(epoch, model_optim.param_groups[0]['lr'],
                    total_losses.avg, pointer_losses.avg, rgs_losses.avg,
                    Accuracy.avg))
        # plateau scheduler watches the average training loss
        optim_scheduler.step(total_losses.avg)
        model.eval()
        # fresh meters so val numbers are not mixed with train numbers
        pointer_losses = AverageMeter()
        rgs_losses = AverageMeter()
        pbar = progressbar.ProgressBar(max_value=len(val_dataloader))
        for i_batch, sample_batch in enumerate(val_dataloader):
            pbar.update(i_batch)
            feature_batch = Variable(sample_batch[0])
            pointer_indices = Variable(sample_batch[1])
            pointer_scores = Variable(sample_batch[2])
            gt_valids = Variable(sample_batch[3])
            # valid_indices = Variable(sample_batch[3])
            if use_cuda:
                feature_batch = feature_batch.cuda()
                pointer_indices = pointer_indices.cuda()
                pointer_scores = pointer_scores.cuda()
            gt_positions = pointer_indices
            gt_scores = pointer_scores
            pointer_probs, pointer_positions, cls_scores, _ = model(
                feature_batch)
            pred_positions = pointer_positions
            cls_scores = cls_scores.contiguous().squeeze(2)
            pointer_loss = F.cross_entropy(pointer_probs.permute(0, 2, 1),
                                           gt_positions)
            rgs_loss = F.mse_loss(cls_scores, gt_scores)
            pointer_losses.update(pointer_loss.data.item(),
                                  feature_batch.size(0))
            rgs_losses.update(rgs_loss.data.item(), feature_batch.size(0))
        train_F1s = train_evaluator.Evaluate(model)
        val_F1s = val_evaluator.Evaluate(model)
        logger.info("Train -- Epoch :{:06d},\tF1s:{:.4f}".format(
            epoch, train_F1s))
        logger.info(
            "Val -- Epoch :{:06d},\t pointer-loss:{:.4f}, \tregress-loss:{:.4f}, \tF1s{:.4f}"
            .format(epoch, pointer_losses.avg, rgs_losses.avg, val_F1s))
        # checkpoint every epoch
        if epoch % 1 == 0:
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'loss': total_losses.avg,
                    'pointer_loss': pointer_losses.avg,
                    'rgs_loss': rgs_losses.avg,
                    'val_F1s': val_F1s
                }, (epoch + 1),
                file_direcotry=save_directory)
def main():
    """Debug driver for the two-pointer THUMOS-14 model.

    Runs the forward pass and builds the losses, but the backward pass and
    optimizer step are intentionally commented out: instead, the autograd
    graph of the tail-pointer loss is rendered with graph_vis and written to
    'graph-end-pred.dot' for offline inspection.
    """
    global args
    args = (parser.parse_args())
    use_cuda = cuda_model.ifUseCuda(args.gpu_id, args.multiGpu)
    # Pretty print the run args
    pp.pprint(vars(args))
    model = PointerNetwork(input_dim=args.input_dim,
                           embedding_dim=args.embedding_dim,
                           hidden_dim=args.hidden_dim,
                           max_decoding_len=args.net_outputs)
    print("Number of Params\t{:d}".format(
        sum([p.data.nelement() for p in model.parameters()])))
    if args.resume is not None:
        # NOTE(review): checkpoint index is hard-coded; args.resume is
        # presumably a path template -- confirm against the launch script.
        ckpt_idx = 11
        ckpt_filename = args.resume.format(ckpt_idx)
        assert os.path.isfile(
            ckpt_filename), 'Error: no checkpoint directory found!'
        # map_location keeps loading CPU-safe regardless of the saving device
        checkpoint = torch.load(ckpt_filename,
                                map_location=lambda storage, loc: storage)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        train_iou = checkpoint['IoU']
        args.start_epoch = checkpoint['epoch']
        print("=> loading checkpoint '{}', current iou: {:.04f}".format(
            ckpt_filename, train_iou))
    model = cuda_model.convertModel2Cuda(model,
                                         gpu_id=args.gpu_id,
                                         multiGpu=args.multiGpu)
    train_dataset = THUMOST14(seq_length=args.seq_len)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=4)
    # val_dataloader = DataLoader(val_dataset,
    #                             batch_size=args.batch_size,
    #                             shuffle=True,
    #                             num_workers=4)
    model_optim = optim.Adam(filter(lambda p: p.requires_grad,
                                    model.parameters()),
                             lr=float(args.lr))
    # weight of the localization losses relative to the classification loss
    alpha = 1.0
    for epoch in range(args.start_epoch, args.nof_epoch + args.start_epoch):
        total_losses = AverageMeter()
        loc_losses = AverageMeter()
        cls_losses = AverageMeter()
        Accuracy = AverageMeter()
        IOU = AverageMeter()
        ordered_IOU = AverageMeter()
        model.train()
        pbar = progressbar.ProgressBar(max_value=len(train_dataloader))
        n_effective_batches = 0
        for i_batch, sample_batch in enumerate(train_dataloader):
            pbar.update(i_batch)
            # requires_grad=True on the input so the dumped graph reaches
            # all the way back to the features
            feature_batch = Variable(sample_batch[0], requires_grad=True)
            start_indices = Variable(sample_batch[1])
            end_indices = Variable(sample_batch[2])
            valid_indices = Variable(sample_batch[3])
            # gt_index_batch = sample_batch[1].numpy()
            # score_batch = Variable(sample_batch[2])
            if use_cuda:
                feature_batch = feature_batch.cuda()
                start_indices = start_indices.cuda()
                end_indices = end_indices.cuda()
            # ground-truth (start, end) pairs stacked on the last axis
            gt_positions = torch.stack([start_indices, end_indices], dim=-1)
            head_pointer_probs, head_positions, tail_pointer_probs, tail_positions, cls_scores = model(
                feature_batch)
            pred_positions = torch.stack([head_positions, tail_positions],
                                         dim=-1)
            # match predicted segments to ground truth above IoU 0.25
            assigned_scores, assigned_locations = f_assign.Assign_Batch(
                gt_positions, pred_positions, valid_indices, thres=0.25)
            # only build losses when at least one prediction was assigned
            if np.sum(assigned_scores) >= 1:
                assigned_scores = Variable(torch.LongTensor(assigned_scores),
                                           requires_grad=False)
                assigned_locations = Variable(
                    torch.LongTensor(assigned_locations), requires_grad=False)
                if use_cuda:
                    assigned_scores = assigned_scores.cuda()
                    assigned_locations = assigned_locations.cuda()
                cls_scores = cls_scores.contiguous().view(
                    -1, cls_scores.size()[-1])
                assigned_scores = assigned_scores.contiguous().view(-1)
                cls_loss = F.cross_entropy(cls_scores, assigned_scores)
                assigned_head_positions = assigned_locations[:, :, 0]
                assigned_head_positions = assigned_head_positions.contiguous(
                ).view(-1)
                #
                assigned_tail_positions = assigned_locations[:, :, 1]
                assigned_tail_positions = assigned_tail_positions.contiguous(
                ).view(-1)
                head_pointer_probs = head_pointer_probs.contiguous().view(
                    -1, head_pointer_probs.size()[-1])
                tail_pointer_probs = tail_pointer_probs.contiguous().view(
                    -1, tail_pointer_probs.size()[-1])
                # TODO: here changes to Cross entropy since there is no hard constraints
                prediction_head_loss = F.cross_entropy((head_pointer_probs),
                                                       assigned_head_positions,
                                                       reduce=False)
                # mask out unassigned predictions before averaging
                prediction_head_loss = torch.mean(prediction_head_loss *
                                                  assigned_scores.float())
                prediction_tail_loss = F.cross_entropy((tail_pointer_probs),
                                                       assigned_tail_positions,
                                                       reduce=False)
                prediction_tail_loss = torch.mean(prediction_tail_loss *
                                                  assigned_scores.float())
                total_loss = alpha * (prediction_head_loss +
                                      prediction_tail_loss) + cls_loss
                # model_optim.zero_grad()
                # get_dot = graph_vis.register_hooks(prediction_head_loss)
                # total_loss.backward()
                dot = graph_vis.make_dot(prediction_tail_loss)
                dot.save('graph-end-pred.dot')
                print("Saved")
def main():
    """Train a two-pointer (head/tail) PointerNetwork on synthetic MNIST sequences.

    Each prediction is a (start, end) index pair plus a classification score.
    Predictions are matched to ground-truth segments (IoU >= 0.5) with
    h_assign_2; the loss is classification cross-entropy plus NLL pointer
    losses masked by the assignment, weighted by ``alpha``.

    Fixes over the previous revision:
    - tail-pointer targets now read ``assigned_locations[:, :, 1]`` (the end
      index); the old code read column 0, i.e. trained the tail pointer
      against *head* targets (sibling trainers in this file use column 1).
    - removed the unused local ``CCE`` loss instance.
    """
    global args
    args = (parser.parse_args())
    use_cuda = cuda_model.ifUseCuda(args.gpu_id, args.multiGpu)
    # Pretty print the run args
    pp.pprint(vars(args))
    model = PointerNetwork(input_dim=args.input_dim,
                           embedding_dim=args.embedding_dim,
                           hidden_dim=args.hidden_dim,
                           max_decoding_len=args.net_outputs)
    print("Number of Params\t{:d}".format(
        sum([p.data.nelement() for p in model.parameters()])))
    model = cuda_model.convertModel2Cuda(model,
                                         gpu_id=args.gpu_id,
                                         multiGpu=args.multiGpu)
    train_dataset = MNIST(train=True,
                          seq_length=args.seq_len,
                          n_outputs=args.n_outputs,
                          data_size=args.dataset_size)
    val_dataset = MNIST(train=True,
                        seq_length=args.seq_len,
                        n_outputs=args.n_outputs,
                        data_size=1000)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=4)
    # val_dataloader = DataLoader(val_dataset,
    #                             batch_size=args.batch_size,
    #                             shuffle=True,
    #                             num_workers=4)
    model_optim = optim.Adam(model.parameters(), lr=float(args.lr))
    # Weight of the pointer (localization) losses relative to the cls loss.
    alpha = 0.01
    for epoch in range(args.start_epoch, args.nof_epoch):
        total_losses = AverageMeter()
        loc_losses = AverageMeter()
        cls_losses = AverageMeter()
        Accuracy = AverageMeter()
        IOU = AverageMeter()
        ordered_IOU = AverageMeter()
        model.train()
        pbar = progressbar.ProgressBar(max_value=len(train_dataloader))
        n_effective_batches = 0
        for i_batch, sample_batch in enumerate(train_dataloader):
            pbar.update(i_batch)
            feature_batch = Variable(sample_batch[0])
            start_indices = Variable(sample_batch[1])
            end_indices = Variable(sample_batch[2])
            # gt_index_batch = sample_batch[1].numpy()
            # score_batch = Variable(sample_batch[2])
            if use_cuda:
                feature_batch = feature_batch.cuda()
                start_indices = start_indices.cuda()
                end_indices = end_indices.cuda()
            # ground-truth (start, end) pairs stacked on the last axis
            gt_positions = torch.stack([start_indices, end_indices], dim=-1)
            head_pointer_probs, head_positions, tail_pointer_probs, tail_positions, cls_scores = model(
                feature_batch)
            pred_positions = torch.stack([head_positions, tail_positions],
                                         dim=-1)
            # match predicted segments to ground truth above IoU 0.5
            assigned_scores, assigned_locations = h_assign_2.Assign_Batch(
                gt_positions, pred_positions, thres=0.5)
            correct_predictions = np.sum(assigned_scores[:, :args.n_outputs])
            cls_rate = correct_predictions * 1. / np.sum(assigned_scores)
            iou_rate = Losses.Metrics.get_avg_iou(
                np.reshape(pred_positions.data.cpu().numpy(), (-1, 2)),
                np.reshape(assigned_locations, (-1, 2)),
                np.reshape(
                    assigned_scores,
                    assigned_scores.shape[0] * assigned_scores.shape[1]))
            # IoU of only the first n_outputs (top-ranked) predictions
            _, top_assigned_locations = h_assign_2.Assign_Batch(
                gt_positions, pred_positions[:, :args.n_outputs, :],
                thres=0.5)
            ordered_iou_rate = Losses.Metrics.get_avg_iou(
                np.reshape(
                    pred_positions[:, :args.n_outputs, :].data.cpu().numpy(),
                    (-1, 2)),
                np.reshape(top_assigned_locations, (-1, 2)))
            Accuracy.update(cls_rate, np.sum(assigned_scores))
            # iou_rate = Metrics.get_avg_iou(np.reshape(pred_positions.data.cpu().numpy(), (-1, 2)), np.reshape(gt_positions.data.cpu().numpy(), (-1, 2)))
            IOU.update(iou_rate / (args.batch_size * args.n_outputs),
                       args.batch_size * args.n_outputs)
            ordered_IOU.update(
                ordered_iou_rate / (args.batch_size * args.n_outputs),
                args.batch_size * args.n_outputs)
            n_effective_batches += 1
            assigned_scores = Variable(torch.LongTensor(assigned_scores),
                                       requires_grad=False)
            assigned_locations = Variable(torch.LongTensor(assigned_locations),
                                          requires_grad=False)
            if use_cuda:
                assigned_scores = assigned_scores.cuda()
                assigned_locations = assigned_locations.cuda()
            cls_scores = cls_scores.contiguous().view(-1,
                                                      cls_scores.size()[-1])
            assigned_scores = assigned_scores.contiguous().view(-1)
            cls_loss = F.cross_entropy(cls_scores, assigned_scores)
            assigned_head_positions = assigned_locations[:, :, 0]
            assigned_head_positions = assigned_head_positions.contiguous(
            ).view(-1)
            # FIX: tail targets come from column 1 (end index). The previous
            # revision read column 0, so the tail pointer was trained against
            # head targets.
            assigned_tail_positions = assigned_locations[:, :, 1]
            assigned_tail_positions = assigned_tail_positions.contiguous(
            ).view(-1)
            head_pointer_probs = head_pointer_probs.contiguous().view(
                -1, head_pointer_probs.size()[-1])
            tail_pointer_probs = tail_pointer_probs.contiguous().view(
                -1, tail_pointer_probs.size()[-1])
            # start_indices = start_indices.contiguous().view(-1)
            # end_indices = end_indices.contiguous().view(-1)
            # with case instances....
            # epsilon inside the log keeps NLL finite for zero probabilities
            prediction_head_loss = F.nll_loss(torch.log(head_pointer_probs +
                                                        1e-8),
                                              assigned_head_positions,
                                              reduce=False)
            # mask out unassigned predictions before averaging
            prediction_head_loss = torch.mean(prediction_head_loss *
                                              assigned_scores.float())
            prediction_tail_loss = F.nll_loss(torch.log(tail_pointer_probs +
                                                        1e-8),
                                              assigned_tail_positions,
                                              reduce=False)
            prediction_tail_loss = torch.mean(prediction_tail_loss *
                                              assigned_scores.float())
            total_loss = alpha * (prediction_head_loss +
                                  prediction_tail_loss) + cls_loss
            model_optim.zero_grad()
            total_loss.backward()
            # clip gradients to guard against exploding RNN gradients
            torch.nn.utils.clip_grad_norm(model.parameters(), 1.)
            model_optim.step()
            cls_losses.update(cls_loss.data[0], feature_batch.size(0))
            loc_losses.update(
                prediction_head_loss.data[0] + prediction_tail_loss.data[0],
                feature_batch.size(0))
            total_losses.update(total_loss.data[0], feature_batch.size(0))
        print(
            "Train -- Epoch :{:06d}, LR: {:.6f},\tloss={:.4f}, \t c-loss:{:.4f}, \tloc-loss:{:.4f}\tcls-Accuracy:{:.4f}\tloc-Avg-IOU:{:.4f}\t topIOU:{:.4f}"
            .format(epoch, model_optim.param_groups[0]['lr'],
                    total_losses.avg, cls_losses.avg, loc_losses.avg,
                    Accuracy.avg, IOU.avg, ordered_IOU.avg))
def main():
    """Train the two-pointer (head/tail) proposal model on THUMOS-14 C3D features.

    Predictions are matched to ground-truth segments (IoU >= 0.5) with
    h_assign; the loss is classification cross-entropy plus cross-entropy
    pointer losses masked by the assignment, weighted by ``alpha``.
    Checkpoints are written to ``ckpt_savedir`` every epoch.
    """
    global args
    args = (parser.parse_args())
    use_cuda = cuda_model.ifUseCuda(args.gpu_id, args.multiGpu)
    ckpt_savedir = 'lstm2heads_proposal_c3d_s4_ckpt'
    # Pretty print the run args
    pp.pprint(vars(args))
    model = PointerNetwork(input_dim=args.input_dim,
                           embedding_dim=args.embedding_dim,
                           hidden_dim=args.hidden_dim,
                           max_decoding_len=args.net_outputs,
                           dropout=0.5)
    print("Number of Params\t{:d}".format(
        sum([p.data.nelement() for p in model.parameters()])))
    if args.resume is not None:
        ckpt_idx = args.fileid
        ckpt_filename = os.path.join(
            args.resume, 'checkpoint_{:04d}.pth.tar'.format(ckpt_idx))
        assert os.path.isfile(
            ckpt_filename), 'Error: no checkpoint directory found!'
        # map_location keeps loading CPU-safe regardless of the saving device
        checkpoint = torch.load(ckpt_filename,
                                map_location=lambda storage, loc: storage)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        train_iou = checkpoint['IoU']
        args.start_epoch = checkpoint['epoch']
        print("=> loading checkpoint '{}', current iou: {:.04f}".format(
            ckpt_filename, train_iou))
    model = cuda_model.convertModel2Cuda(model,
                                         gpu_id=args.gpu_id,
                                         multiGpu=args.multiGpu)
    train_dataset = THUMOST14(seq_length=args.seq_len,
                              overlap=0.9,
                              sample_rate=4)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=4)
    # val_dataloader = DataLoader(val_dataset,
    #                             batch_size=args.batch_size,
    #                             shuffle=True,
    #                             num_workers=4)
    # Only optimize trainable parameters (frozen ones are filtered out).
    model_optim = optim.Adam(filter(lambda p: p.requires_grad,
                                    model.parameters()),
                             lr=float(args.lr))
    # weight of the localization losses relative to the classification loss
    alpha = 0.1
    for epoch in range(args.start_epoch, args.nof_epoch + args.start_epoch):
        total_losses = AverageMeter()
        loc_losses = AverageMeter()
        cls_losses = AverageMeter()
        Accuracy = AverageMeter()
        IOU = AverageMeter()
        ordered_IOU = AverageMeter()
        model.train()
        pbar = progressbar.ProgressBar(max_value=len(train_dataloader))
        for i_batch, sample_batch in enumerate(train_dataloader):
            pbar.update(i_batch)
            feature_batch = Variable(sample_batch[0])
            start_indices = Variable(sample_batch[1])
            end_indices = Variable(sample_batch[2])
            valid_indices = Variable(sample_batch[3])
            # gt_index_batch = sample_batch[1].numpy()
            # score_batch = Variable(sample_batch[2])
            if use_cuda:
                feature_batch = feature_batch.cuda()
                start_indices = start_indices.cuda()
                end_indices = end_indices.cuda()
            # ground-truth (start, end) pairs stacked on the last axis
            gt_positions = torch.stack([start_indices, end_indices], dim=-1)
            head_pointer_probs, head_positions, tail_pointer_probs, tail_positions, cls_scores = model(
                feature_batch)
            pred_positions = torch.stack([head_positions, tail_positions],
                                         dim=-1)
            # match predicted segments to ground truth above IoU 0.5
            assigned_scores, assigned_locations = h_assign.Assign_Batch(
                gt_positions, pred_positions, valid_indices, thres=0.5)
            # if np.sum(assigned_scores) > 1:
            #     print("DEBUG")
            # correct_predictions = np.sum(assigned_scores[:,:args.n_outputs])
            # cls_rate = correct_predictions*1./np.sum(assigned_scores)
            # only track IoU when at least one prediction was assigned
            if np.sum(assigned_scores) >= 1:
                iou_rate, effective_positives = Metrics.get_avg_iou2(
                    np.reshape(pred_positions.data.cpu().numpy(), (-1, 2)),
                    np.reshape(assigned_locations, (-1, 2)),
                    np.reshape(
                        assigned_scores,
                        assigned_scores.shape[0] * assigned_scores.shape[1]))
                # _, top_assigned_locations = h_assign_proposal.Assign_Batch(gt_positions, pred_positions[:, : args.n_outputs, :], valid_indices, thres=0.5)
                #
                # ordered_iou_rate = Metrics.get_avg_iou(np.reshape(pred_positions[:,:args.n_outputs,:].data.cpu().numpy(), (-1, 2)),
                #                                        np.reshape(top_assigned_locations, (-1, 2)))
                # Accuracy.update(cls_rate, np.sum(assigned_scores))
                # iou_rate = Metrics.get_avg_iou(np.reshape(pred_positions.data.cpu().numpy(), (-1, 2)), np.reshape(gt_positions.data.cpu().numpy(), (-1, 2)))
                IOU.update(iou_rate / (effective_positives),
                           effective_positives)
                # ordered_IOU.update(ordered_iou_rate/(args.batch_size*args.n_outputs),args.batch_size*args.n_outputs)
                # n_effective_batches += 1
            assigned_scores = Variable(torch.LongTensor(assigned_scores),
                                       requires_grad=False)
            assigned_locations = Variable(torch.LongTensor(assigned_locations),
                                          requires_grad=False)
            if use_cuda:
                assigned_scores = assigned_scores.cuda()
                assigned_locations = assigned_locations.cuda()
            cls_scores = cls_scores.contiguous().view(-1,
                                                      cls_scores.size()[-1])
            assigned_scores = assigned_scores.contiguous().view(-1)
            cls_loss = F.cross_entropy(cls_scores, assigned_scores)
            assigned_head_positions = assigned_locations[:, :, 0]
            assigned_head_positions = assigned_head_positions.contiguous(
            ).view(-1)
            #
            assigned_tail_positions = assigned_locations[:, :, 1]
            assigned_tail_positions = assigned_tail_positions.contiguous(
            ).view(-1)
            head_pointer_probs = head_pointer_probs.contiguous().view(
                -1, head_pointer_probs.size()[-1])
            tail_pointer_probs = tail_pointer_probs.contiguous().view(
                -1, tail_pointer_probs.size()[-1])
            # start_indices = start_indices.contiguous().view(-1)
            # end_indices = end_indices.contiguous().view(-1)
            # with case instances....
            prediction_head_loss = F.cross_entropy((head_pointer_probs),
                                                   assigned_head_positions,
                                                   reduce=False)
            # mask out unassigned predictions before averaging
            prediction_head_loss = torch.mean(prediction_head_loss *
                                              assigned_scores.float())
            prediction_tail_loss = F.cross_entropy((tail_pointer_probs),
                                                   assigned_tail_positions,
                                                   reduce=False)
            prediction_tail_loss = torch.mean(prediction_tail_loss *
                                              assigned_scores.float())
            total_loss = alpha * (prediction_head_loss +
                                  prediction_tail_loss) + cls_loss
            model_optim.zero_grad()
            total_loss.backward()
            # clip gradients to guard against exploding RNN gradients
            torch.nn.utils.clip_grad_norm(model.parameters(), 1.)
            model_optim.step()
            cls_losses.update(cls_loss.data[0], feature_batch.size(0))
            loc_losses.update(
                prediction_head_loss.data[0] + prediction_tail_loss.data[0],
                feature_batch.size(0))
            total_losses.update(total_loss.data[0], feature_batch.size(0))
        print(
            "Train -- Epoch :{:06d}, LR: {:.6f},\tloss={:.4f}, \t c-loss:{:.4f}, \tloc-loss:{:.4f}\tcls-Accuracy:{:.4f}\tloc-Avg-IOU:{:.4f}\t topIOU:{:.4f}".format(
                epoch, model_optim.param_groups[0]['lr'], total_losses.avg,
                cls_losses.avg, loc_losses.avg, Accuracy.avg, IOU.avg,
                ordered_IOU.avg))
        # checkpoint every epoch
        if epoch % 1 == 0:
            save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'loss': total_losses.avg,
                'cls_loss': cls_losses.avg,
                'loc_loss': loc_losses.avg,
                'IoU': IOU.avg}, (epoch + 1), file_direcotry=ckpt_savedir)
def main():
    """Train a PointerNet segment localizer on TVSum with CCE or EMD loss.

    Selects the localization loss from ``args.loss`` ('emd' -> EMD_L2,
    anything else -> cross-entropy), trains on augmented clips, validates
    each epoch, tracks the best train/val loss and top-1 accuracy, and saves
    a checkpoint (with per-metric is-best flags) every epoch.

    Fixes over the previous revision:
    - best val accuracy now stores ``val_top_1_accuracy.avg`` (the value that
      is actually compared); the old code stored ``val_top_3_accuracy.avg``,
      inflating the tracked/checkpointed best.
    - Python-2 ``print`` statements normalized to single-argument ``print()``
      calls (identical output under Python 2, parseable under Python 3).
    """
    global args
    args = parser.parse_args()
    use_cuda = cuda_model.ifUseCuda(args.gpu_id, args.multiGpu)
    model = PointerNet(args.input_size, args.hidden_size, args.nlayers,
                       args.dropout, args.bidir)
    model = cuda_model.convertModel2Cuda(model,
                                         gpu_id=args.gpu_id,
                                         multiGpu=args.multiGpu)
    useEMD = False
    if args.loss == 'emd':
        print("Using EMD Loss")
        useEMD = True
    else:
        print("Using CCE Loss")
    print("clip size: {:d}".format(args.clip_size))
    # this is for training local...
    train_dataset = AugSumMeLoader.Dataset(dataset_name='TVSum',
                                           split='train',
                                           clip_size=args.clip_size)
    val_dataset = SumMeLoader.Dataset(dataset_name='TVSum',
                                      split='val',
                                      clip_size=args.clip_size)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=4)
    val_dataloader = DataLoader(val_dataset,
                                batch_size=args.batch_size,
                                shuffle=True,
                                num_workers=4)
    CCE = torch.nn.CrossEntropyLoss()
    # Only optimize trainable parameters (frozen ones are filtered out).
    model_optim = optim.Adam(filter(lambda p: p.requires_grad,
                                    model.parameters()),
                             lr=args.lr)
    # Best metrics seen so far; losses start at +inf so any epoch improves.
    best_status = {
        'train_accuracy': 0,
        'val_accuracy': 0,
        'train_loss': float('inf'),
        'val_loss': float('inf')
    }
    # Per-epoch flags: 1 if that metric improved this epoch (reset at end).
    isBest_status = {
        'train_accuracy': 0,
        'val_accuracy': 0,
        'train_loss': 0,
        'val_loss': 0
    }
    for epoch in range(args.nof_epoch):
        train_top_1_accuracy = AverageMeter()
        train_top_3_accuracy = AverageMeter()
        train_losses = AverageMeter()
        train_f1_1_accuracy = AverageMeter()
        train_f1_3_accuracy = AverageMeter()
        model.train()
        # train_iterator = tqdm(train_dataloader, unit='Batch')
        pbar = progressbar.ProgressBar(max_value=len(train_dataloader))
        for i_batch, sample_batched in enumerate(train_dataloader):
            pbar.update(i_batch)
            # train_iterator.set_description('Train Batch %i/%i' % (epoch + 1, args.nof_epoch))
            train_batch = Variable(sample_batched[0])
            targetIdx_batch = Variable(sample_batched[1])
            # one-hot targets are only consumed by the EMD loss
            target1Hot_batch = Variable(
                loss_transforms.torchT_indices2D2scores(
                    sample_batched[1], n_classes=args.clip_size))
            if use_cuda:
                train_batch = train_batch.cuda()
                targetIdx_batch = targetIdx_batch.cuda()
                target1Hot_batch = target1Hot_batch.cuda()
            index_vector, segment_score = model(train_batch)
            # flatten (batch, steps, clip) -> (batch*steps, clip) for the loss
            index_vector = index_vector.contiguous().view(-1, args.clip_size)
            target1Hot_batch = target1Hot_batch.view(-1, args.clip_size)
            targetIdx_batch = targetIdx_batch.view(-1)
            F1 = Metrics.accuracy_F1(index_vector.data, targetIdx_batch.data,
                                     topk=[1, 3])
            accuracy = Metrics.accuracy_topN(index_vector.data,
                                             targetIdx_batch.data,
                                             topk=[1, 3])
            if useEMD:
                loc_loss = losses.EMD_L2(index_vector, target1Hot_batch,
                                         needSoftMax=True)
            else:
                loc_loss = CCE(index_vector, targetIdx_batch)
            model_optim.zero_grad()
            loc_loss.backward()
            model_optim.step()
            train_losses.update(loc_loss.data[0], train_batch.size(0))
            train_top_1_accuracy.update(accuracy[0][0], train_batch.size(0))
            train_top_3_accuracy.update(accuracy[1][0], train_batch.size(0))
            train_f1_1_accuracy.update(F1[0], train_batch.size(0))
            train_f1_3_accuracy.update(F1[1], train_batch.size(0))
        print(
            "Train -- EpochT :{:06d}, LR: {:.6f},\tloss={:.6f}\ttop1={:.4f}\ttop3={:.4f}\tF1_1={:.4f}\tF1_3={:.4f}"
            .format(epoch, model_optim.param_groups[0]['lr'],
                    train_losses.avg, train_top_1_accuracy.avg,
                    train_top_3_accuracy.avg, train_f1_1_accuracy.avg,
                    train_f1_3_accuracy.avg))
        if best_status['train_loss'] > train_losses.avg:
            best_status['train_loss'] = train_losses.avg
            isBest_status['train_loss'] = 1
        if best_status['train_accuracy'] < train_top_1_accuracy.avg:
            best_status['train_accuracy'] = train_top_1_accuracy.avg
            isBest_status['train_accuracy'] = 1
        model.eval()
        val_top_1_accuracy = AverageMeter()
        val_top_3_accuracy = AverageMeter()
        val_losses = AverageMeter()
        val_f1_1_accuracy = AverageMeter()
        val_f1_3_accuracy = AverageMeter()
        pbar = progressbar.ProgressBar(max_value=len(val_dataloader))
        for i_batch, sample_batched in enumerate(val_dataloader):
            pbar.update(i_batch)
            test_batch = Variable(sample_batched[0])
            targetIdx_batch = Variable(sample_batched[1])
            target1Hot_batch = Variable(
                loss_transforms.torchT_indices2D2scores(
                    sample_batched[1], n_classes=args.clip_size))
            if use_cuda:
                test_batch = test_batch.cuda()
                targetIdx_batch = targetIdx_batch.cuda()
                target1Hot_batch = target1Hot_batch.cuda()
            index_vector, segment_score = model(test_batch)
            index_vector = index_vector.contiguous().view(-1, args.clip_size)
            target1Hot_batch = target1Hot_batch.view(-1, args.clip_size)
            targetIdx_batch = targetIdx_batch.view(-1)
            accuracy = Metrics.accuracy_topN(index_vector.data,
                                             targetIdx_batch.data,
                                             topk=[1, 3])
            F1 = Metrics.accuracy_F1(index_vector.data, targetIdx_batch.data,
                                     topk=[1, 3])
            if useEMD:
                loc_loss = losses.EMD_L2(index_vector, target1Hot_batch,
                                         needSoftMax=True)
            else:
                loc_loss = CCE(index_vector, targetIdx_batch)
            val_losses.update(loc_loss.data[0], test_batch.size(0))
            val_top_1_accuracy.update(accuracy[0][0], test_batch.size(0))
            val_top_3_accuracy.update(accuracy[1][0], test_batch.size(0))
            val_f1_1_accuracy.update(F1[0], test_batch.size(0))
            val_f1_3_accuracy.update(F1[1], test_batch.size(0))
        print(
            "Test :{:06d}, LR: {:.6f},\tloss={:.6f}\ttop1={:.4f}\ttop3={:.4f}\tF1_1={:.4f}\tF1_3={:.4f}"
            .format(epoch, model_optim.param_groups[0]['lr'], val_losses.avg,
                    val_top_1_accuracy.avg, val_top_3_accuracy.avg,
                    val_f1_1_accuracy.avg, val_f1_3_accuracy.avg))
        if best_status['val_loss'] > val_losses.avg:
            best_status['val_loss'] = val_losses.avg
            isBest_status['val_loss'] = 1
        if best_status['val_accuracy'] < val_top_1_accuracy.avg:
            # FIX: store the top-1 average that was compared above; the old
            # code stored val_top_3_accuracy.avg here.
            best_status['val_accuracy'] = val_top_1_accuracy.avg
            isBest_status['val_accuracy'] = 1
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'val_loss': best_status['val_loss'],
                'val_accuracy': best_status['val_accuracy'],
                'train_loss': best_status['train_loss'],
                'train_accuracy': best_status['train_accuracy']
            }, isBest_status,
            file_direcotry='vsSum_Segment')
        # reset per-epoch is-best flags for the next epoch
        for item in isBest_status.keys():
            isBest_status[item] = 0