def train_one_epoch(train_dataloader, model, optimizer, lr_scheduler, epoch, configs, logger, tb_writer):
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    progress = ProgressMeter(len(train_dataloader), [batch_time, data_time, losses],
                             prefix="Train - Epoch: [{}/{}]".format(epoch, configs.num_epochs))

    criterion = Compute_Loss(device=configs.device)
    num_iters_per_epoch = len(train_dataloader)

    # switch to train mode
    model.train()
    start_time = time.time()
    for batch_idx, batch_data in enumerate(tqdm(train_dataloader)):
        data_time.update(time.time() - start_time)
        metadatas, imgs, targets = batch_data
        batch_size = imgs.size(0)
        global_step = num_iters_per_epoch * (epoch - 1) + batch_idx + 1
        for k in targets.keys():
            targets[k] = targets[k].to(configs.device, non_blocking=True)
        imgs = imgs.to(configs.device, non_blocking=True).float()

        outputs = model(imgs)
        total_loss, loss_stats = criterion(outputs, targets)
        # For torch.nn.DataParallel case
        if (not configs.distributed) and (configs.gpu_idx is None):
            total_loss = torch.mean(total_loss)

        # compute gradient and perform backpropagation
        total_loss.backward()
        # accumulate gradients over `configs.subdivisions` mini-batches before stepping
        if global_step % configs.subdivisions == 0:
            optimizer.step()
            # zero the parameter gradients
            optimizer.zero_grad()
            # Adjust learning rate
            # if configs.step_lr_in_epoch:
            #     lr_scheduler.step()
            #     if tb_writer is not None:
            #         tb_writer.add_scalar('LR', lr_scheduler.get_lr()[0], global_step)

        if configs.distributed:
            reduced_loss = reduce_tensor(total_loss.data, configs.world_size)
        else:
            reduced_loss = total_loss.data
        losses.update(to_python_float(reduced_loss), batch_size)

        # measure elapsed time
        # torch.cuda.synchronize()
        batch_time.update(time.time() - start_time)

        if tb_writer is not None:
            if (global_step % configs.tensorboard_freq) == 0:
                loss_stats['avg_loss'] = losses.avg
                tb_writer.add_scalars('Train', loss_stats, global_step)

        # Log message
        if logger is not None:
            if (global_step % configs.print_freq) == 0:
                logger.info(progress.get_message(batch_idx))

        start_time = time.time()
def train_one_epoch(train_loader, model, optimizer, epoch, configs, logger):
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    progress = ProgressMeter(len(train_loader), [batch_time, data_time, losses],
                             prefix="Train - Epoch: [{}]".format(epoch))

    # switch to train mode
    model.train()
    start_time = time.time()
    for batch_idx, (origin_imgs, resized_imgs, org_ball_pos_xy, global_ball_pos_xy, event_class,
                    target_seg) in enumerate(tqdm(train_loader)):
        data_time.update(time.time() - start_time)
        batch_size = resized_imgs.size(0)
        target_seg = target_seg.to(configs.device, non_blocking=True)
        resized_imgs = resized_imgs.to(configs.device, non_blocking=True).float()

        # Only move origin_imgs to cuda if the model has local stage for ball detection
        if not configs.no_local:
            origin_imgs = origin_imgs.to(configs.device, non_blocking=True).float()
            # compute output
            pred_ball_global, pred_ball_local, pred_events, pred_seg, local_ball_pos_xy, total_loss, _ = model(
                origin_imgs, resized_imgs, org_ball_pos_xy, global_ball_pos_xy, event_class, target_seg)
        else:
            pred_ball_global, pred_ball_local, pred_events, pred_seg, local_ball_pos_xy, total_loss, _ = model(
                None, resized_imgs, org_ball_pos_xy, global_ball_pos_xy, event_class, target_seg)

        # For torch.nn.DataParallel case
        if (not configs.distributed) and (configs.gpu_idx is None):
            total_loss = torch.mean(total_loss)

        # zero the parameter gradients
        optimizer.zero_grad()
        # compute gradient and perform backpropagation
        total_loss.backward()
        optimizer.step()

        losses.update(total_loss.item(), batch_size)
        # measure elapsed time
        batch_time.update(time.time() - start_time)

        # Log message
        if logger is not None:
            if ((batch_idx + 1) % configs.print_freq) == 0:
                logger.info(progress.get_message(batch_idx))

        start_time = time.time()

    return losses.avg
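# A minimal usage sketch (not part of the original code): driving the train_one_epoch
# variant above from an outer epoch loop. The optimizer choice, learning rate, and the
# train_sketch name are illustrative assumptions only.
def train_sketch(model, train_loader, configs, logger):
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)  # assumed optimizer/LR
    for epoch in range(1, configs.num_epochs + 1):
        # one pass over the training set; returns the epoch's average loss
        avg_loss = train_one_epoch(train_loader, model, optimizer, epoch, configs, logger)
        if logger is not None:
            logger.info('Epoch {} - average training loss: {:.4e}'.format(epoch, avg_loss))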
def evaluate_one_epoch(val_loader, model, epoch, configs, logger):
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    conf_thresh = 0.5
    nms_thresh = 0.5
    iou_threshold = 0.5
    progress = ProgressMeter(len(val_loader), [batch_time, data_time],
                             prefix="Evaluate - Epoch: [{}/{}]".format(epoch, configs.num_epochs))

    labels = []
    sample_metrics = []  # List of tuples (TP, confs, pred)
    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        start_time = time.time()
        for batch_idx, batch_data in enumerate(tqdm(val_loader)):
            data_time.update(time.time() - start_time)
            _, imgs, targets = batch_data
            # Extract labels
            labels += targets[:, 1].tolist()
            # Rescale target
            targets[:, 2:] *= configs.img_size
            imgs = imgs.to(configs.device, non_blocking=True)

            outputs = model(imgs)
            outputs = post_processing(outputs, conf_thresh=conf_thresh, nms_thresh=nms_thresh)

            sample_metrics += get_batch_statistics_rotated_bbox(outputs, targets, iou_threshold=iou_threshold)

            # measure elapsed time
            # torch.cuda.synchronize()
            batch_time.update(time.time() - start_time)

            # Log message
            if logger is not None:
                if ((batch_idx + 1) % configs.print_freq) == 0:
                    logger.info(progress.get_message(batch_idx))

            start_time = time.time()

    # Concatenate sample statistics
    true_positives, pred_scores, pred_labels = [np.concatenate(x, 0) for x in list(zip(*sample_metrics))]
    precision, recall, AP, f1, ap_class = ap_per_class(true_positives, pred_scores, pred_labels, labels)

    return precision, recall, AP, f1, ap_class
def train_one_epoch(train_loader, model, optimizer, epoch, configs, logger):
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    progress = ProgressMeter(len(train_loader), [batch_time, data_time, losses],
                             prefix="Train - Epoch: [{}/{}]".format(epoch, configs.num_epochs))

    # switch to train mode
    model.train()
    start_time = time.time()
    for batch_idx, (resized_imgs, org_ball_pos_xy, global_ball_pos_xy, target_events,
                    target_seg) in enumerate(tqdm(train_loader)):
        data_time.update(time.time() - start_time)
        batch_size = resized_imgs.size(0)
        target_seg = target_seg.to(configs.device, non_blocking=True)
        resized_imgs = resized_imgs.to(configs.device, non_blocking=True).float()

        pred_ball_global, pred_ball_local, pred_events, pred_seg, local_ball_pos_xy, total_loss, _ = model(
            resized_imgs, org_ball_pos_xy, global_ball_pos_xy, target_events, target_seg)

        # For torch.nn.DataParallel case
        if (not configs.distributed) and (configs.gpu_idx is None):
            total_loss = torch.mean(total_loss)

        # zero the parameter gradients
        optimizer.zero_grad()
        # compute gradient and perform backpropagation
        total_loss.backward()
        optimizer.step()

        if configs.distributed:
            reduced_loss = reduce_tensor(total_loss.data, configs.world_size)
        else:
            reduced_loss = total_loss.data
        losses.update(to_python_float(reduced_loss), batch_size)

        # measure elapsed time
        torch.cuda.synchronize()
        batch_time.update(time.time() - start_time)

        # Log message
        if logger is not None:
            if ((batch_idx + 1) % configs.print_freq) == 0:
                logger.info(progress.get_message(batch_idx))

        start_time = time.time()

    return losses.avg
def evaluate_mAP(val_loader, model, configs, logger):
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    progress = ProgressMeter(len(val_loader), [batch_time, data_time],
                             prefix="Evaluation phase...")

    labels = []
    sample_metrics = []  # List of tuples (TP, confs, pred)
    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        start_time = time.time()
        for batch_idx, batch_data in enumerate(tqdm(val_loader)):
            metadatas, targets = batch_data
            batch_size = len(metadatas['img_path'])
            voxelinput = metadatas['voxels']
            coorinput = metadatas['coors']
            numinput = metadatas['num_points']

            voxelinputr = torch.tensor(voxelinput, dtype=torch.float32, device=configs.device)
            coorinputr = torch.tensor(coorinput, dtype=torch.int32, device=configs.device)
            numinputr = torch.tensor(numinput, dtype=torch.int32, device=configs.device)

            # t1/t2 bracket the forward pass and post-processing time (currently unused)
            t1 = time_synchronized()
            outputs = model(voxelinputr, coorinputr, numinputr)
            outputs = outputs._asdict()
            outputs['hm_cen'] = _sigmoid(outputs['hm_cen'])
            outputs['cen_offset'] = _sigmoid(outputs['cen_offset'])
            # detections size (batch_size, K, 10)
            detections = decode(outputs['hm_cen'], outputs['cen_offset'], outputs['direction'],
                                outputs['z_coor'], outputs['dim'], K=configs.K)
            detections = detections.cpu().numpy().astype(np.float32)
            detections = post_processingv2(detections, configs.num_classes, configs.down_ratio,
                                           configs.peak_thresh)

            for sample_i in range(len(detections)):
                # collect the ground-truth class labels of this sample
                num = targets['count'][sample_i]
                target = targets['batch'][sample_i][:num]
                labels += target[:, 8].tolist()

            sample_metrics += get_batch_statistics_rotated_bbox(detections, targets,
                                                                iou_threshold=configs.iou_thresh)
            t2 = time_synchronized()

            # measure elapsed time
            batch_time.update(time.time() - start_time)

            # Log message
            if logger is not None:
                if ((batch_idx + 1) % configs.print_freq) == 0:
                    logger.info(progress.get_message(batch_idx))

            start_time = time.time()

    # Concatenate sample statistics
    true_positives, pred_scores, pred_labels = [np.concatenate(x, 0) for x in list(zip(*sample_metrics))]
    precision, recall, AP, f1, ap_class = ap_per_class(true_positives, pred_scores, pred_labels, labels)

    return precision, recall, AP, f1, ap_class
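# A minimal sketch (an assumption, not part of the original code) of how the per-class
# arrays returned by evaluate_mAP / evaluate_one_epoch might be summarized; mAP is the
# mean of the per-class AP values. The print_eval_summary name and the optional
# class_names mapping are illustrative only.
def print_eval_summary(precision, recall, AP, f1, ap_class, class_names=None):
    for i, cls in enumerate(ap_class):
        name = class_names[cls] if class_names is not None else str(cls)
        print('Class {}: precision={:.4f}, recall={:.4f}, AP={:.4f}, F1={:.4f}'.format(
            name, precision[i], recall[i], AP[i], f1[i]))
    print('mAP: {:.4f}'.format(AP.mean()))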
def train_one_epoch(train_dataloader, model, optimizer, lr_scheduler, epoch, configs, logger, tb_writer):
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    progress = ProgressMeter(len(train_dataloader), [batch_time, data_time, losses],
                             prefix="Train - Epoch: [{}/{}]".format(epoch, configs.num_epochs))

    criterion = Compute_Loss(device=configs.device)
    num_iters_per_epoch = len(train_dataloader)

    # switch to train mode
    model.train()
    start_time = time.time()
    for batch_idx, batch_data in enumerate(tqdm(train_dataloader)):
        data_time.update(time.time() - start_time)
        metadatas, targets = batch_data
        batch_size = len(metadatas['img_path'])
        voxelinput = metadatas['voxels']
        coorinput = metadatas['coors']
        numinput = metadatas['num_points']
        global_step = num_iters_per_epoch * (epoch - 1) + batch_idx + 1
        for k in targets.keys():
            targets[k] = targets[k].to(configs.device, non_blocking=True)

        voxelinputr = torch.tensor(voxelinput, dtype=torch.float32, device=configs.device)
        coorinputr = torch.tensor(coorinput, dtype=torch.int32, device=configs.device)
        numinputr = torch.tensor(numinput, dtype=torch.int32, device=configs.device)

        outputs = model(voxelinputr, coorinputr, numinputr)
        total_loss, loss_stats = criterion(outputs, targets)
        # For torch.nn.DataParallel case
        if (not configs.distributed) and (configs.gpu_idx is None):
            total_loss = torch.mean(total_loss)

        # compute gradient and perform backpropagation
        total_loss.backward()
        # accumulate gradients over `configs.subdivisions` mini-batches before stepping
        if global_step % configs.subdivisions == 0:
            optimizer.step()
            # zero the parameter gradients
            optimizer.zero_grad()
            # Adjust learning rate
            if configs.step_lr_in_epoch:
                lr_scheduler.step()
                if tb_writer is not None:
                    tb_writer.add_scalar('LR', lr_scheduler.get_lr()[0], global_step)

        if configs.distributed:
            reduced_loss = reduce_tensor(total_loss.data, configs.world_size)
        else:
            reduced_loss = total_loss.data
        losses.update(to_python_float(reduced_loss), batch_size)

        # measure elapsed time
        # torch.cuda.synchronize()
        batch_time.update(time.time() - start_time)

        if tb_writer is not None:
            if (global_step % configs.tensorboard_freq) == 0:
                loss_stats['avg_loss'] = losses.avg
                tb_writer.add_scalars('Train', loss_stats, global_step)

        # Log message
        if logger is not None:
            if (global_step % configs.print_freq) == 0:
                logger.info(progress.get_message(batch_idx))

        start_time = time.time()
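# The functions above rely on AverageMeter and ProgressMeter helpers that are not shown
# here. Below is a minimal sketch of what they might look like, modeled on the common
# PyTorch ImageNet-example utilities; the actual implementations may differ.
class AverageMeter(object):
    """Tracks the current value, running sum, count, and average of a metric."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return fmtstr.format(**self.__dict__)


class ProgressMeter(object):
    """Formats a one-line progress message from a list of AverageMeter objects."""

    def __init__(self, num_batches, meters, prefix=""):
        self.num_batches = num_batches
        self.meters = meters
        self.prefix = prefix

    def get_message(self, batch_idx):
        entries = [self.prefix + '[{}/{}]'.format(batch_idx, self.num_batches)]
        entries += [str(meter) for meter in self.meters]
        return '\t'.join(entries)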