def train(epoch):
    print('\n%s Epoch: %d' % (datetime.now(), epoch))
    model.train()
    tic = time.perf_counter()
    for batch_idx, batch in enumerate(train_loader):
        for k in batch:
            batch[k] = batch[k].to(device=cfg.device, non_blocking=True)

        outputs = model(batch['image'])
        # Each output tuple holds the six CornerNet-style heads, one tuple per supervision stack.
        hmap_tl, hmap_br, embd_tl, embd_br, regs_tl, regs_br = zip(*outputs)

        embd_tl = [_tranpose_and_gather_feature(e, batch['inds_tl']) for e in embd_tl]
        embd_br = [_tranpose_and_gather_feature(e, batch['inds_br']) for e in embd_br]
        regs_tl = [_tranpose_and_gather_feature(r, batch['inds_tl']) for r in regs_tl]
        regs_br = [_tranpose_and_gather_feature(r, batch['inds_br']) for r in regs_br]

        focal_loss = _neg_loss(hmap_tl, batch['hmap_tl']) + \
                     _neg_loss(hmap_br, batch['hmap_br'])
        reg_loss = _reg_loss(regs_tl, batch['regs_tl'], batch['ind_masks']) + \
                   _reg_loss(regs_br, batch['regs_br'], batch['ind_masks'])
        pull_loss, push_loss = _ae_loss(embd_tl, embd_br, batch['ind_masks'])

        loss = focal_loss + 0.1 * pull_loss + 0.1 * push_loss + reg_loss

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch_idx % cfg.log_interval == 0:
            duration = time.perf_counter() - tic
            tic = time.perf_counter()
            print('[%d/%d-%d/%d] ' % (epoch, cfg.num_epochs, batch_idx, len(train_loader)) +
                  ' focal_loss= %.5f pull_loss= %.5f push_loss= %.5f reg_loss= %.5f' %
                  (focal_loss.item(), pull_loss.item(), push_loss.item(), reg_loss.item()) +
                  ' (%d samples/sec)' % (cfg.batch_size * cfg.log_interval / duration))

            step = len(train_loader) * epoch + batch_idx
            summary_writer.add_scalar('focal_loss', focal_loss.item(), step)
            summary_writer.add_scalar('pull_loss', pull_loss.item(), step)
            summary_writer.add_scalar('push_loss', push_loss.item(), step)
            summary_writer.add_scalar('reg_loss', reg_loss.item(), step)
    return
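# Every loop in this file calls `_tranpose_and_gather_feature` to pull the
# per-object feature vectors out of a dense head map at the ground-truth
# (corner or center) indices. A minimal sketch of the standard CenterNet-style
# implementation, for reference; the repo's actual helper may differ in details:
import torch

def _tranpose_and_gather_feature_sketch(feat, inds):
    # feat: (B, C, H, W) head output; inds: (B, K) flattened H*W indices.
    feat = feat.permute(0, 2, 3, 1).contiguous()       # (B, H, W, C)
    feat = feat.view(feat.size(0), -1, feat.size(3))   # (B, H*W, C)
    inds = inds.unsqueeze(2).expand(inds.size(0), inds.size(1), feat.size(2))
    return feat.gather(1, inds)                        # (B, K, C)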
def train_epoch(self, data_counts, data_loader, eval_loader, epoch, n_epochs):
    with tqdm.tqdm(total=data_counts, desc=f'Epoch {epoch}/{n_epochs}',
                   unit='img', ncols=150) as pbar:
        step = 0
        for batch in data_loader:
            step += 1
            load_t0 = time.time()
            for k in batch:
                batch[k] = batch[k].to(device=self.cfg.device, non_blocking=True)

            outputs = self.model(batch['image'])
            hmap, regs, w_h_ = zip(*outputs)
            regs = [_tranpose_and_gather_feature(r, batch['inds']) for r in regs]
            w_h_ = [_tranpose_and_gather_feature(r, batch['inds']) for r in w_h_]

            hmap_loss = _neg_loss(hmap, batch['hmap'])
            reg_loss = _reg_loss(regs, batch['regs'], batch['ind_masks'])
            w_h_loss = _reg_loss(w_h_, batch['w_h_'], batch['ind_masks'])
            loss = hmap_loss + 1 * reg_loss + 0.1 * w_h_loss

            self.model.zero_grad()
            loss.backward()
            self.optim.step()

            batch_time = time.time() - load_t0
            pbar.set_postfix(**{'hmap_loss': hmap_loss.item(),
                                'reg_loss': reg_loss.item(),
                                'w_h_loss': w_h_loss.item(),
                                'LR': self.optim.param_groups[0]['lr'],
                                'Batchtime': batch_time})
            pbar.update(batch['image'].shape[0])

    cons_acc = self._evaluate(eval_loader)
    return cons_acc
def train(epoch):
    print('\n Epoch: %d' % epoch)
    model.train()
    tic = time.perf_counter()
    for batch_idx, batch in enumerate(train_loader):
        for k in batch:
            if k != 'meta':
                batch[k] = batch[k].to(device=cfg.device, non_blocking=True)

        outputs = model(batch['image'])
        hmap, regs, w_h_, real_, imag_ = zip(*outputs)
        regs = [_tranpose_and_gather_feature(r, batch['inds']) for r in regs]
        w_h_ = [_tranpose_and_gather_feature(r, batch['inds']) for r in w_h_]
        real_ = [_tranpose_and_gather_feature(r, batch['inds']) for r in real_]
        imag_ = [_tranpose_and_gather_feature(r, batch['inds']) for r in imag_]

        hmap_loss = _neg_loss(hmap, batch['hmap'])
        reg_loss = _reg_loss(regs, batch['regs'], batch['ind_masks'])
        w_h_loss = _reg_loss(w_h_, batch['w_h_'], batch['ind_masks'])
        real_loss = norm_reg_loss(real_, batch['real'], batch['ind_masks'])
        imag_loss = norm_reg_loss(imag_, batch['imaginary'], batch['ind_masks'])
        loss = hmap_loss + 1 * reg_loss + 0.1 * w_h_loss + \
               cfg.code_loss_weight * (real_loss + imag_loss)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch_idx % cfg.log_interval == 0:
            duration = time.perf_counter() - tic
            tic = time.perf_counter()
            print('[%d/%d-%d/%d] ' % (epoch, cfg.num_epochs, batch_idx, len(train_loader)) +
                  ' hmap_loss= %.3f reg_loss= %.3f offset_loss= %.3f real_loss= %.3f imag_loss= %.3f' %
                  (hmap_loss.item(), reg_loss.item(), w_h_loss.item(),
                   real_loss.item(), imag_loss.item()) +
                  ' (%d samples/sec)' % (cfg.batch_size * cfg.log_interval / duration))

            step = len(train_loader) * epoch + batch_idx
            summary_writer.add_scalar('hmap_loss', hmap_loss.item(), step)
            summary_writer.add_scalar('reg_loss', reg_loss.item(), step)
            summary_writer.add_scalar('offset_loss', w_h_loss.item(), step)
            summary_writer.add_scalar('real_loss', real_loss.item(), step)
            summary_writer.add_scalar('imag_loss', imag_loss.item(), step)
    return
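# `_neg_loss` is the penalty-reduced pixel-wise focal loss from CornerNet /
# CenterNet, averaged over the intermediate supervision stacks. A sketch under
# the assumption that this repo follows the reference implementation (the real
# helper may clamp or normalize slightly differently):
import torch

def _neg_loss_sketch(preds, targets):
    # preds: list of raw heatmap logits (B, C, H, W);
    # targets: Gaussian-splatted ground truth in [0, 1], exactly 1 at object centers.
    pos_inds = targets.eq(1).float()
    neg_inds = targets.lt(1).float()
    neg_weights = torch.pow(1 - targets, 4)  # down-weight negatives near a positive
    loss = 0.
    for pred in preds:
        pred = torch.clamp(torch.sigmoid(pred), min=1e-4, max=1 - 1e-4)
        pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
        neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
        num_pos = pos_inds.sum()
        if num_pos == 0:
            loss = loss - neg_loss.sum()
        else:
            loss = loss - (pos_loss.sum() + neg_loss.sum()) / num_pos
    return loss / len(preds)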
def train(epoch):
    print_log('\n Epoch: %d' % epoch)
    model.train()
    tic = time.perf_counter()
    for batch_idx, batch in enumerate(train_loader):
        for k in batch:
            if k != 'meta':
                batch[k] = batch[k].to(device=cfg.device, non_blocking=True)

        outputs = model(batch['image'])
        hmap, regs, w_h_, codes_, shapes_ = zip(*outputs)
        regs = [_tranpose_and_gather_feature(r, batch['inds']) for r in regs]
        w_h_ = [_tranpose_and_gather_feature(r, batch['inds']) for r in w_h_]
        codes_ = [_tranpose_and_gather_feature(r, batch['inds']) for r in codes_]
        shapes_ = [_tranpose_and_gather_feature(r, batch['inds']) for r in shapes_]

        hmap_loss = _neg_loss(hmap, batch['hmap'])
        reg_loss = _reg_loss(regs, batch['regs'], batch['ind_masks'])
        w_h_loss = _reg_loss(w_h_, batch['w_h_'], batch['ind_masks'])
        cmm_loss = _reg_loss(shapes_, batch['shapes'], batch['ind_masks'])
        # Contour-mapping alternatives tried earlier:
        # cmm_loss = contour_mapping_loss(codes_, shapes_, batch['shapes'], batch['ind_masks'])
        # cmm_loss = norm_contour_mapping_loss(codes_, shapes_, batch['shapes'], batch['w_h_'], batch['ind_masks'])
        loss = hmap_loss + 1 * reg_loss + 0.1 * w_h_loss + cfg.cmm_loss_weight * cmm_loss

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch_idx % cfg.log_interval == 0:
            duration = time.perf_counter() - tic
            tic = time.perf_counter()
            print_log('[%d/%d-%d/%d] ' % (epoch, cfg.num_epochs, batch_idx, len(train_loader)) +
                      ' hmap_loss= %.3f reg_loss= %.3f w_h_loss= %.3f cmm_loss= %.3f' %
                      (hmap_loss.item(), reg_loss.item(), w_h_loss.item(), cmm_loss.item()) +
                      ' (%d samples/sec)' % (cfg.batch_size * cfg.log_interval / duration))

            step = len(train_loader) * epoch + batch_idx
            summary_writer.add_scalar('hmap_loss', hmap_loss.item(), step)
            summary_writer.add_scalar('reg_loss', reg_loss.item(), step)
            summary_writer.add_scalar('w_h_loss', w_h_loss.item(), step)
            summary_writer.add_scalar('cmm_loss', cmm_loss.item(), step)
    return
def train(epoch):
    print('\n Epoch: %d' % epoch)
    model.train()
    tic = time.perf_counter()
    for batch_idx, batch in enumerate(train_loader):
        for k in batch:
            if k != 'meta':
                batch[k] = batch[k].to(device=cfg.device, non_blocking=True)

        outputs = model(batch['image'])
        hmap, regs, w_h_, pxpy = zip(*outputs)  # each head map: batch x C x H x W
        regs = [_tranpose_and_gather_feature(r, batch['inds']) for r in regs]
        pxpy = [_tranpose_and_gather_feature(r, batch['inds']) for r in pxpy]
        w_h_ = [_tranpose_and_gather_feature(r, batch['inds']) for r in w_h_]  # gathered: batch x K x C, e.g. batch x 128 x 2

        hmap_loss = _neg_loss(hmap, batch['hmap'])
        reg_loss = _SmoothL1Loss(regs, batch['regs'], batch['ind_masks'])
        pxpy_loss = _reg_loss(pxpy, batch['pxpy'], batch['ind_masks'])
        w_h_loss = _SmoothL1Loss(w_h_, batch['w_h_'], batch['ind_masks'])
        loss = hmap_loss + 10 * reg_loss + 0.1 * w_h_loss + 0.1 * pxpy_loss

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch_idx % cfg.log_interval == 0:
            duration = time.perf_counter() - tic
            tic = time.perf_counter()
            print('[%d/%d-%d/%d] ' % (epoch, cfg.num_epochs, batch_idx, len(train_loader)) +
                  ' hmap_loss= %.5f reg_loss= %.5f w_h_loss= %.5f pxpy_loss= %.5f' %
                  (hmap_loss.item(), reg_loss.item(), w_h_loss.item(), pxpy_loss.item()) +
                  ' (%d samples/sec)' % (cfg.batch_size * cfg.log_interval / duration))

            step = len(train_loader) * epoch + batch_idx
            summary_writer.add_scalar('hmap_loss', hmap_loss.item(), step)
            summary_writer.add_scalar('reg_loss', reg_loss.item(), step)
            summary_writer.add_scalar('w_h_loss', w_h_loss.item(), step)
            summary_writer.add_scalar('pxpy_loss', pxpy_loss.item(), step)
    return
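# This variant swaps the usual masked L1 (`_reg_loss`) for a masked Smooth L1
# on the center offsets and sizes. `_SmoothL1Loss` is repo-specific; a
# plausible sketch, assuming it mirrors the standard CenterNet `_reg_loss`
# with `F.smooth_l1_loss` substituted for `F.l1_loss`:
import torch.nn.functional as F

def _smooth_l1_reg_loss_sketch(regs, gt_regs, mask):
    # regs: list of gathered predictions (B, K, C), one entry per stack;
    # mask: (B, K) flags so padded object slots contribute zero loss.
    mask = mask[:, :, None].expand_as(gt_regs).float()
    return sum(F.smooth_l1_loss(r * mask, gt_regs * mask, reduction='sum') /
               (mask.sum() + 1e-4) for r in regs) / len(regs)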
def train(epoch):
    print('\n Epoch: %d' % epoch)
    model.train()
    tic = time.perf_counter()
    for batch_idx, batch in enumerate(train_loader):
        # The batch deliberately stays on the CPU here: only the image is sent
        # to the device, and the head outputs are pulled back with .cpu() so
        # the gathers and losses run on the host. The heatmaps are also moved
        # back, since the original left them on the device, which would mix
        # devices inside _neg_loss.
        outputs = model(batch['image'].to(cfg.device))
        hmap, regs, w_h_, theta = zip(*outputs)

        hmap = [h.cpu() for h in hmap]
        regs = [_tranpose_and_gather_feature(r.cpu(), batch['inds']) for r in regs]
        w_h_ = [_tranpose_and_gather_feature(r.cpu(), batch['inds']) for r in w_h_]
        theta = [_tranpose_and_gather_feature(r.cpu(), batch['inds']) for r in theta]

        hmap_loss = _neg_loss(hmap, batch['hmap'])
        reg_loss = _reg_loss(regs, batch['regs'], batch['ind_masks'])
        w_h_loss = _reg_loss(w_h_, batch['w_h_'], batch['ind_masks'])
        theta_loss = _theta_loss(theta, batch['theta'], batch['ind_masks'])
        loss = hmap_loss + 1. * reg_loss + 0.1 * w_h_loss + 1. * theta_loss

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch_idx % cfg.log_interval == 0:
            duration = time.perf_counter() - tic
            tic = time.perf_counter()
            print('[%d/%d-%d/%d] ' % (epoch, cfg.num_epochs, batch_idx, len(train_loader)) +
                  ' hmap_loss= %.5f reg_loss= %.5f w_h_loss= %.5f theta_loss= %.5f' %
                  (hmap_loss.item(), reg_loss.item(), w_h_loss.item(), theta_loss.item()) +
                  ' (%d samples/sec)' % (cfg.batch_size * cfg.log_interval / duration))
    return
def train(epoch):
    print('\n Epoch: %d' % epoch)
    model.train()
    tic = time.perf_counter()
    for batch_idx, batch in enumerate(train_loader):
        for k in batch:
            if k != 'meta':
                batch[k] = batch[k].to(device=cfg.device, non_blocking=True)

        dict_tensor = torch.from_numpy(dictionary.astype(np.float32)).to(cfg.device, non_blocking=True)
        dict_tensor.requires_grad = False

        outputs = model(batch['image'])
        hmap, regs, w_h_, codes_, offsets_ = zip(*outputs)
        regs = [_tranpose_and_gather_feature(r, batch['inds']) for r in regs]
        w_h_ = [_tranpose_and_gather_feature(r, batch['inds']) for r in w_h_]
        codes_ = [_tranpose_and_gather_feature(r, batch['inds']) for r in codes_]
        offsets_ = [_tranpose_and_gather_feature(r, batch['inds']) for r in offsets_]
        # Pair each code stack with its own offset stack. The original nested
        # comprehension (`for c in codes_ for o in offsets_`) was a cross
        # product over stacks; see the note after this function.
        shapes_ = [torch.matmul(c, dict_tensor) + o for c, o in zip(codes_, offsets_)]

        hmap_loss = _neg_loss(hmap, batch['hmap'])
        reg_loss = _reg_loss(regs, batch['regs'], batch['ind_masks'])
        w_h_loss = _reg_loss(w_h_, batch['w_h_'], batch['ind_masks'])
        codes_loss = norm_reg_loss(codes_, batch['codes'], batch['ind_masks'])
        shapes_loss = contour_mapping_loss(codes_, shapes_, batch['shapes'], batch['ind_masks'], roll=False)
        loss = 2 * hmap_loss + 1 * reg_loss + 0.1 * w_h_loss + cfg.code_loss_weight * codes_loss + \
               cfg.shape_loss_weight * shapes_loss

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch_idx % cfg.log_interval == 0:
            duration = time.perf_counter() - tic
            tic = time.perf_counter()
            print('[%d/%d-%d/%d] ' % (epoch, cfg.num_epochs, batch_idx, len(train_loader)) +
                  ' hmap_loss= %.3f reg_loss= %.3f offset_loss= %.3f code_loss= %.3f shape_loss= %.3f' %
                  (hmap_loss.item(), reg_loss.item(), w_h_loss.item(),
                   codes_loss.item(), shapes_loss.item()) +
                  ' (%d samples/sec)' % (cfg.batch_size * cfg.log_interval / duration))

            step = len(train_loader) * epoch + batch_idx
            summary_writer.add_scalar('hmap_loss', hmap_loss.item(), step)
            summary_writer.add_scalar('reg_loss', reg_loss.item(), step)
            summary_writer.add_scalar('offset_loss', w_h_loss.item(), step)
            summary_writer.add_scalar('code_loss', codes_loss.item(), step)
            summary_writer.add_scalar('shape_loss', shapes_loss.item(), step)
    return
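# Why the zip fix above matters: `for c in codes_ for o in offsets_` is a
# nested comprehension, i.e. a cross product, so with S stacks it would build
# S*S shape tensors and misalign them with the per-stack targets. `zip` pairs
# each code stack with its own offset stack:
codes = ['c0', 'c1']
offsets = ['o0', 'o1']
cross = [(c, o) for c in codes for o in offsets]  # 4 pairs: cross product (wrong)
paired = list(zip(codes, offsets))                # 2 pairs: element-wise (intended)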
def train(epoch):
    print_log('\n Epoch: %d' % epoch)
    model.train()
    # torch.autograd.set_detect_anomaly(mode=True)
    tic = time.perf_counter()
    for batch_idx, batch in enumerate(train_loader):
        for k in batch:
            if k != 'meta':
                batch[k] = batch[k].to(device=cfg.device, non_blocking=True)

        dict_tensor = torch.from_numpy(dictionary.astype(np.float32)).to(cfg.device, non_blocking=True)
        dict_tensor.requires_grad = False

        outputs = model(batch['image'])
        # hmap, regs, w_h_, codes_1, codes_2, codes_3, offsets = zip(*outputs)
        hmap, regs, w_h_, codes, offsets = zip(*outputs)
        regs = [_tranpose_and_gather_feature(r, batch['inds']) for r in regs]
        w_h_ = [_tranpose_and_gather_feature(r, batch['inds']) for r in w_h_]
        codes = [_tranpose_and_gather_feature(r, batch['inds']) for r in codes]
        offsets = [_tranpose_and_gather_feature(r, batch['inds']) for r in offsets]

        hmap_loss = _neg_loss(hmap, batch['hmap'])
        reg_loss = _reg_loss(regs, batch['regs'], batch['ind_masks'])
        w_h_loss = _reg_loss(w_h_, batch['w_h_'], batch['ind_masks'])
        offsets_loss = _reg_loss(offsets, batch['offsets'], batch['ind_masks'])
        # (An earlier three-branch variant averaged norm_reg_loss over codes_1..codes_3.)
        if cfg.code_loss == 'norm':
            codes_loss = norm_reg_loss(codes, batch['codes'], batch['ind_masks'], sparsity=0.)
        elif cfg.code_loss == 'adapt':
            codes_loss = adapt_norm_reg_loss(codes, batch['codes'], batch['ind_masks'],
                                             sparsity=0., norm=cfg.adapt_norm)
        elif cfg.code_loss == 'wing':
            codes_loss = wing_norm_reg_loss(codes, batch['codes'], batch['ind_masks'], sparsity=0.,
                                            epsilon=cfg.wing_epsilon, omega=cfg.wing_omega)
        else:
            raise NotImplementedError('Loss type for code not implemented yet: %s' % cfg.code_loss)

        loss = 1. * hmap_loss + 1. * reg_loss + 0.1 * w_h_loss + 0.1 * offsets_loss + \
               cfg.code_loss_weight * codes_loss

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch_idx % cfg.log_interval == 0:
            duration = time.perf_counter() - tic
            tic = time.perf_counter()
            print_log('[%d/%d-%d/%d] ' % (epoch, cfg.num_epochs, batch_idx, len(train_loader)) +
                      'Loss: hmap = %.3f reg = %.3f w_h = %.3f code = %.3f offsets = %.3f' %
                      (hmap_loss.item(), reg_loss.item(), w_h_loss.item(),
                       codes_loss.item(), offsets_loss.item()) +
                      ' (%d samples/sec)' % (cfg.batch_size * cfg.log_interval / duration))

            step = len(train_loader) * epoch + batch_idx
            summary_writer.add_scalar('hmap_loss', hmap_loss.item(), step)
            summary_writer.add_scalar('reg_loss', reg_loss.item(), step)
            summary_writer.add_scalar('w_h_loss', w_h_loss.item(), step)
            summary_writer.add_scalar('offset_loss', offsets_loss.item(), step)
            summary_writer.add_scalar('code_loss', codes_loss.item(), step)
    return
def train(epoch):
    print_log('\n Epoch: %d' % epoch)
    model.train()
    tic = time.perf_counter()
    for batch_idx, batch in enumerate(train_loader):
        for k in batch:
            if k != 'meta':
                batch[k] = batch[k].to(device=cfg.device, non_blocking=True)

        dict_tensor = torch.from_numpy(dictionary.astype(np.float32)).to(cfg.device, non_blocking=True)
        dict_tensor.requires_grad = False

        outputs = model(batch['image'])
        hmap, regs, w_h_, codes_1, codes_2, codes_3, offsets = zip(*outputs)
        regs = [_tranpose_and_gather_feature(r, batch['inds']) for r in regs]
        w_h_ = [_tranpose_and_gather_feature(r, batch['inds']) for r in w_h_]
        c_1 = [_tranpose_and_gather_feature(r, batch['inds']) for r in codes_1]
        c_2 = [_tranpose_and_gather_feature(r, batch['inds']) for r in codes_2]
        c_3 = [_tranpose_and_gather_feature(r, batch['inds']) for r in codes_3]
        offsets = [_tranpose_and_gather_feature(r, batch['inds']) for r in offsets]
        # shapes_1 = [torch.matmul(c, dict_tensor) for c in c_1]
        # shapes_2 = [torch.matmul(c, dict_tensor) for c in c_2]
        # shapes_3 = [torch.matmul(c, dict_tensor) for c in c_3]

        hmap_loss = _neg_loss(hmap, batch['hmap'])
        # occ_loss = _neg_loss(occ_map, batch['occ_map'], ex=4.0)
        reg_loss = _reg_loss(regs, batch['regs'], batch['ind_masks'])
        w_h_loss = _reg_loss(w_h_, batch['w_h_'], batch['ind_masks'])
        offsets_loss = _reg_loss(offsets, batch['offsets'], batch['ind_masks'])
        codes_loss = (norm_reg_loss(c_1, batch['codes'], batch['ind_masks']) +
                      norm_reg_loss(c_2, batch['codes'], batch['ind_masks']) +
                      norm_reg_loss(c_3, batch['codes'], batch['ind_masks'])) / 3.
        # Earlier contour-mapping variants, kept for reference:
        # cmm_loss = (contour_mapping_loss(c_1, shapes_1, batch['shapes'], batch['ind_masks'], roll=False)
        #             + contour_mapping_loss(c_2, shapes_2, batch['shapes'], batch['ind_masks'], roll=False)
        #             + contour_mapping_loss(c_3, shapes_3, batch['shapes'], batch['ind_masks'], roll=False)) / 3.
        # cmm_loss = (_reg_loss(shapes_1, batch['shapes'], batch['ind_masks'])
        #             + _reg_loss(shapes_2, batch['shapes'], batch['ind_masks'])
        #             + _reg_loss(shapes_3, batch['shapes'], batch['ind_masks'])) / 3.
        # loss = 1. * hmap_loss + 1 * reg_loss + 0.1 * w_h_loss + cfg.cmm_loss_weight * cmm_loss \
        #        + cfg.code_loss_weight * codes_loss + 0.1 * offsets_loss
        loss = 1 * hmap_loss + 1 * reg_loss + 0.1 * w_h_loss \
               + cfg.code_loss_weight * codes_loss + 0.1 * offsets_loss

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch_idx % cfg.log_interval == 0:
            duration = time.perf_counter() - tic
            tic = time.perf_counter()
            print_log('[%d/%d-%d/%d] ' % (epoch, cfg.num_epochs, batch_idx, len(train_loader)) +
                      'Loss: hmap = %.3f reg = %.3f w_h = %.3f code = %.3f offsets = %.3f' %
                      (hmap_loss.item(), reg_loss.item(), w_h_loss.item(),
                       codes_loss.item(), offsets_loss.item()) +
                      ' (%d samples/sec)' % (cfg.batch_size * cfg.log_interval / duration))

            step = len(train_loader) * epoch + batch_idx
            summary_writer.add_scalar('hmap_loss', hmap_loss.item(), step)
            summary_writer.add_scalar('reg_loss', reg_loss.item(), step)
            summary_writer.add_scalar('w_h_loss', w_h_loss.item(), step)
            summary_writer.add_scalar('offset_loss', offsets_loss.item(), step)
            summary_writer.add_scalar('code_loss', codes_loss.item(), step)
    return
def train(epoch):
    print_log('\n Epoch: %d' % epoch)
    model.train()
    tic = time.perf_counter()
    for batch_idx, batch in enumerate(train_loader):
        for k in batch:
            if k != 'meta':
                batch[k] = batch[k].to(device=cfg.device, non_blocking=True)
        # dict_tensor = torch.from_numpy(dictionary.astype(np.float32)).to(cfg.device, non_blocking=True)
        # dict_tensor.requires_grad = False

        outputs = model(batch['image'])
        hmap, regs, w_h_, codes_1, offsets, votes = zip(*outputs)
        regs = [_tranpose_and_gather_feature(r, batch['inds']) for r in regs]
        w_h_ = [_tranpose_and_gather_feature(r, batch['inds']) for r in w_h_]
        c_1 = [_tranpose_and_gather_feature(r, batch['inds']) for r in codes_1]
        offsets = [_tranpose_and_gather_feature(r, batch['inds']) for r in offsets]
        votes = [_tranpose_and_gather_feature(r, batch['inds']) for r in votes]

        hmap_loss = _neg_loss(hmap, batch['hmap'])
        vote_loss = _bce_loss(votes, batch['votes'], batch['ind_masks'])
        reg_loss = _reg_loss(regs, batch['regs'], batch['ind_masks'])
        w_h_loss = _reg_loss(w_h_, batch['w_h_'], batch['ind_masks'])
        offsets_loss = _reg_loss(offsets, batch['offsets'], batch['ind_masks'])
        # (An earlier three-branch variant averaged norm_reg_loss over codes_1..codes_3.)
        codes_loss = norm_reg_loss(c_1, batch['codes'], batch['ind_masks'], sparsity=0.0)

        loss = 1. * hmap_loss + 1. * reg_loss + 0.1 * w_h_loss + 0.1 * offsets_loss + \
               cfg.vote_loss_weight * vote_loss + cfg.code_loss_weight * codes_loss

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch_idx % cfg.log_interval == 0:
            duration = time.perf_counter() - tic
            tic = time.perf_counter()
            print_log('[%d/%d-%d/%d] ' % (epoch, cfg.num_epochs, batch_idx, len(train_loader)) +
                      'Loss: hmap = %.3f reg = %.3f w_h = %.3f code = %.3f offsets = %.3f votes = %.3f' %
                      (hmap_loss.item(), reg_loss.item(), w_h_loss.item(),
                       codes_loss.item(), offsets_loss.item(), vote_loss.item()) +
                      ' (%d samples/sec)' % (cfg.batch_size * cfg.log_interval / duration))

            step = len(train_loader) * epoch + batch_idx
            summary_writer.add_scalar('hmap_loss', hmap_loss.item(), step)
            summary_writer.add_scalar('vote_loss', vote_loss.item(), step)
            summary_writer.add_scalar('reg_loss', reg_loss.item(), step)
            summary_writer.add_scalar('w_h_loss', w_h_loss.item(), step)
            summary_writer.add_scalar('offset_loss', offsets_loss.item(), step)
            summary_writer.add_scalar('code_loss', codes_loss.item(), step)
    return
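# `_bce_loss` on the vote head is presumably a masked binary cross-entropy
# over the gathered object slots, analogous to the masked `_reg_loss` (and to
# the inline BCE used in a later variant in this file). A sketch under that
# assumption; `preds` are taken to be probabilities already in (0, 1):
import torch.nn.functional as F

def _bce_loss_sketch(preds, gt, mask):
    # preds: list of gathered probabilities (B, K, C);
    # mask: (B, K) flags so padded object slots contribute zero loss.
    mask = mask[:, :, None].expand_as(gt).float()
    return sum(F.binary_cross_entropy(p * mask, gt * mask, reduction='sum') /
               (mask.sum() + 1e-4) for p in preds) / len(preds)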
def train(epoch):
    print_log('\n Epoch: %d' % epoch)
    model.train()
    tic = time.perf_counter()
    for batch_idx, batch in enumerate(train_loader):
        for k in batch:
            if k != 'meta':
                batch[k] = batch[k].to(device=cfg.device, non_blocking=True)

        dict_tensor = torch.from_numpy(dictionary.astype(np.float32)).to(cfg.device, non_blocking=True)
        dict_tensor.requires_grad = False

        outputs = model(batch['image'])
        hmap, regs, w_h_, codes, offsets, votes = zip(*outputs)
        regs = [_tranpose_and_gather_feature(r, batch['inds']) for r in regs]
        w_h_ = [_tranpose_and_gather_feature(r, batch['inds']) for r in w_h_]
        codes = [_tranpose_and_gather_feature(r, batch['inds']) for r in codes]
        votes = [_tranpose_and_gather_feature(r, batch['inds']) for r in votes]
        offsets = [_tranpose_and_gather_feature(r, batch['inds']) for r in offsets]
        shapes = [torch.matmul(c, dict_tensor) for c in codes]

        hmap_loss = _neg_loss(hmap, batch['hmap'])
        bce_loss = _bce_loss(votes, batch['votes'], batch['ind_masks'])
        reg_loss = _reg_loss(regs, batch['regs'], batch['ind_masks'])
        w_h_loss = _reg_loss(w_h_, batch['w_h_'], batch['ind_masks'])
        offsets_loss = _reg_loss(offsets, batch['offsets'], batch['ind_masks'])

        if cfg.code_loss == 'norm':
            codes_loss = norm_reg_loss(codes, batch['codes'], batch['ind_masks'], sparsity=0.01)
        elif cfg.code_loss == 'adapt':
            codes_loss = adapt_norm_reg_loss(codes, batch['codes'], batch['ind_masks'],
                                             norm=cfg.adapt_norm)
        elif cfg.code_loss == 'wing':
            codes_loss = wing_norm_reg_loss(codes, batch['codes'], batch['ind_masks'], sparsity=0.01,
                                            epsilon=cfg.wing_epsilon, omega=cfg.wing_omega)
        elif cfg.code_loss == 'mse':
            codes_loss = mse_reg_loss(codes, batch['codes'], batch['ind_masks'], sparsity=0.001)
        else:
            raise NotImplementedError('Loss type for code not implemented yet: %s' % cfg.code_loss)

        if cfg.shape_loss == 'cmm':
            shape_loss = contour_mapping_loss(codes, shapes, batch['shapes'], batch['ind_masks'], roll=False)
        elif cfg.shape_loss == 'piou':
            shape_loss = 0.
            for i in range(len(shapes)):
                shape_loss += PIoU_loss(pred_shapes=shapes[i], gt_shapes=batch['shapes'],
                                        mask=batch['ind_masks'])
        else:
            raise NotImplementedError('Loss type for shape not implemented yet: %s' % cfg.shape_loss)

        loss = 1 * hmap_loss + 1 * reg_loss + 0.1 * w_h_loss + cfg.code_loss_weight * codes_loss \
               + 0.5 * offsets_loss + cfg.shape_loss_weight * shape_loss + cfg.bce_loss_weight * bce_loss

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch_idx % cfg.log_interval == 0:
            duration = time.perf_counter() - tic
            tic = time.perf_counter()
            print_log('[%d/%d-%d/%d] ' % (epoch, cfg.num_epochs, batch_idx, len(train_loader)) +
                      'Loss: hmap = %.3f reg = %.3f w_h = %.3f code = %.3f offsets = %.3f shape = %.3f vote = %.3f' %
                      (hmap_loss.item(), reg_loss.item(), w_h_loss.item(), codes_loss.item(),
                       offsets_loss.item(), shape_loss.item(), bce_loss.item()) +
                      ' (%d samples/sec)' % (cfg.batch_size * cfg.log_interval / duration))

            step = len(train_loader) * epoch + batch_idx
            summary_writer.add_scalar('hmap_loss', hmap_loss.item(), step)
            summary_writer.add_scalar('reg_loss', reg_loss.item(), step)
            summary_writer.add_scalar('w_h_loss', w_h_loss.item(), step)
            summary_writer.add_scalar('offset_loss', offsets_loss.item(), step)
            summary_writer.add_scalar('code_loss', codes_loss.item(), step)
            summary_writer.add_scalar('vote_loss', bce_loss.item(), step)
            summary_writer.add_scalar('shape_loss', shape_loss.item(), step)
    return
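# The code heads above regress sparse coefficients over a learned contour
# dictionary, and a shape is decoded as a linear combination of dictionary
# atoms (`torch.matmul(c, dict_tensor)`). A toy decode; `n_codes` and
# `n_points` are illustrative stand-ins, not the repo's actual sizes:
import numpy as np
import torch

n_codes, n_points = 64, 32
dictionary = np.random.randn(n_codes, n_points * 2)        # stand-in for the learned dictionary
dict_tensor = torch.from_numpy(dictionary.astype(np.float32))

codes = torch.randn(4, 128, n_codes)                       # (batch, K objects, n_codes)
shapes = torch.matmul(codes, dict_tensor)                  # (batch, K, n_points * 2) contour points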
def train(epoch):
    print_log('\n Epoch: %d' % epoch)
    model.train()
    tic = time.perf_counter()
    for batch_idx, batch in enumerate(train_loader):
        for k in batch:
            if k != 'meta':
                batch[k] = batch[k].to(device=cfg.device, non_blocking=True)

        dict_tensor = torch.from_numpy(dictionary.astype(np.float32)).to(cfg.device, non_blocking=True)
        dict_tensor.requires_grad = False

        outputs = model(batch['image'])
        hmap, regs, w_h_, offsets, codes_1, codes_2, codes_3, active_cls = zip(*outputs)
        # hmap, regs, w_h_, offsets, active_codes, inactive_codes, active_cls = zip(*outputs)
        regs = [_tranpose_and_gather_feature(r, batch['inds']) for r in regs]
        w_h_ = [_tranpose_and_gather_feature(r, batch['inds']) for r in w_h_]
        active_cls = [_tranpose_and_gather_feature(r, batch['inds']) for r in active_cls]
        offsets = [_tranpose_and_gather_feature(r, batch['inds']) for r in offsets]

        active_mask = batch['active']
        c_1 = [_tranpose_and_gather_feature(r, batch['inds']) * active_mask for r in codes_1]
        c_2 = [_tranpose_and_gather_feature(r, batch['inds']) * active_mask for r in codes_2]
        c_3 = [_tranpose_and_gather_feature(r, batch['inds']) * active_mask for r in codes_3]
        # active_codes = [_tranpose_and_gather_feature(r, batch['inds']) * active_mask for r in active_codes]

        mask = batch['ind_masks'][:, :, None].expand_as(batch['active']).float()
        active_cls_loss = sum(BCE(r * mask, batch['active'] * mask) / (mask.sum() + 1e-4)
                              for r in active_cls) / len(active_cls)
        # active_cls_loss = _bce_loss(active_cls, batch['active'], batch['ind_masks'])

        hmap_loss = _neg_loss(hmap, batch['hmap'])
        reg_loss = _reg_loss(regs, batch['regs'], batch['ind_masks'])
        w_h_loss = _reg_loss(w_h_, batch['w_h_'], batch['ind_masks'])
        offsets_loss = _reg_loss(offsets, batch['offsets'], batch['ind_masks'])
        codes_loss = (sparse_reg_loss(c_1, batch['codes'] * active_mask, batch['ind_masks'], sparsity=0) +
                      sparse_reg_loss(c_2, batch['codes'] * active_mask, batch['ind_masks'], sparsity=0) +
                      sparse_reg_loss(c_3, batch['codes'] * active_mask, batch['ind_masks'], sparsity=0)) / 3.
        # Earlier variants, kept for reference:
        # codes_loss = norm_reg_loss(active_codes, batch['codes'] * active_mask, batch['ind_masks']) \
        #              + norm_reg_loss(inactive_codes, batch['codes'] * inactive_mask, batch['ind_masks'])
        # cmm_loss = (contour_mapping_loss(c_1, shapes_1, batch['shapes'], batch['ind_masks'], roll=False)
        #             + contour_mapping_loss(c_2, shapes_2, batch['shapes'], batch['ind_masks'], roll=False)
        #             + contour_mapping_loss(c_3, shapes_3, batch['shapes'], batch['ind_masks'], roll=False)) / 3.

        loss = 1 * hmap_loss + 1 * reg_loss + 0.1 * w_h_loss + cfg.code_loss_weight * codes_loss \
               + 0.1 * offsets_loss + cfg.bce_loss_weight * active_cls_loss

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch_idx % cfg.log_interval == 0:
            duration = time.perf_counter() - tic
            tic = time.perf_counter()
            print_log('[%d/%d-%d/%d] ' % (epoch, cfg.num_epochs, batch_idx, len(train_loader)) +
                      'Loss: hmap = %.3f reg = %.3f w_h = %.3f code = %.3f offsets = %.3f active = %.3f' %
                      (hmap_loss.item(), reg_loss.item(), w_h_loss.item(),
                       codes_loss.item(), offsets_loss.item(), active_cls_loss.item()) +
                      ' (%d samples/sec)' % (cfg.batch_size * cfg.log_interval / duration))

            step = len(train_loader) * epoch + batch_idx
            summary_writer.add_scalar('hmap_loss', hmap_loss.item(), step)
            summary_writer.add_scalar('reg_loss', reg_loss.item(), step)
            summary_writer.add_scalar('w_h_loss', w_h_loss.item(), step)
            summary_writer.add_scalar('offset_loss', offsets_loss.item(), step)
            summary_writer.add_scalar('code_loss', codes_loss.item(), step)
            summary_writer.add_scalar('active_cls_loss', active_cls_loss.item(), step)
    return
def train(epoch):
    print_log('\n Epoch: %d' % epoch)
    model.train()
    tic = time.perf_counter()
    for batch_idx, batch in enumerate(train_loader):
        for k in batch:
            if k != 'meta':
                batch[k] = batch[k].to(device=cfg.device, non_blocking=True)

        dict_tensor = torch.from_numpy(dictionary.astype(np.float32)).to(cfg.device, non_blocking=True)
        dict_tensor.requires_grad = False

        outputs = model(batch['image'], batch['inds'], batch['centers'])
        hmap, regs, w_h_, offsets, codes_1, codes_2, codes_3, shapes, shapes_1 = zip(*outputs)
        regs = [_tranpose_and_gather_feature(r, batch['inds']) for r in regs]
        w_h_ = [_tranpose_and_gather_feature(r, batch['inds']) for r in w_h_]
        c_1 = [_tranpose_and_gather_feature(r, batch['inds']) for r in codes_1]
        c_2 = [_tranpose_and_gather_feature(r, batch['inds']) for r in codes_2]
        c_3 = [_tranpose_and_gather_feature(r, batch['inds']) for r in codes_3]
        offsets = [_tranpose_and_gather_feature(r, batch['inds']) for r in offsets]

        hmap_loss = _neg_loss(hmap, batch['hmap'])
        reg_loss = _reg_loss(regs, batch['regs'], batch['ind_masks'])
        w_h_loss = _reg_loss(w_h_, batch['w_h_'], batch['ind_masks'])
        offsets_loss = _reg_loss(offsets, batch['offsets'], batch['ind_masks'])
        codes_loss = (sparse_reg_loss(c_1, batch['codes'], batch['ind_masks']) +
                      sparse_reg_loss(c_2, batch['codes'], batch['ind_masks']) +
                      sparse_reg_loss(c_3, batch['codes'], batch['ind_masks'])) / 3.

        # Masked L1 between both shape branches and the centered GT contours,
        # averaged over stacks and branches (contour_mapping_loss and a
        # per-branch poly L1 were earlier alternatives).
        mask = batch['ind_masks'][:, :, None].expand_as(batch['centered_shapes']).float()
        shapes_loss = (sum(nn.functional.l1_loss(s * mask, batch['centered_shapes'] * mask,
                                                 reduction='sum') / (mask.sum() + 1e-4) for s in shapes) +
                       sum(nn.functional.l1_loss(s * mask, batch['centered_shapes'] * mask,
                                                 reduction='sum') / (mask.sum() + 1e-4) for s in shapes_1)) \
                      / len(shapes) / 2. * cfg.shape_loss_weight

        loss = 1 * hmap_loss + 1 * reg_loss + 0.1 * w_h_loss + cfg.code_loss_weight * codes_loss + \
               shapes_loss + 0.1 * offsets_loss

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch_idx % cfg.log_interval == 0:
            duration = time.perf_counter() - tic
            tic = time.perf_counter()
            print_log('[%d/%d-%d/%d] ' % (epoch, cfg.num_epochs, batch_idx, len(train_loader)) +
                      ' hmap_loss = %.3f reg_loss = %.3f w_h_loss = %.3f offsets = %.3f code_loss = %.3f shapes_loss = %.3f' %
                      (hmap_loss.item(), reg_loss.item(), w_h_loss.item(),
                       offsets_loss.item(), codes_loss.item(), shapes_loss.item()) +
                      ' (%d samples/sec)' % (cfg.batch_size * cfg.log_interval / duration))

            step = len(train_loader) * epoch + batch_idx
            summary_writer.add_scalar('hmap_loss', hmap_loss.item(), step)
            summary_writer.add_scalar('reg_loss', reg_loss.item(), step)
            summary_writer.add_scalar('w_h_loss', w_h_loss.item(), step)
            summary_writer.add_scalar('code_loss', codes_loss.item(), step)
            summary_writer.add_scalar('shape_loss', shapes_loss.item(), step)
            summary_writer.add_scalar('offsets_loss', offsets_loss.item(), step)
    return
# Since this uses CenterTrack's nuScenes dataset, the training code continues
# in CenterTrack's code format. Status: dataset done, model done; loss and
# train loop in progress.
# Sketch of the train step. The loop header, device moves, and epoch setup are
# filled in from the surrounding functions' pattern; the model is assumed to
# return a dict of heads ('hm', 'reg', 'wh'), each wrapped in a single-element
# list because the loss helpers expect one entry per supervision stack.
def train(epoch):
    print('\n Epoch: %d' % epoch)
    model.train()
    tic = time.perf_counter()
    for batch_idx, batch in enumerate(train_loader):
        for k in batch:
            if k != 'meta':
                batch[k] = batch[k].to(device=cfg.device, non_blocking=True)

        outputs = model(batch['image'])
        hmap = [outputs['hm']]
        regs = [_tranpose_and_gather_feature(outputs['reg'], batch['inds'])]
        w_h_ = [_tranpose_and_gather_feature(outputs['wh'], batch['inds'])]

        hmap_loss = _neg_loss(hmap, batch['hmap'])
        reg_loss = _reg_loss(regs, batch['regs'], batch['ind_masks'])
        w_h_loss = _reg_loss(w_h_, batch['w_h_'], batch['ind_masks'])
        loss = hmap_loss + 1 * reg_loss + 0.1 * w_h_loss

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch_idx % cfg.log_interval == 0:
            duration = time.perf_counter() - tic
            tic = time.perf_counter()
            print('[%d/%d-%d/%d] ' % (epoch, cfg.num_epochs, batch_idx, len(train_loader)) +
                  ' hmap_loss= %.5f reg_loss= %.5f w_h_loss= %.5f' %
                  (hmap_loss.item(), reg_loss.item(), w_h_loss.item()) +
                  ' (%d samples/sec)' % (cfg.batch_size * cfg.log_interval / duration))