def train(epoch):
        print('\n Epoch: %d' % epoch)
        model.train()
        tic = time.perf_counter()
        for batch_idx, batch in enumerate(train_loader):
            for k in batch:
                if k != 'meta':
                    batch[k] = batch[k].to(device=cfg.device,
                                           non_blocking=True)

            outputs = model(batch['image'])
            hmap, regs, w_h_, real_, imag_ = zip(*outputs)

            regs = [
                _tranpose_and_gather_feature(r, batch['inds']) for r in regs
            ]
            w_h_ = [
                _tranpose_and_gather_feature(r, batch['inds']) for r in w_h_
            ]
            real_ = [
                _tranpose_and_gather_feature(r, batch['inds']) for r in real_
            ]
            imag_ = [
                _tranpose_and_gather_feature(r, batch['inds']) for r in imag_
            ]

            hmap_loss = _neg_loss(hmap, batch['hmap'])
            reg_loss = _reg_loss(regs, batch['regs'], batch['ind_masks'])
            w_h_loss = _reg_loss(w_h_, batch['w_h_'], batch['ind_masks'])
            real_loss = norm_reg_loss(real_, batch['real'], batch['ind_masks'])
            imag_loss = norm_reg_loss(imag_, batch['imaginary'],
                                      batch['ind_masks'])
            loss = hmap_loss + 1 * reg_loss + 0.1 * w_h_loss + cfg.code_loss_weight * (
                real_loss + imag_loss)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if batch_idx % cfg.log_interval == 0:
                duration = time.perf_counter() - tic
                tic = time.perf_counter()
                print(
                    '[%d/%d-%d/%d] ' %
                    (epoch, cfg.num_epochs, batch_idx, len(train_loader)) +
                    ' hmap_loss= %.3f reg_loss= %.3f w_h_loss= %.3f real_loss= %.3f imag_loss= %.3f'
                    % (hmap_loss.item(), reg_loss.item(), w_h_loss.item(),
                       real_loss.item(), imag_loss.item()) +
                    ' (%d samples/sec)' %
                    (cfg.batch_size * cfg.log_interval / duration))

                step = len(train_loader) * epoch + batch_idx
                summary_writer.add_scalar('hmap_loss', hmap_loss.item(), step)
                summary_writer.add_scalar('reg_loss', reg_loss.item(), step)
                summary_writer.add_scalar('w_h_loss', w_h_loss.item(), step)
                summary_writer.add_scalar('real_loss', real_loss.item(), step)
                summary_writer.add_scalar('imag_loss', imag_loss.item(), step)
        return
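
Every variant in this listing gathers per-object features with _tranpose_and_gather_feature (the misspelling is the repository's own identifier, kept as-is). For reference, a minimal sketch of the CenterNet-style helper these snippets appear to assume, where feat is a (B, C, H, W) head output and ind holds K flattened H*W center indices of dtype long:

import torch

def _tranpose_and_gather_feature(feat, ind):
    # (B, C, H, W) -> (B, H*W, C), then pick the C-vector at each object's center.
    feat = feat.permute(0, 2, 3, 1).contiguous()
    feat = feat.view(feat.size(0), -1, feat.size(3))
    ind = ind[:, :, None].expand(ind.size(0), ind.size(1), feat.size(2))
    return feat.gather(1, ind)  # (B, K, C)
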
Example #2
    def train(epoch):
        print('\n Epoch: %d' % epoch)
        model.train()
        tic = time.perf_counter()
        for batch_idx, batch in enumerate(train_loader):
            for k in batch:
                if k != 'meta':
                    batch[k] = batch[k].to(device=cfg.device, non_blocking=True)

            # Frozen dictionary of shape atoms; it could be hoisted out of the loop.
            dict_tensor = torch.from_numpy(dictionary.astype(np.float32)).to(cfg.device, non_blocking=True)
            dict_tensor.requires_grad = False

            outputs = model(batch['image'])
            hmap, regs, w_h_, codes_, offsets_ = zip(*outputs)

            regs = [_tranpose_and_gather_feature(r, batch['inds']) for r in regs]
            w_h_ = [_tranpose_and_gather_feature(r, batch['inds']) for r in w_h_]
            codes_ = [_tranpose_and_gather_feature(r, batch['inds']) for r in codes_]
            offsets_ = [_tranpose_and_gather_feature(r, batch['inds']) for r in offsets_]
            # Decode one contour per object: pair each stack's codes with its offsets.
            shapes_ = [torch.matmul(c, dict_tensor) + o for c, o in zip(codes_, offsets_)]

            hmap_loss = _neg_loss(hmap, batch['hmap'])
            reg_loss = _reg_loss(regs, batch['regs'], batch['ind_masks'])
            w_h_loss = _reg_loss(w_h_, batch['w_h_'], batch['ind_masks'])
            codes_loss = norm_reg_loss(codes_, batch['codes'], batch['ind_masks'])
            shapes_loss = contour_mapping_loss(codes_, shapes_, batch['shapes'], batch['ind_masks'], roll=False)
            loss = 2 * hmap_loss + 1 * reg_loss + 0.1 * w_h_loss + cfg.code_loss_weight * codes_loss + \
                   cfg.shape_loss_weight * shapes_loss

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if batch_idx % cfg.log_interval == 0:
                duration = time.perf_counter() - tic
                tic = time.perf_counter()
                print('[%d/%d-%d/%d] ' % (epoch, cfg.num_epochs, batch_idx, len(train_loader)) +
                  ' hmap_loss= %.3f reg_loss= %.3f w_h_loss= %.3f code_loss= %.3f shape_loss= %.3f' %
                      (hmap_loss.item(), reg_loss.item(), w_h_loss.item(), codes_loss.item(), shapes_loss.item()) +
                      ' (%d samples/sec)' % (cfg.batch_size * cfg.log_interval / duration))

                step = len(train_loader) * epoch + batch_idx
                summary_writer.add_scalar('hmap_loss', hmap_loss.item(), step)
                summary_writer.add_scalar('reg_loss', reg_loss.item(), step)
                summary_writer.add_scalar('w_h_loss', w_h_loss.item(), step)
                summary_writer.add_scalar('code_loss', codes_loss.item(), step)
                summary_writer.add_scalar('shape_loss', shapes_loss.item(), step)
        return
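
The decoding step above reconstructs each contour as codes · dictionary + offsets. A toy, self-contained sketch of that product; the sizes (n_atoms, n_points) and the random inputs are illustrative, not taken from the source:

import numpy as np
import torch

n_atoms, n_points = 64, 32                       # illustrative sizes
dictionary = np.random.randn(n_atoms, 2 * n_points).astype(np.float32)
dict_tensor = torch.from_numpy(dictionary)       # (n_atoms, 2*n_points), frozen

codes = torch.randn(4, 128, n_atoms)             # (batch, max_objects, n_atoms)
offsets = torch.randn(4, 128, 2 * n_points)      # per-object contour offsets
shapes = torch.matmul(codes, dict_tensor) + offsets  # (batch, max_objects, 2*n_points)
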
Example #3
    def train(epoch):
        print_log('\n Epoch: %d' % epoch)
        model.train()
        # torch.autograd.set_detect_anomaly(mode=True)

        tic = time.perf_counter()
        for batch_idx, batch in enumerate(train_loader):
            for k in batch:
                if k != 'meta':
                    batch[k] = batch[k].to(device=cfg.device,
                                           non_blocking=True)

            dict_tensor = torch.from_numpy(dictionary.astype(np.float32)).to(
                cfg.device, non_blocking=True)
            dict_tensor.requires_grad = False

            outputs = model(batch['image'])
            # hmap, regs, w_h_, codes_1, codes_2, codes_3, offsets = zip(*outputs)
            hmap, regs, w_h_, codes, offsets = zip(*outputs)

            regs = [
                _tranpose_and_gather_feature(r, batch['inds']) for r in regs
            ]
            w_h_ = [
                _tranpose_and_gather_feature(r, batch['inds']) for r in w_h_
            ]
            codes = [
                _tranpose_and_gather_feature(r, batch['inds']) for r in codes
            ]
            # c_2 = [_tranpose_and_gather_feature(r, batch['inds']) for r in codes_2]
            # c_3 = [_tranpose_and_gather_feature(r, batch['inds']) for r in codes_3]
            offsets = [
                _tranpose_and_gather_feature(r, batch['inds']) for r in offsets
            ]

            hmap_loss = _neg_loss(hmap, batch['hmap'])
            reg_loss = _reg_loss(regs, batch['regs'], batch['ind_masks'])
            w_h_loss = _reg_loss(w_h_, batch['w_h_'], batch['ind_masks'])
            offsets_loss = _reg_loss(offsets, batch['offsets'],
                                     batch['ind_masks'])
            # codes_loss = (norm_reg_loss(c_1, batch['codes'], batch['ind_masks'], sparsity=0.)
            #               + norm_reg_loss(c_2, batch['codes'], batch['ind_masks'], sparsity=0.)
            #               + norm_reg_loss(c_3, batch['codes'], batch['ind_masks'], sparsity=0.)) / 3.

            if cfg.code_loss == 'norm':
                codes_loss = norm_reg_loss(codes,
                                           batch['codes'],
                                           batch['ind_masks'],
                                           sparsity=0.)
            elif cfg.code_loss == 'adapt':
                codes_loss = adapt_norm_reg_loss(codes,
                                                 batch['codes'],
                                                 batch['ind_masks'],
                                                 sparsity=0.,
                                                 norm=cfg.adapt_norm)
            elif cfg.code_loss == 'wing':
                codes_loss = wing_norm_reg_loss(codes,
                                                batch['codes'],
                                                batch['ind_masks'],
                                                sparsity=0.,
                                                epsilon=cfg.wing_epsilon,
                                                omega=cfg.wing_omega)
            else:
                raise NotImplementedError(
                    'Loss type for code not implemented yet: %s' % cfg.code_loss)

            loss = 1. * hmap_loss + 1. * reg_loss + 0.1 * w_h_loss + 0.1 * offsets_loss + \
                   cfg.code_loss_weight * codes_loss

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if batch_idx % cfg.log_interval == 0:
                duration = time.perf_counter() - tic
                tic = time.perf_counter()
                print_log(
                    '[%d/%d-%d/%d] ' %
                    (epoch, cfg.num_epochs, batch_idx, len(train_loader)) +
                    'Loss: hmap = %.3f reg = %.3f w_h = %.3f code = %.3f offsets = %.3f'
                    % (hmap_loss.item(), reg_loss.item(), w_h_loss.item(),
                       codes_loss.item(), offsets_loss.item()) +
                    ' (%d samples/sec)' %
                    (cfg.batch_size * cfg.log_interval / duration))

                step = len(train_loader) * epoch + batch_idx
                summary_writer.add_scalar('hmap_loss', hmap_loss.item(), step)
                summary_writer.add_scalar('reg_loss', reg_loss.item(), step)
                summary_writer.add_scalar('w_h_loss', w_h_loss.item(), step)
                summary_writer.add_scalar('offset_loss', offsets_loss.item(),
                                          step)
                summary_writer.add_scalar('code_loss', codes_loss.item(), step)
        return
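
wing_norm_reg_loss itself is not shown in this listing. For orientation, a sketch of the piecewise Wing loss (Feng et al., CVPR 2018) that it presumably builds on; the plain mean reduction is an assumption, since the variant above also applies ind_masks and a sparsity term:

import math
import torch

def wing_loss(pred, target, omega=10.0, epsilon=2.0):
    # Logarithmic region for small errors, shifted L1 for large ones;
    # C makes the two pieces meet at |x| = omega.
    diff = (pred - target).abs()
    C = omega - omega * math.log(1.0 + omega / epsilon)
    return torch.where(diff < omega,
                       omega * torch.log(1.0 + diff / epsilon),
                       diff - C).mean()
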
Example #4
    def train(epoch):
        print_log('\n Epoch: %d' % epoch)
        model.train()
        tic = time.perf_counter()
        for batch_idx, batch in enumerate(train_loader):
            for k in batch:
                if k != 'meta':
                    batch[k] = batch[k].to(device=cfg.device,
                                           non_blocking=True)

            dict_tensor = torch.from_numpy(dictionary.astype(np.float32)).to(
                cfg.device, non_blocking=True)
            dict_tensor.requires_grad = False

            outputs = model(batch['image'])
            hmap, regs, w_h_, codes_1, codes_2, codes_3, offsets = zip(
                *outputs)

            regs = [
                _tranpose_and_gather_feature(r, batch['inds']) for r in regs
            ]
            w_h_ = [
                _tranpose_and_gather_feature(r, batch['inds']) for r in w_h_
            ]
            c_1 = [
                _tranpose_and_gather_feature(r, batch['inds']) for r in codes_1
            ]
            c_2 = [
                _tranpose_and_gather_feature(r, batch['inds']) for r in codes_2
            ]
            c_3 = [
                _tranpose_and_gather_feature(r, batch['inds']) for r in codes_3
            ]
            offsets = [
                _tranpose_and_gather_feature(r, batch['inds']) for r in offsets
            ]

            # shapes_1 = [torch.matmul(c, dict_tensor) for c in c_1]
            # shapes_2 = [torch.matmul(c, dict_tensor) for c in c_2]
            # shapes_3 = [torch.matmul(c, dict_tensor) for c in c_3]

            hmap_loss = _neg_loss(hmap, batch['hmap'])
            # occ_loss = _neg_loss(occ_map, batch['occ_map'], ex=4.0)
            reg_loss = _reg_loss(regs, batch['regs'], batch['ind_masks'])
            w_h_loss = _reg_loss(w_h_, batch['w_h_'], batch['ind_masks'])
            offsets_loss = _reg_loss(offsets, batch['offsets'],
                                     batch['ind_masks'])
            codes_loss = (
                norm_reg_loss(c_1, batch['codes'], batch['ind_masks']) +
                norm_reg_loss(c_2, batch['codes'], batch['ind_masks']) +
                norm_reg_loss(c_3, batch['codes'], batch['ind_masks'])) / 3.

            # cmm_loss = (contour_mapping_loss(c_1, shapes_1, batch['shapes'], batch['ind_masks'], roll=False)
            #             + contour_mapping_loss(c_2, shapes_2, batch['shapes'], batch['ind_masks'], roll=False)
            #             + contour_mapping_loss(c_3, shapes_3, batch['shapes'], batch['ind_masks'], roll=False)) / 3.
            # cmm_loss = (_reg_loss(shapes_1, batch['shapes'], batch['ind_masks'])
            #             + _reg_loss(shapes_2, batch['shapes'], batch['ind_masks'])
            #             + _reg_loss(shapes_3, batch['shapes'], batch['ind_masks'])) / 3.

            # loss = 1. * hmap_loss + 1 * reg_loss + 0.1 * w_h_loss + cfg.cmm_loss_weight * cmm_loss \
            #        + cfg.code_loss_weight * codes_loss + 0.1 * offsets_loss
            loss = 1 * hmap_loss + 1 * reg_loss + 0.1 * w_h_loss \
                   + cfg.code_loss_weight * codes_loss + 0.1 * offsets_loss

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if batch_idx % cfg.log_interval == 0:
                duration = time.perf_counter() - tic
                tic = time.perf_counter()
                print_log(
                    '[%d/%d-%d/%d] ' %
                    (epoch, cfg.num_epochs, batch_idx, len(train_loader)) +
                    'Loss: hmap = %.3f reg = %.3f w_h = %.3f code = %.3f offsets = %.3f'
                    % (hmap_loss.item(), reg_loss.item(), w_h_loss.item(),
                       codes_loss.item(), offsets_loss.item()) +
                    ' (%d samples/sec)' %
                    (cfg.batch_size * cfg.log_interval / duration))

                step = len(train_loader) * epoch + batch_idx
                summary_writer.add_scalar('hmap_loss', hmap_loss.item(), step)
                # summary_writer.add_scalar('occ_loss', occ_loss.item(), step)
                summary_writer.add_scalar('reg_loss', reg_loss.item(), step)
                summary_writer.add_scalar('w_h_loss', w_h_loss.item(), step)
                summary_writer.add_scalar('offset_loss', offsets_loss.item(),
                                          step)
                summary_writer.add_scalar('code_loss', codes_loss.item(), step)
                # summary_writer.add_scalar('cmm_loss', cmm_loss.item(), step)
        return
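
_reg_loss, used by every variant, is the usual CenterNet-style masked L1: summed over valid objects, normalized by the mask count, and averaged across the stacked heads. A sketch under that assumption:

import torch.nn.functional as F

def _reg_loss(regs, gt_regs, mask):
    # regs: list of (B, K, C) gathered predictions, one per stack.
    mask = mask[:, :, None].expand_as(gt_regs).float()
    return sum(F.l1_loss(r * mask, gt_regs * mask, reduction='sum') /
               (mask.sum() + 1e-4) for r in regs) / len(regs)
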
Example #5
    def train(epoch):
        print_log('\n Epoch: %d' % epoch)
        model.train()
        tic = time.perf_counter()
        for batch_idx, batch in enumerate(train_loader):
            for k in batch:
                if k != 'meta':
                    batch[k] = batch[k].to(device=cfg.device,
                                           non_blocking=True)

            # dict_tensor = torch.from_numpy(dictionary.astype(np.float32)).to(cfg.device, non_blocking=True)
            # dict_tensor.requires_grad = False

            outputs = model(batch['image'])
            hmap, regs, w_h_, codes_1, offsets, votes = zip(*outputs)

            regs = [
                _tranpose_and_gather_feature(r, batch['inds']) for r in regs
            ]
            w_h_ = [
                _tranpose_and_gather_feature(r, batch['inds']) for r in w_h_
            ]
            c_1 = [
                _tranpose_and_gather_feature(r, batch['inds']) for r in codes_1
            ]
            # c_2 = [_tranpose_and_gather_feature(r, batch['inds']) for r in codes_2]
            # c_3 = [_tranpose_and_gather_feature(r, batch['inds']) for r in codes_3]
            offsets = [
                _tranpose_and_gather_feature(r, batch['inds']) for r in offsets
            ]
            votes = [
                _tranpose_and_gather_feature(r, batch['inds']) for r in votes
            ]

            hmap_loss = _neg_loss(hmap, batch['hmap'])
            vote_loss = _bce_loss(votes, batch['votes'], batch['ind_masks'])
            reg_loss = _reg_loss(regs, batch['regs'], batch['ind_masks'])
            w_h_loss = _reg_loss(w_h_, batch['w_h_'], batch['ind_masks'])
            offsets_loss = _reg_loss(offsets, batch['offsets'],
                                     batch['ind_masks'])
            # codes_loss = (norm_reg_loss(c_1, batch['codes'], batch['ind_masks'], sparsity=0.)
            #               + norm_reg_loss(c_2, batch['codes'], batch['ind_masks'], sparsity=0.)
            #               + norm_reg_loss(c_3, batch['codes'], batch['ind_masks'], sparsity=0.)) / 3.

            codes_loss = norm_reg_loss(c_1,
                                       batch['codes'],
                                       batch['ind_masks'],
                                       sparsity=0.0)

            loss = 1. * hmap_loss + 1. * reg_loss + 0.1 * w_h_loss + 0.1 * offsets_loss + \
                   cfg.vote_loss_weight * vote_loss + cfg.code_loss_weight * codes_loss

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if batch_idx % cfg.log_interval == 0:
                duration = time.perf_counter() - tic
                tic = time.perf_counter()
                print_log(
                    '[%d/%d-%d/%d] ' %
                    (epoch, cfg.num_epochs, batch_idx, len(train_loader)) +
                    'Loss: hmap = %.3f reg = %.3f w_h = %.3f code = %.3f offsets = %.3f votes = %.3f'
                    % (hmap_loss.item(), reg_loss.item(), w_h_loss.item(),
                       codes_loss.item(), offsets_loss.item(),
                       vote_loss.item()) + ' (%d samples/sec)' %
                    (cfg.batch_size * cfg.log_interval / duration))

                step = len(train_loader) * epoch + batch_idx
                summary_writer.add_scalar('hmap_loss', hmap_loss.item(), step)
                summary_writer.add_scalar('vote_loss', vote_loss.item(), step)
                summary_writer.add_scalar('reg_loss', reg_loss.item(), step)
                summary_writer.add_scalar('w_h_loss', w_h_loss.item(), step)
                summary_writer.add_scalar('offset_loss', offsets_loss.item(),
                                          step)
                summary_writer.add_scalar('code_loss', codes_loss.item(), step)
        return
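
_bce_loss on the vote maps is presumably masked the same way as the regression losses; a sketch assuming the vote heads emit logits and the masked-mean reduction used elsewhere:

import torch.nn.functional as F

def _bce_loss(preds, targets, mask):
    # preds: list of (B, K, C) gathered vote logits, one per stack.
    mask = mask[:, :, None].expand_as(targets).float()
    return sum((F.binary_cross_entropy_with_logits(p, targets, reduction='none')
                * mask).sum() / (mask.sum() + 1e-4) for p in preds) / len(preds)
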
Example #6
    def train(epoch):
        print_log('\n Epoch: %d' % epoch)
        model.train()
        tic = time.perf_counter()
        for batch_idx, batch in enumerate(train_loader):
            for k in batch:
                if k != 'meta':
                    batch[k] = batch[k].to(device=cfg.device,
                                           non_blocking=True)

            dict_tensor = torch.from_numpy(dictionary.astype(np.float32)).to(
                cfg.device, non_blocking=True)
            dict_tensor.requires_grad = False

            outputs = model(batch['image'])
            hmap, regs, w_h_, codes, offsets, votes = zip(*outputs)

            regs = [
                _tranpose_and_gather_feature(r, batch['inds']) for r in regs
            ]
            w_h_ = [
                _tranpose_and_gather_feature(r, batch['inds']) for r in w_h_
            ]
            codes = [
                _tranpose_and_gather_feature(r, batch['inds']) for r in codes
            ]
            votes = [
                _tranpose_and_gather_feature(r, batch['inds']) for r in votes
            ]
            offsets = [
                _tranpose_and_gather_feature(r, batch['inds']) for r in offsets
            ]
            shapes = [torch.matmul(c, dict_tensor) for c in codes]

            hmap_loss = _neg_loss(hmap, batch['hmap'])
            bce_loss = _bce_loss(votes, batch['votes'], batch['ind_masks'])
            reg_loss = _reg_loss(regs, batch['regs'], batch['ind_masks'])
            w_h_loss = _reg_loss(w_h_, batch['w_h_'], batch['ind_masks'])
            offsets_loss = _reg_loss(offsets, batch['offsets'],
                                     batch['ind_masks'])

            if cfg.code_loss == 'norm':
                codes_loss = norm_reg_loss(codes,
                                           batch['codes'],
                                           batch['ind_masks'],
                                           sparsity=0.01)
            elif cfg.code_loss == 'adapt':
                codes_loss = adapt_norm_reg_loss(codes,
                                                 batch['codes'],
                                                 batch['ind_masks'],
                                                 norm=cfg.adapt_norm)
            elif cfg.code_loss == 'wing':
                codes_loss = wing_norm_reg_loss(codes,
                                                batch['codes'],
                                                batch['ind_masks'],
                                                sparsity=0.01,
                                                epsilon=cfg.wing_epsilon,
                                                omega=cfg.wing_omega)
            elif cfg.code_loss == 'mse':
                codes_loss = mse_reg_loss(codes,
                                          batch['codes'],
                                          batch['ind_masks'],
                                          sparsity=0.001)
            else:
                raise NotImplementedError(
                    'Loss type for code not implemented yet: %s' % cfg.code_loss)

            if cfg.shape_loss == 'cmm':
                shape_loss = contour_mapping_loss(codes,
                                                  shapes,
                                                  batch['shapes'],
                                                  batch['ind_masks'],
                                                  roll=False)
            elif cfg.shape_loss == 'piou':
                shape_loss = 0.
                for i in range(len(shapes)):
                    shape_loss += PIoU_loss(pred_shapes=shapes[i],
                                            gt_shapes=batch['shapes'],
                                            mask=batch['ind_masks'])
            else:
                raise NotImplementedError(
                    'Loss type for shape not implemented yet: %s' % cfg.shape_loss)

            loss = 1 * hmap_loss + 1 * reg_loss + 0.1 * w_h_loss + cfg.code_loss_weight * codes_loss \
                   + 0.5 * offsets_loss + cfg.shape_loss_weight * shape_loss + cfg.bce_loss_weight * bce_loss

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if batch_idx % cfg.log_interval == 0:
                duration = time.perf_counter() - tic
                tic = time.perf_counter()
                print_log(
                    '[%d/%d-%d/%d] ' %
                    (epoch, cfg.num_epochs, batch_idx, len(train_loader)) +
                    'Loss: hmap = %.3f reg = %.3f w_h = %.3f code = %.3f offsets = %.3f shape = %.3f vote = %.3f'
                    %
                    (hmap_loss.item(), reg_loss.item(), w_h_loss.item(),
                     codes_loss.item(), offsets_loss.item(), shape_loss.item(),
                     bce_loss.item()) + ' (%d samples/sec)' %
                    (cfg.batch_size * cfg.log_interval / duration))

                step = len(train_loader) * epoch + batch_idx
                summary_writer.add_scalar('hmap_loss', hmap_loss.item(), step)
                summary_writer.add_scalar('reg_loss', reg_loss.item(), step)
                summary_writer.add_scalar('w_h_loss', w_h_loss.item(), step)
                summary_writer.add_scalar('offset_loss', offsets_loss.item(),
                                          step)
                summary_writer.add_scalar('code_loss', codes_loss.item(), step)
                summary_writer.add_scalar('vote_loss', bce_loss.item(), step)
                summary_writer.add_scalar('shape_loss', shape_loss.item(),
                                          step)
        return
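
_neg_loss is the penalty-reduced pixelwise focal loss from CornerNet/CenterNet. A sketch, assuming the heatmap heads emit logits and the targets are Gaussian-splatted values in [0, 1]:

import torch

def _neg_loss(preds, targets):
    pos_inds = targets.eq(1).float()
    neg_inds = targets.lt(1).float()
    neg_weights = torch.pow(1 - targets, 4)  # down-weight pixels near a center
    loss = 0.
    for pred in preds:
        pred = torch.clamp(torch.sigmoid(pred), 1e-4, 1 - 1e-4)
        pos_loss = (torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds).sum()
        neg_loss = (torch.log(1 - pred) * torch.pow(pred, 2)
                    * neg_weights * neg_inds).sum()
        num_pos = pos_inds.sum()
        loss += -neg_loss if num_pos == 0 else -(pos_loss + neg_loss) / num_pos
    return loss / len(preds)
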
Example #7
    def train(epoch):
        print('\n Epoch: %d' % epoch)
        model.train()
        tic = time.perf_counter()
        for batch_idx, batch in enumerate(train_loader):
            for k in batch:
                if k != 'meta':
                    batch[k] = batch[k].to(device=cfg.device,
                                           non_blocking=True)

            dict_tensor = torch.from_numpy(dictionary.astype(np.float32)).to(
                cfg.device, non_blocking=True)
            dict_tensor.requires_grad = False

            outputs = model(batch['image'], batch['inds'], batch['centers'])
            hmap, regs, w_h_, codes_, polys_1, polys_2, shapes_ = zip(*outputs)

            regs = [
                _tranpose_and_gather_feature(r, batch['inds']) for r in regs
            ]
            w_h_ = [
                _tranpose_and_gather_feature(r, batch['inds']) for r in w_h_
            ]
            codes_ = [
                _tranpose_and_gather_feature(r, batch['inds']) for r in codes_
            ]

            hmap_loss = _neg_loss(hmap, batch['hmap'])
            reg_loss = _reg_loss(regs, batch['regs'], batch['ind_masks'])
            w_h_loss = _reg_loss(w_h_, batch['w_h_'], batch['ind_masks'])
            codes_loss = norm_reg_loss(codes_, batch['codes'],
                                       batch['ind_masks'])

            # shapes_loss = contour_mapping_loss(codes_, shapes_, batch['shapes'], batch['ind_masks'], roll=False)
            mask = batch['ind_masks'][:, :, None].expand_as(
                batch['shapes']).float()

            def _masked_l1(polys):
                # Mean L1 between decoded polygons and GT shapes over valid objects.
                return sum(
                    nn.functional.l1_loss(
                        p.view(p.size(0), p.size(1), -1) * mask,
                        batch['shapes'] * mask,
                        reduction='sum') / (mask.sum() + 1e-4) for p in polys)

            shapes_loss = (_masked_l1(polys_1) + _masked_l1(polys_2) +
                           _masked_l1(shapes_)) / 3.

            loss = 2 * hmap_loss + 1 * reg_loss + 0.1 * w_h_loss + cfg.code_loss_weight * codes_loss + \
                   cfg.shape_loss_weight * shapes_loss

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if batch_idx % cfg.log_interval == 0:
                duration = time.perf_counter() - tic
                tic = time.perf_counter()
                print(
                    '[%d/%d-%d/%d] ' %
                    (epoch, cfg.num_epochs, batch_idx, len(train_loader)) +
                    ' hmap_loss= %.3f reg_loss= %.3f w_h_loss= %.3f code_loss= %.3f shape_loss= %.3f'
                    % (hmap_loss.item(), reg_loss.item(), w_h_loss.item(),
                       codes_loss.item(), shapes_loss.item()) +
                    ' (%d samples/sec)' %
                    (cfg.batch_size * cfg.log_interval / duration))

                step = len(train_loader) * epoch + batch_idx
                summary_writer.add_scalar('hmap_loss', hmap_loss.item(), step)
                summary_writer.add_scalar('reg_loss', reg_loss.item(), step)
                summary_writer.add_scalar('w_h_loss', w_h_loss.item(), step)
                summary_writer.add_scalar('code_loss', codes_loss.item(), step)
                summary_writer.add_scalar('shape_loss', shapes_loss.item(),
                                          step)
        return