Пример #1
0
                    type=list,
                    help='GPU to use for loss calculation')
# Remaining command-line flags for the eval script (the ArgumentParser is
# created above this chunk; str2bool is the project's string->bool converter).
parser.add_argument('--cleanup',
                    default=True,
                    type=str2bool,
                    help='Cleanup and remove results files following eval')
# BUG FIX: `type=bool` treats ANY non-empty string -- including "False" --
# as True, so `--tocaffe False` would still enable the export.  Use the same
# str2bool converter as --cleanup above.
parser.add_argument('--tocaffe',
                    default=False,
                    type=str2bool,
                    help='trans to caffe or not')

args = parser.parse_args()

if __name__ == '__main__':
    # Resolve output folders, tensorboard writer and config path for the
    # eval phase, then overlay the file-based config onto `cfg`.
    tb_writer, cfg_path, snapshot_dir, log_dir = setup_folder(args,
                                                              cfg,
                                                              phase='eval')
    merge_cfg_from_file(cfg_path)
    # NOTE(review): presumably 0 means "no cap on evaluated pictures" --
    # confirm against the cfg schema.
    cfg.DATASET.NUM_EVAL_PICS = 0

    # args.trained_model = './results/vgg16_ssd_coco_24.4.pth'
    # args.trained_model = './results/ssd300_mAP_77.43_v2.pth'
    # NOTE(review): hard-coded checkpoint name silently overrides any
    # --trained_model value passed on the command line.
    args.trained_model = 'dangercar_5000.pth'
    model_dir = osp.join(snapshot_dir, args.trained_model)
    print('eval model:{}'.format(model_dir))

    # Configure CUDA devices from the CLI flags.
    setup_cuda(cfg, args.cuda, args.devices)

    np.set_printoptions(precision=3, suppress=True, edgeitems=4)
    # Build the evaluation data loader (chunk is cut off after this line).
    loader = dataset_factory(phase='eval', cfg=cfg)
def train():
    """Build an SSD model and run a single evaluation pass.

    NOTE(review): despite the name, the whole optimization loop below
    (LR stepping, forward/backward, loss logging, checkpointing) is
    commented out, and the eval branch ends in a bare ``return`` -- so this
    function only constructs the model, optionally restores weights, runs
    one evaluation and exits.  Relies on module-level ``args`` and ``cfg``.
    """
    tb_writer, cfg_path, snapshot_dir, log_dir = setup_folder(args, cfg)
    step_index = 0  # index into cfg.TRAIN.LR_SCHEDULER.STEPS

    train_loader = dataset_factory(phase='train', cfg=cfg)
    val_loader = dataset_factory(phase='eval', cfg=cfg)
    eval_solver = eval_solver_factory(val_loader, cfg)

    ssd_net, priors, _ = model_factory(phase='train', cfg=cfg, tb_writer=tb_writer)
    net = ssd_net  # net is the parallel version of ssd_net
    print(net)
    print(cfg.TRAIN.OPTIMIZER)
    # return

    if args.cuda:
        net = torch.nn.DataParallel(ssd_net)
        # volatile=True is legacy (pre-0.4) PyTorch: excludes priors from autograd.
        priors = Variable(priors.cuda(), volatile=True)
    else:
        priors = Variable(priors)

    if args.resume:
        # Restore a full training checkpoint (weights + iteration counters).
        print('Resuming training, loading {}...'.format(args.resume))
        checkpoint = torch.load(args.resume)
        args.start_iter = checkpoint['iteration']
        step_index = checkpoint['step_index']
        ssd_net.load_state_dict(checkpoint['state_dict'])
    else:
        # pretained weights
        pretrained_weights = torch.load(osp.join(cfg.GENERAL.WEIGHTS_ROOT, args.basenet))
        print('Loading base network...')

        try:
            ssd_net.base.load_state_dict(pretrained_weights)
        # NOTE(review): bare except -- on any mismatch, fall back to loading
        # only the keys that exist in the current base network.
        except:
            model_dict = ssd_net.base.state_dict()
            pretrained_weights = {k: v for k,
                                           v in pretrained_weights.items() if k in model_dict}
            model_dict.update(pretrained_weights)
            ssd_net.base.load_state_dict(model_dict)

        # initialize newly added layers' weights with xavier method
        print('Initializing weights...')
        ssd_net.extras.apply(weights_init)
        ssd_net.loc.apply(weights_init)
        ssd_net.conf.apply(weights_init)

    if args.cuda:
        net = net.cuda()

    optimizer = optim.SGD(net.parameters(), lr=cfg.TRAIN.OPTIMIZER.LR,
                          momentum=cfg.TRAIN.OPTIMIZER.MOMENTUM,
                          weight_decay=cfg.TRAIN.OPTIMIZER.WEIGHT_DECAY)
    # Positional MultiBoxLoss arguments -- meanings not visible from this
    # file; see the MultiBoxLoss definition.
    criterion = MultiBoxLoss(cfg.MODEL.NUM_CLASSES, 0.5, True, 0, True, 3, 0.5,
                             False, args.cuda)

    # continue training at 8w, 12w...
    if args.start_iter not in cfg.TRAIN.LR_SCHEDULER.STEPS and step_index != 0:
        adjust_learning_rate(optimizer, cfg.TRAIN.LR_SCHEDULER.GAMMA, step_index)

    net.train()
    epoch_size = len(train_loader.dataset) // cfg.DATASET.TRAIN_BATCH_SIZE
    # Ceiling division: enough epochs to cover MAX_ITER iterations.
    num_epochs = (cfg.TRAIN.MAX_ITER + epoch_size - 1) // epoch_size
    print('Training SSD on:', train_loader.dataset.name)
    print('Using the specified args:')
    print(args)

    # timer
    t_ = {'network': Timer(), 'misc': Timer(), 'all': Timer(), 'eval': Timer()}
    t_['all'].tic()

    iteration = args.start_iter
    for epoch in range(num_epochs):
        tb_writer.cfg['epoch'] = epoch
        for images, targets, _ in train_loader:
            tb_writer.cfg['iteration'] = iteration
            # t_['misc'].tic()
            # if iteration in cfg.TRAIN.LR_SCHEDULER.STEPS:
            #     t_['misc'].tic()
            #     step_index += 1
            #     adjust_learning_rate(optimizer, cfg.TRAIN.LR_SCHEDULER.GAMMA, step_index)
            #
            # if args.cuda:
            #     images = Variable(images.cuda())
            #     targets = [Variable(ann.cuda(), volatile=True) for ann in targets]
            # else:
            #     images = Variable(images)
            #     targets = [Variable(ann, volatile=True) for ann in targets]

            # # forward
            # t_['network'].tic()
            # out = net(images)
            # out1 = [out[0], out[1], priors]
            #
            # # backward
            # optimizer.zero_grad()
            # loss_l, loss_c = criterion(out1, targets)
            # loss = loss_l + loss_c
            # loss.backward()
            # optimizer.step()
            # t_['network'].toc()
            #
            # # log
            # if iteration % cfg.TRAIN.LOG_LOSS_ITER == 0:
            #     t_['misc'].toc()
            #     print('Iter ' + str(iteration) + ' || Loss: %.3f' % (loss.data[0]) +
            #           '|| conf_loss: %.3f' % (loss_c.data[0]) + ' || loc loss: %.3f ' % (loss_l.data[0]), end=' ')
            #     print('Timer: %.3f sec.' % t_['misc'].diff, '  Lr: %.6f' % optimizer.param_groups[0]['lr'])
            #     if args.tensorboard:
            #         phase = tb_writer.cfg['phase']
            #         tb_writer.writer.add_scalar('{}/loc_loss'.format(phase), loss_l.data[0], iteration)
            #         tb_writer.writer.add_scalar('{}/conf_loss'.format(phase), loss_c.data[0], iteration)
            #         tb_writer.writer.add_scalar('{}/all_loss'.format(phase), loss.data[0], iteration)
            #         tb_writer.writer.add_scalar('{}/time'.format(phase), t_['misc'].diff, iteration)
            #
            # # save model
            # if iteration % cfg.TRAIN.SAVE_ITER == 0 and iteration != args.start_iter or \
            #         iteration == cfg.TRAIN.MAX_ITER:
            #     print('Saving state, iter:', iteration)
            #     save_checkpoint({'iteration': iteration,
            #                      'step_index': step_index,
            #                      'state_dict': ssd_net.state_dict()},
            #                     snapshot_dir,
            #                     args.cfg_name + '_' + repr(iteration) + '.pth')

            # Eval
            if (iteration % cfg.TRAIN.EVAL_ITER == 0 ) or \
                    iteration == cfg.TRAIN.MAX_ITER:
                print('Start evaluation ......')
                tb_writer.cfg['phase'] = 'eval'
                t_['eval'].tic()
                net.eval()
                aps, mAPs = eval_solver.validate(net, priors, tb_writer=tb_writer)
                net.train()
                t_['eval'].toc()
                print('Iteration ' + str(iteration) + ' || mAP: %.3f' % mAPs[0] + ' ||eval_time: %.4f/%.4f' %
                      (t_['eval'].diff, t_['eval'].average_time))
                # VOC0712 reports a single [email protected]; other datasets report two.
                if cfg.DATASET.NAME == 'VOC0712':
                    tb_writer.writer.add_scalar('mAP/[email protected]', mAPs[0], iteration)
                else:
                    tb_writer.writer.add_scalar('mAP/[email protected]', mAPs[0], iteration)
                    tb_writer.writer.add_scalar('mAP/[email protected]', mAPs[1], iteration)
                tb_writer.cfg['phase'] = 'train'
                # NOTE(review): this return exits train() right after the
                # first evaluation -- the loop never advances past it.
                return

            if iteration == cfg.TRAIN.MAX_ITER:
                break
            iteration += 1

    # Archive config/logs for this job (unreachable while the return above stands).
    backup_jobs(cfg, cfg_path, log_dir)
Пример #3
0
def train():
    """Full SSD training loop: warmup + stepped LR, checkpointing and
    periodic evaluation.

    Relies on module-level ``args`` and ``cfg``.  Matching between targets
    and priors is done on the host side (``matching``) and the loss is
    split across the network (``ssd_net.criterion``) and a post step
    (``criterion_post``).

    NOTE(review): everything from the indented ``parser.add_argument``
    lines below the main loop onward looks like fragments of a different
    script spliced in by the extractor (``parser`` is never defined here);
    it is left untouched but should be removed once confirmed.
    """
    tb_writer, cfg_path, snapshot_dir, log_dir = setup_folder(args, cfg)
    step_index = 0  # index into cfg.TRAIN.LR_SCHEDULER.STEPS

    train_loader = dataset_factory(phase='train', cfg=cfg)
    val_loader = dataset_factory(phase='eval', cfg=cfg)
    eval_solver = eval_solver_factory(val_loader, cfg)

    ssd_net, priors, _ = model_factory(phase='train', cfg=cfg)
    net = ssd_net  # net is the parallel version of ssd_net
    print(net)

    if args.resume:
        # Resume from a full checkpoint; +1 so we do not repeat the saved iteration.
        print('Resuming training, loading {}...'.format(args.resume))
        checkpoint = torch.load(args.resume)
        args.start_iter = checkpoint['iteration'] + 1
        step_index = checkpoint['step_index']
        ssd_net.load_state_dict(checkpoint['state_dict'])
    elif cfg.MODEL.PRETRAIN_MODEL != '':
        # pretained weights
        pretrain_weights = torch.load(cfg.MODEL.PRETRAIN_MODEL)
        if 'reducedfc' not in cfg.MODEL.PRETRAIN_MODEL:
            # Whole-detector checkpoint: xavier-init first, then overlay
            # whatever keys match (strict=False tolerates missing keys).
            ssd_net.apply(weights_init)
            try:
                ssd_net.load_state_dict(pretrain_weights['state_dict'],
                                        strict=False)
            except RuntimeError:  # another dataset
                # Class count differs: drop the classification ('conf') heads
                # and load the rest.
                entries = [
                    i for i in pretrain_weights['state_dict'].keys()
                    if i.startswith('conf')
                ]
                for key in entries:
                    del pretrain_weights['state_dict'][key]
                ssd_net.load_state_dict(pretrain_weights['state_dict'],
                                        strict=False)
        else:
            print('Loading base network...')
            ssd_net.base.load_state_dict(pretrain_weights)

            # initialize newly added layers' weights with xavier method
            print('Initializing weights...')
            ssd_net.extras.apply(weights_init)
            ssd_net.loc.apply(weights_init)
            ssd_net.conf.apply(weights_init)
    else:
        print('Initializing weights...')
        ssd_net.apply(weights_init)
        ssd_net.extras.apply(weights_init)
        ssd_net.loc.apply(weights_init)
        ssd_net.conf.apply(weights_init)

    optimizer = optim.SGD(net.parameters(),
                          lr=cfg.TRAIN.OPTIMIZER.LR,
                          momentum=cfg.TRAIN.OPTIMIZER.MOMENTUM,
                          weight_decay=cfg.TRAIN.OPTIMIZER.WEIGHT_DECAY)

    if args.cuda:
        net = torch.nn.DataParallel(ssd_net, device_ids=cfg.GENERAL.NET_CPUS)
        # Priors live on the dedicated loss GPU.
        priors = Variable(priors.cuda(cfg.GENERAL.LOSS_GPU),
                          requires_grad=False)
        net = net.cuda()
    else:
        priors = Variable(priors, requires_grad=False)

    # Attach priors/criterion to the module so the loss runs inside the
    # data-parallel forward.
    ssd_net.priors = priors
    ssd_net.criterion = DetectLoss(cfg)
    criterion_post = DetectLossPost(cfg)
    net.train()

    print('Using the specified args: \n', args)

    epoch_size = len(train_loader.dataset) // cfg.DATASET.TRAIN_BATCH_SIZE
    # Ceiling division: enough epochs to cover MAX_ITER iterations.
    num_epochs = (cfg.TRAIN.MAX_ITER + epoch_size - 1) // epoch_size
    iteration = args.start_iter
    start_epoch = int(iteration * 1.0 / epoch_size)
    # continue training at 8w, 12w...
    if step_index > 0:
        adjust_learning_rate(optimizer, cfg.TRAIN.OPTIMIZER.LR,
                             cfg.TRAIN.LR_SCHEDULER.GAMMA, 100, step_index,
                             None, None)

    # timer
    t_ = {'network': Timer(), 'misc': Timer(), 'eval': Timer()}
    t_['misc'].tic()
    iteration = args.start_iter
    for epoch in range(start_epoch, num_epochs):
        for images, targets, _ in train_loader:
            # LR schedule: step decay at the configured iterations, plus
            # linear warmup during the first WARMUP_EPOCH epochs.
            if iteration in cfg.TRAIN.LR_SCHEDULER.STEPS or (
                    iteration <= cfg.TRAIN.WARMUP_EPOCH * epoch_size):
                if iteration in cfg.TRAIN.LR_SCHEDULER.STEPS: step_index += 1
                adjust_learning_rate(optimizer, cfg.TRAIN.OPTIMIZER.LR,
                                     cfg.TRAIN.LR_SCHEDULER.GAMMA, epoch,
                                     step_index, iteration, epoch_size,
                                     cfg.TRAIN.WARMUP_EPOCH)

            # save model
            if iteration % cfg.TRAIN.SAVE_ITER == 0 and iteration != args.start_iter or \
                    iteration == cfg.TRAIN.MAX_ITER:
                print('Saving state, iter:', iteration)
                save_checkpoint(
                    {
                        'iteration': iteration,
                        'step_index': step_index,
                        'state_dict': ssd_net.state_dict()
                    }, snapshot_dir,
                    args.cfg_name + '_' + repr(iteration) + '.pth')
            # Eval
            if iteration % cfg.TRAIN.EVAL_ITER == 0 or iteration == cfg.TRAIN.MAX_ITER:
                t_['eval'].tic()
                net.eval()
                aps, mAPs = eval_solver.validate(net,
                                                 priors,
                                                 tb_writer=tb_writer)
                net.train()
                t_['eval'].toc()
                print('Iteration ' + str(iteration) +
                      ' || mAP: %.3f' % mAPs[0] + ' ||eval_time: %.4f/%.4f' %
                      (t_['eval'].diff, t_['eval'].average_time))
                if tb_writer is not None:
                    # BUG FIX: the original condition was
                    # `cfg.DATASET.NAME == 'VOC0712' or 'FACE'`, which is
                    # always truthy (non-empty string literal), so the
                    # two-threshold branch below was unreachable.
                    if cfg.DATASET.NAME in ('VOC0712', 'FACE'):
                        tb_writer.writer.add_scalar('mAP/[email protected]', mAPs[0],
                                                    iteration)
                    else:
                        tb_writer.writer.add_scalar('mAP/[email protected]', mAPs[0],
                                                    iteration)
                        tb_writer.writer.add_scalar('mAP/[email protected]', mAPs[1],
                                                    iteration)

                if iteration == cfg.TRAIN.MAX_ITER:
                    break

            if args.cuda:
                images = Variable(images.cuda(), requires_grad=False)
                # volatile=True is legacy (pre-0.4) PyTorch: no autograd on targets.
                targets = [
                    Variable(ann.cuda(cfg.GENERAL.LOSS_GPU), volatile=True)
                    for ann in targets
                ]
            else:
                images = Variable(images)
                targets = [Variable(ann, volatile=True) for ann in targets]

            # forward
            t_['network'].tic()

            # Match ground-truth boxes to priors before the forward pass.
            match_result = matching(targets,
                                    priors,
                                    cfg.LOSS.OVERLAP_THRESHOLD,
                                    cfg.MODEL.VARIANCE,
                                    args.cuda,
                                    cfg.GENERAL.LOSS_GPU,
                                    cfg=cfg)
            net_outputs = net(images,
                              match_result=match_result,
                              tb_writer=tb_writer)
            loss, (loss_l, loss_c) = criterion_post(net_outputs)

            loss_str = ' || Loss: %.3f' % (
                loss.data[0]) + '|| conf_loss: %.3f' % (
                    loss_c) + ' || loc_loss: %.3f ' % (loss_l)
            # backward
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            t_['network'].toc()
            t_['misc'].toc()
            # log
            if iteration % cfg.TRAIN.LOG_LOSS_ITER == 0:
                current_date = time.strftime("%Y-%m-%d %H:%M:%S",
                                             time.localtime())

                print('Iter ' + str(iteration) + loss_str, end=' ')
                print(
                    'Timer: %.3f(%.3f) %.3f(%.3f) sec.' %
                    (t_['misc'].diff, t_['misc'].average_time,
                     t_['network'].diff, t_['network'].average_time),
                    'lr: %.6f' % optimizer.param_groups[0]['lr'], ' sys_time:',
                    current_date)

                if tb_writer is not None:
                    phase = 'train'
                    tb_writer.writer.add_scalar('{}/loc_loss'.format(phase),
                                                loss_l, iteration)
                    tb_writer.writer.add_scalar('{}/conf_loss'.format(phase),
                                                loss_c, iteration)
                    tb_writer.writer.add_scalar('{}/all_loss'.format(phase),
                                                loss.data[0], iteration)
                    tb_writer.writer.add_scalar('{}/time'.format(phase),
                                                t_['misc'].diff, iteration)

            iteration += 1
            t_['misc'].tic()
        # NOTE(review): the remainder of this function appears to be spliced
        # in from other scripts by the extractor (`parser` is undefined here
        # and `return args` exits mid-loop); left untouched pending cleanup.
        parser.add_argument('--tensorboard',
                            default=True,
                            type=bool,
                            help='Use tensorboard')
        parser.add_argument('--loss_type',
                            default='ssd_loss',
                            type=str,
                            help='ssd_loss only now')
        args = parser.parse_args()

        return args

    args = parse_args()

    # cfg.MODEL.BASE='drn_d_22'
    # cfg.MODEL.IMAGE_SIZE=(321,321)
    # cfg.MODEL.SSD_TYPE = 'DRN_SSD'
    args.cfg_name = 'ssd_drn22_rfb_voc'
    args.job_group = 'rfb'
    tb_writer, cfg_path, snapshot_dir, log_dir = setup_folder(args, cfg)
    model, priors, _ = model_factory(phase='train',
                                     cfg=cfg,
                                     tb_writer=tb_writer)

    input_size = (cfg.MODEL.IMAGE_SIZE[0], cfg.MODEL.IMAGE_SIZE[1])

    total_flops = get_flops(model, input_size)

    # For default vgg16 model, this shoud output 31.386288 G FLOPS
    print("The Model's Total FLOPS is : {:.6f} G FLOPS".format(total_flops))
Пример #5
0
def analyze_gt(tb_writer):
    """Load cached VOC ground truth and display it with DetectVis.

    `tb_writer` is accepted for interface parity with the other entry
    points but is not used by the visualization itself.
    """
    # read_gt()
    ground_truth = pd.read_hdf('./cache/voc.hdf')
    viewer = DetectVis(img_list=ground_truth.path.unique())
    # color = ['w', 'b', 'r', 'y', 'c', 'k']
    box_style = DetectVis.colored_box(multiply=True, score=False, linestyle='-')
    viewer.show_detections([(ground_truth, box_style)])


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Single Shot MultiBox Detector Training With Pytorch')

    def _str2bool(v):
        # BUG FIX: argparse's `type=bool` turns ANY non-empty string
        # (including "False") into True; parse common spellings explicitly.
        return str(v).lower() in ('yes', 'true', 't', '1')

    def _int_list(v):
        # BUG FIX: `type=list` would split the argument string into single
        # characters; parse a comma-separated list of GPU ids instead.
        return [int(x) for x in str(v).split(',') if x]

    parser.add_argument('--cfg_name', default='ssd_analyze_voc',
                        help='base name of config file')
    parser.add_argument('--job_group', default='base', type=str,
                        help='Directory for saving checkpoint models')
    parser.add_argument('--cuda', default=False, type=_str2bool,
                        help='Use CUDA to train model')
    parser.add_argument('--tensorboard', default=True, type=_str2bool,
                        help='Use tensorboard')
    parser.add_argument('--devices', default='0,1,2,3,4', type=str,
                        help='GPU to use for net forward')
    parser.add_argument('--net_gpus', default=[0, 1, 2, 3], type=_int_list,
                        help='GPU to use for net forward')
    # BUG FIX: was `type=list` on an integer default; a single GPU id is an int.
    parser.add_argument('--loss_gpu', default=4, type=int,
                        help='GPU to use for loss calculation')
    args = parser.parse_args()
    tb_writer, _, _, log_dir = setup_folder(args, cfg)
    analyze_gt(tb_writer)

 def detect(self,img):
     """Run single-image SSD inference and return detections above 0.5.

     Args:
         img: path to an image file (read with cv2.imread).

     Returns:
         list of [label, score, x1, y1, x2, y2] entries (pixel coords,
         ints) for every detection with score > 0.5.

     NOTE(review): requires `self.args` to carry `trained_model` and
     `cuda`; the input tensor is moved to GPU unconditionally below, so
     CUDA is effectively required regardless of `self.args.cuda`.
     """
     color_ind = [(255,0,0), (0,255,0),(0,0,255),(255,0,0),(255,255,0)]
     # Class index j maps to label_name[j-1]; index 0 is background.
     label_name = ['drink','phone','hand','face']
     results = []
     tb_writer, cfg_path, snapshot_dir, log_dir = setup_folder(self.args, cfg, phase='eval')
     #cfg.DATASET.NUM_EVAL_PICS = 0
     #cfg.EVAL.ONLY_SAVE_RESULTS = True
     #cfg.DATASET.EVAL_BATCH_SIZE = 8
     #cfg.DATASET.NUM_WORKERS = 2
     # cfg.DATASET.VAL_DATASET_DIR = '/home/maolei/data/coverLP_det/'
     # cfg.DATASET.TEST_SETS = (('test_data', 'small_test.txt'), )

     #if tb_writer is not None:
     #    tb_writer.cfg['show_test_image'] = args.save_log
     model_dir = self.args.trained_model

     np.set_printoptions(precision=3, suppress=True, edgeitems=4)
     #loader = dataset_factory(phase='eval', cfg=cfg)

     # load net
     net, priors, _ = model_factory(phase='eval', cfg=cfg)
     # net.load_state_dict(torch.load(model_dir))
     net.load_state_dict(torch.load(model_dir)['state_dict'])

     if self.args.cuda:
         net = torch.nn.DataParallel(net)
         net = net.cuda()
         # volatile=True is legacy (pre-0.4) PyTorch: no autograd history.
         priors = Variable(priors.cuda(), volatile=True)
     else:
         priors = Variable(priors)
     net.eval()
     detector = DetectOut(cfg)

     #print('test_type:', cfg.DATASET.TEST_SETS, 'test_model:', args.trained_model,
     #      'device_id:', args.devices, 'test_dir:', args.test_path)

     

     img = cv2.imread(img)
     #if img is None:
        #print(img_root)
        #continue
     im_copy = img.copy()
     h,w,c = img.shape
     # Resize to the network input size and subtract the per-channel means.
     # NOTE(review): means (104,117,123) are applied before the BGR->RGB
     # channel swap below -- confirm this matches how the model was trained.
     x = cv2.resize(img, (cfg.DATASET.IMAGE_SIZE[1], cfg.DATASET.IMAGE_SIZE[0])).astype(np.float32)
     x -= (104., 117., 123.)
     x = x[:, :, (2, 1, 0)]
     
     # HWC -> CHW, add the batch dimension; moved to GPU unconditionally.
     x = torch.from_numpy(x).permute(2,0,1)
     x = Variable(x.unsqueeze(0)).cuda()
     # net = net.cuda()
     loc, conf = net(x, phase='eval')
     # detections: [batch, num_classes, top_k, 5] with (score, x1, y1, x2, y2)
     # per row -- presumably; confirm against DetectOut.
     detections = detector(loc, conf, priors).data
     cnt = 0

     #xmin, ymin, xmax, ymax, score, cls
     max_conf_bbx = [-1., -1., -1., -1., -1., -1.] #conf idx
     for j in range(1, detections.size(1)):
         #print(j)
         dets = detections[0, j, :]
         label = label_name[j-1]
         # Keep rows with positive score; mask must match dets' 5 columns.
         mask = dets[:, 0].gt(0.).expand(5, dets.size(0)).t()
         dets = torch.masked_select(dets, mask).view(-1, 5)
         if dets.dim() == 0:
             continue
         # Scale normalized box coords back to pixel units.
         boxes = dets[:, 1:]
         boxes[:, 0] *= w
         boxes[:, 2] *= w
         boxes[:, 1] *= h
         boxes[:, 3] *= h
         scores = dets[:, 0].cpu().numpy()
         
         for t in range(scores.shape[0]):
             
             if(scores[t] > 0.5):
                 # Clamp the box to the image and round to 0.1 px.
                 x1 = min(boxes[t][0], w)
                 x1 = round(max(x1, 0), 1)
                 x2 = min(boxes[t][2], w)
                 x2 = round(max(x2, 0), 1)
                 y1 = min(boxes[t][1], h)
                 y1 = round(max(y1, 0), 1)
                 y2 = min(boxes[t][3], h)
                 y2 = round(max(y2, 0), 1)
 
                 # Track the single highest-confidence box across all classes
                 # (computed but not returned).
                 if max_conf_bbx[4] < scores[t]:
                     max_conf_bbx[0] = x1
                     max_conf_bbx[1] = y1
                     max_conf_bbx[2] = x2
                     max_conf_bbx[3] = y2
                     max_conf_bbx[4] = scores[t]
                     max_conf_bbx[5] = j - 1
                 
                 results.append([label,scores[t],int(x1),int(y1),int(x2),int(y2)])
     #print(results)
     return results  
Пример #7
0
def train():
    """Train a classification network (cls_net) with FocalLoss_BCE,
    periodically checkpointing and evaluating.

    Relies on module-level ``args`` and ``cfg``.  NOTE(review): unlike the
    detection variants, checkpoints store the whole model object under
    'state_dict' (see save_checkpoint below), which is why resuming calls
    ``checkpoint['state_dict'].state_dict()``.
    """
    tb_writer, cfg_path, snapshot_dir, log_dir = setup_folder(args, cfg)
    print(cfg_path)
    step_index = 0  # index into cfg.TRAIN.LR_SCHEDULER.STEPS

    train_loader = dataset_factory(phase='train', cfg=cfg)
    val_loader = dataset_factory(phase='eval', cfg=cfg)
    eval_solver = eval_solver_factory(val_loader, cfg)

    cls_net = clsmodel_factory(phase='train', cfg=cfg)
    net = cls_net  # net is the parallel version of cls_net

    print(net)
    # --shownet: print the architecture and exit without training.
    if args.shownet:
        return;
    
    if args.cuda:
        net = torch.nn.DataParallel(cls_net, device_ids=cfg.GENERAL.NET_CPUS)

    # Xavier init first; overwritten below when resuming / loading pretrain.
    cls_net.apply(weights_init)
    if args.resume:
        print('Resuming training, loading {}...'.format(args.resume))
        checkpoint = torch.load(args.resume)
        args.start_iter = checkpoint['iteration']
        step_index = checkpoint['step_index']
        cls_net.load_state_dict(checkpoint['state_dict'].state_dict())
    elif args.pretrain:
        # pretained weights
        print('Loading pretrained model: {}'.format(cfg.MODEL.PRETRAIN_MODEL))
        pretrain_weights = torch.load(cfg.MODEL.PRETRAIN_MODEL)
        if 'reducedfc' not in cfg.MODEL.PRETRAIN_MODEL:
            print('Loading whole network...')
            cls_net.load_state_dict(pretrain_weights['state_dict'].state_dict(), strict=False)
#             cls_net.apply(weights_init)
#             try:
#                 cls_net.load_state_dict(pretrain_weights, strict=False)
#             except RuntimeError:  # another dataset
#                 entries = [i for i in pretrain_weights['state_dict'].keys() if i.startswith('conf')]
#                 for key in entries:
#                     del pretrain_weights['state_dict'][key]
#                 cls_net.load_state_dict(pretrain_weights['state_dict'], strict=False)
        else:
            print('Loading base network...')
            cls_net.base.load_state_dict(pretrain_weights['state_dict'].state_dict(), strict=False)
    else:
        cls_net.apply(weights_init)
        print('random init net weight with xavier')

    if args.cuda:
        net = net.cuda()

    optimizer = optim.SGD(net.parameters(), lr=cfg.TRAIN.OPTIMIZER.LR,
                          momentum=cfg.TRAIN.OPTIMIZER.MOMENTUM,
                          weight_decay=cfg.TRAIN.OPTIMIZER.WEIGHT_DECAY)

    # criterion = MultiBoxLoss(cfg, args.cuda)
#     cls_net.criterion = DetectLoss(cfg)
#     criterion_post = DetectLossPost(cfg)
#     criterion_post = nn.BCEWithLogitsLoss()
    criterion_post = FocalLoss_BCE(alpha=0.8,gamma=2,num_classes=1)
#     criterion_post = FocalLoss_BCE(alpha=0.5,gamma=0,num_classes=1)

    # continue training at 8w, 12w...
    if args.start_iter not in cfg.TRAIN.LR_SCHEDULER.STEPS and step_index != 0:
        adjust_learning_rate(optimizer, cfg.TRAIN.OPTIMIZER.LR, cfg.TRAIN.LR_SCHEDULER.GAMMA, 100,
                             step_index, None, None)

    net.train()
    epoch_size = len(train_loader.dataset) // cfg.DATASET.TRAIN_BATCH_SIZE
    # Ceiling division over the REMAINING iterations (start_iter onwards).
    num_epochs = (cfg.TRAIN.MAX_ITER - args.start_iter + epoch_size - 1) // epoch_size
    print('Training SSD on:', train_loader.dataset.name)
    print('Using the specified args:')
    print(args)

    # timer
    t_ = {'network': Timer(), 'forward': Timer(), 'misc': Timer(), 'all': Timer(), 'eval': Timer()}
    t_['all'].tic()

    iteration = args.start_iter
    # Offset epoch numbering when resuming mid-run.
    epoch_bias = int(iteration/epoch_size)
    for epoch in range(num_epochs):
        epoch += epoch_bias
        tb_writer.cfg['epoch'] = epoch
        for images,targets,_ in train_loader:
            tb_writer.cfg['iteration'] = iteration
            t_['misc'].tic()
            # LR schedule: step decay at configured iterations; warmup only
            # for fresh runs (skipped when resuming).
            if iteration in cfg.TRAIN.LR_SCHEDULER.STEPS or \
                    (epoch < cfg.TRAIN.WARMUP_EPOCH and not args.resume):
                if epoch >= cfg.TRAIN.WARMUP_EPOCH:
                    step_index += 1
                adjust_learning_rate(optimizer, cfg.TRAIN.OPTIMIZER.LR, cfg.TRAIN.LR_SCHEDULER.GAMMA, epoch,
                                     step_index, iteration, epoch_size, cfg.TRAIN.WARMUP_EPOCH)

            # save model
            if iteration % cfg.TRAIN.SAVE_ITER == 0 and iteration != args.start_iter or \
                    iteration == cfg.TRAIN.MAX_ITER:
                print('Saving state, iter:', iteration)
                # NOTE(review): stores the model OBJECT, not its state_dict.
                save_checkpoint({'iteration': iteration,
                                 'step_index': step_index,
                                 'state_dict': cls_net},
                                snapshot_dir,
                                args.cfg_name + '_' + repr(iteration) + '.pth')
            # Eval
            if iteration % cfg.TRAIN.EVAL_ITER == 0 or iteration == cfg.TRAIN.MAX_ITER:
                tb_writer.cfg['phase'] = 'eval'
                tb_writer.cfg['iter'] = iteration
                t_['eval'].tic()
                net.eval()
                # NOTE(review): re-wraps the (possibly already DataParallel)
                # net for eval, then rebuilds the training wrapper from
                # cls_net afterwards -- confirm this double wrapping is intended.
                if torch.cuda.is_available():
                    net = nn.DataParallel(net, device_ids=[0])
                aps, mAPs = eval_solver.validate(net, tb_writer=tb_writer)
                net.train()
                if torch.cuda.is_available():
                    net = torch.nn.DataParallel(cls_net, device_ids=cfg.GENERAL.NET_CPUS)
                t_['eval'].toc()
                print('Iteration ' + str(iteration) + ' || mAP: %.3f' % mAPs[0] + ' ||eval_time: %.4f/%.4f' %
                      (t_['eval'].diff, t_['eval'].average_time))
                if cfg.DATASET.NAME == 'VOC0712':
                    tb_writer.writer.add_scalar('mAP/[email protected]', mAPs[0], iteration)
                else:
                    tb_writer.writer.add_scalar('mAP/[email protected]', mAPs[0], iteration)
                    tb_writer.writer.add_scalar('mAP/[email protected]', mAPs[1], iteration)
                tb_writer.cfg['phase'] = 'train'

                if iteration == cfg.TRAIN.MAX_ITER:
                    break
                    
            # Classification targets: one stacked tensor, not per-image lists.
            targets = torch.stack(targets)
            if args.cuda:
                images = Variable(images.cuda(), requires_grad=False)
#                 targets = [Variable(ann.cuda(cfg.GENERAL.LOSS_GPU), volatile=True)
#                            for ann in targets]
                targets = Variable(targets.cuda(cfg.GENERAL.LOSS_GPU), requires_grad=False)
            else:
                images = Variable(images)
#                 targets = [Variable(ann, volatile=True) for ann in targets]
                targets = Variable(targets, requires_grad=False)

            # forward
            t_['network'].tic()
            t_['forward'].tic()

            net_outputs = net(images)
            t_['forward'].toc()
#             import pdb
#             pdb.set_trace()
            
#             net_outputs = net_outputs.view(1,-1)
#             targets = targets.view(1,-1)
            loss = criterion_post(net_outputs,targets)

            # backward
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            t_['network'].toc()

            # log
            if iteration % cfg.TRAIN.LOG_LOSS_ITER == 0 or iteration == 1:
                t_['misc'].toc()
                now_time = datetime.datetime.now()
                time_str = datetime.datetime.strftime(now_time,'%Y-%m-%d %H:%M:%S')
                print(time_str+'\tIter ' + str(iteration) + ' || Loss: %.3f' % (loss.data[0]), end=' ')
                print('Forward Timer: %.3f sec.' % t_['forward'].diff, '  Lr: %.6f' % optimizer.param_groups[0]['lr'])
                if args.tensorboard:
                    phase = tb_writer.cfg['phase']
                    tb_writer.writer.add_scalar('{}/all_loss'.format(phase), loss.data[0], iteration)
                    tb_writer.writer.add_scalar('{}/time'.format(phase), t_['misc'].diff, iteration)

            iteration += 1

    # Archive config/logs for this job.
    backup_jobs(cfg, cfg_path, log_dir)