Example #1
0
def train():
    """Train a YOLO-family detector (yolo_v2 / yolo_v3 / slim_yolo_v2 /
    tiny_yolo_v3) on the VOC0712 dataset.

    All configuration comes from command-line arguments (``parse_args``).
    Builds the dataset, model and SGD optimizer, then runs the epoch loop
    with optional LR warmup, cosine or step LR schedule and the multi-scale
    trick, checkpointing the model every 10 epochs.
    """
    args = parse_args()

    # checkpoint directory: <save_folder>/<version>/
    path_to_save = os.path.join(args.save_folder, args.version)
    os.makedirs(path_to_save, exist_ok=True)

    # hr: request a backbone pretrained at higher input resolution
    hr = False
    if args.high_resolution:
        print('use hi-res backbone')
        hr = True

    cfg = voc_ab

    if args.cuda:
        print('use cuda')
        cudnn.benchmark = True
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    # use multi-scale trick: start at 608x608, resized on the fly below
    if args.multi_scale:
        print('use multi-scale trick.')
        input_size = [608, 608]
        dataset = VOCDetection(root=args.dataset_root, transform=SSDAugmentation([608, 608], mean=(0.406, 0.456, 0.485), std=(0.225, 0.224, 0.229)))

    else:
        input_size = cfg['min_dim']
        dataset = VOCDetection(root=args.dataset_root, transform=SSDAugmentation(cfg['min_dim'], mean=(0.406, 0.456, 0.485), std=(0.225, 0.224, 0.229)))

    # build model (imported lazily so only the requested version is loaded)
    if args.version == 'yolo_v2':
        from models.yolo_v2 import myYOLOv2
        total_anchor_size = tools.get_total_anchor_size()
    
        yolo_net = myYOLOv2(device, input_size=input_size, num_classes=args.num_classes, trainable=True, anchor_size=total_anchor_size, hr=hr)
        print('Let us train yolo-v2 on the VOC0712 dataset ......')

    elif args.version == 'yolo_v3':
        from models.yolo_v3 import myYOLOv3
        total_anchor_size = tools.get_total_anchor_size(multi_level=True, version='yolo_v3')
        
        yolo_net = myYOLOv3(device, input_size=input_size, num_classes=args.num_classes, trainable=True, anchor_size=total_anchor_size, hr=hr)
        print('Let us train yolo-v3 on the VOC0712 dataset ......')

    elif args.version == 'slim_yolo_v2':
        from models.slim_yolo_v2 import SlimYOLOv2
        total_anchor_size = tools.get_total_anchor_size()
    
        yolo_net = SlimYOLOv2(device, input_size=input_size, num_classes=args.num_classes, trainable=True, anchor_size=total_anchor_size, hr=hr)
        print('Let us train slim-yolo-v2 on the VOC0712 dataset ......')

    elif args.version == 'tiny_yolo_v3':
        from models.tiny_yolo_v3 import YOLOv3tiny
        total_anchor_size = tools.get_total_anchor_size(multi_level=True, version='tiny_yolo_v3')
    
        yolo_net = YOLOv3tiny(device, input_size=input_size, num_classes=args.num_classes, trainable=True, anchor_size=total_anchor_size, hr=hr)
        print('Let us train tiny-yolo-v3 on the VOC0712 dataset ......')

    else:
        print('Unknown version !!!')
        exit()


    # finetune the model trained on COCO
    # (strict=False so heads with a different class count can be skipped)
    if args.resume is not None:
        print('finetune COCO trained ')
        yolo_net.load_state_dict(torch.load(args.resume, map_location=device), strict=False)


    # use tfboard
    if args.tfboard:
        print('use tensorboard')
        from torch.utils.tensorboard import SummaryWriter
        c_time = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
        log_path = os.path.join('log/voc/', args.version, c_time)
        os.makedirs(log_path, exist_ok=True)

        writer = SummaryWriter(log_path)
    
    print("----------------------------------------Object Detection--------------------------------------------")
    model = yolo_net
    model.to(device)

    base_lr = args.lr
    tmp_lr = base_lr
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum,
                                            weight_decay=args.weight_decay)

    # loss counters
    print("----------------------------------------------------------")
    print("Let's train OD network !")
    print('Training on:', dataset.name)
    print('The dataset size:', len(dataset))
    print("----------------------------------------------------------")

    epoch_size = len(dataset) // args.batch_size
    max_epoch = cfg['max_epoch']

    data_loader = data.DataLoader(dataset, args.batch_size,
                                  num_workers=args.num_workers,
                                  shuffle=True, collate_fn=detection_collate,
                                  pin_memory=True)
    # create batch iterator
    t0 = time.time()

    # start training
    for epoch in range(max_epoch):
        
        # use cos lr
        if args.cos and epoch > 20 and epoch <= max_epoch - 20:
            # cosine anneal from base_lr down to 1e-5 over (20, max_epoch-20]
            tmp_lr = 0.00001 + 0.5*(base_lr-0.00001)*(1+math.cos(math.pi*(epoch-20)*1./ (max_epoch-20)))
            set_lr(optimizer, tmp_lr)

        elif args.cos and epoch > max_epoch - 20:
            # hold at the floor LR for the last 20 epochs
            tmp_lr = 0.00001
            set_lr(optimizer, tmp_lr)
        
        # use step lr
        else:
            if epoch in cfg['lr_epoch']:
                tmp_lr = tmp_lr * 0.1
                set_lr(optimizer, tmp_lr)
    
        for iter_i, (images, targets) in enumerate(data_loader):
            # WarmUp strategy for learning rate:
            # ramp from 0 to base_lr with a quartic curve over wp_epoch epochs
            if not args.no_warm_up:
                if epoch < args.wp_epoch:
                    tmp_lr = base_lr * pow((iter_i+epoch*epoch_size)*1. / (args.wp_epoch*epoch_size), 4)
                    # tmp_lr = 1e-6 + (base_lr-1e-6) * (iter_i+epoch*epoch_size) / (epoch_size * (args.wp_epoch))
                    set_lr(optimizer, tmp_lr)

                elif epoch == args.wp_epoch and iter_i == 0:
                    # warmup finished: restore the base LR exactly once
                    tmp_lr = base_lr
                    set_lr(optimizer, tmp_lr)
                    
            # to plain Python lists so the gt_creator helpers can consume them
            targets = [label.tolist() for label in targets]

            # make train label (single-level grid for v2, multi-level for v3)
            if args.version == 'yolo_v2' or args.version == 'slim_yolo_v2':
                targets = tools.gt_creator(input_size, yolo_net.stride, targets, version=args.version)
            elif args.version == 'yolo_v3' or args.version == 'tiny_yolo_v3':
                targets = tools.multi_gt_creator(input_size, yolo_net.stride, targets, version=args.version)

            # to device
            images = images.to(device)
            targets = torch.tensor(targets).float().to(device)

            # forward and loss (model computes its own losses in training mode)
            conf_loss, cls_loss, txtytwth_loss, total_loss = model(images, target=targets)
                     
            # backprop and update
            total_loss.backward()
            optimizer.step()
            optimizer.zero_grad()

            if iter_i % 10 == 0:
                if args.tfboard:
                    # viz loss
                    writer.add_scalar('object loss', conf_loss.item(), iter_i + epoch * epoch_size)
                    writer.add_scalar('class loss', cls_loss.item(), iter_i + epoch * epoch_size)
                    writer.add_scalar('local loss', txtytwth_loss.item(), iter_i + epoch * epoch_size)
                
                t1 = time.time()
                print('[Epoch %d/%d][Iter %d/%d][lr %.6f]'
                    '[Loss: obj %.2f || cls %.2f || bbox %.2f || total %.2f || size %d || time: %.2f]'
                        % (epoch+1, max_epoch, iter_i, epoch_size, tmp_lr,
                            conf_loss.item(), cls_loss.item(), txtytwth_loss.item(), total_loss.item(), input_size[0], t1-t0),
                        flush=True)

                t0 = time.time()

            # multi-scale trick: every 10 iters pick a new size in {320..608},
            # multiples of 32 (the network stride)
            if iter_i % 10 == 0 and iter_i > 0 and args.multi_scale:
                size = random.randint(10, 19) * 32
                input_size = [size, size]
                model.set_grid(input_size)

                # change input dim
                # But this operation will report bugs when we use more workers in data loader, so I have to use 0 workers.
                # I don't know how to make it suit more workers, and I'm trying to solve this question.
                data_loader.dataset.reset_transform(SSDAugmentation(input_size, mean=(0.406, 0.456, 0.485), std=(0.225, 0.224, 0.229)))

        # checkpoint every 10 epochs
        if (epoch + 1) % 10 == 0:
            print('Saving state, epoch:', epoch + 1)
            torch.save(model.state_dict(), os.path.join(path_to_save, 
                        args.version + '_' + repr(epoch + 1) + '.pth')  
                    )
def train(net, device):
    """Train a YOLO-v2 style detector on MSCOCO.

    Parameters:
        net: the detector module to train; moved to ``device`` and put in
            train mode inside this function.
        device: torch.device used for the model, images and targets.

    Relies on the module-level ``args``, ``cfg``, ``data_dir`` and ``hr``
    (hence the ``global`` statement), plus project helpers such as
    ``COCODataset``, ``SSDAugmentation``, ``tools``, ``COCOAPIEvaluator``
    and ``adjust_learning_rate``.
    """
    global cfg, hr
    # set GPU

    # optional focal loss for the objectness branch
    use_focal = False
    if args.use_focal == 1:
        print("Let's use focal loss for objectness !!!")
        use_focal = True

    if args.multi_scale == 1:
        print('Let us use the multi-scale trick.')
        ms_inds = range(len(cfg['multi_scale']))
        dataset = COCODataset(data_dir=data_dir,
                              img_size=608,
                              transform=SSDAugmentation([608, 608], MEANS),
                              debug=args.debug)
    else:
        dataset = COCODataset(data_dir=data_dir,
                              img_size=cfg['min_dim'][0],
                              transform=SSDAugmentation(cfg['min_dim'], MEANS),
                              debug=args.debug)

    print("Setting Arguments.. : ", args)
    print("----------------------------------------------------------")
    print('Loading the MSCOCO dataset...')
    print('Training model on:', dataset.name)
    print('The dataset size:', len(dataset))
    print('The obj weight : ', args.obj)
    print('The noobj weight : ', args.noobj)
    print("----------------------------------------------------------")

    input_size = cfg['min_dim']
    batch_size = args.batch_size
    save_folder = args.save_folder

    # exist_ok avoids a race between the check and the mkdir
    os.makedirs(save_folder, exist_ok=True)

    # using tfboard
    from tensorboardX import SummaryWriter
    c_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    log_path = 'log/yolo_v2/coco/' + c_time
    # makedirs (not mkdir): the intermediate 'log/yolo_v2/coco' directories
    # may not exist yet; the original's duplicated mkdir guard is removed.
    os.makedirs(log_path, exist_ok=True)

    writer = SummaryWriter(log_path)

    if args.high_resolution == 1:
        hr = True

    print('Let us train yolo-v2 on the MSCOCO dataset ......')

    model = net
    model.to(device).train()

    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batch_size,
                                             shuffle=True,
                                             collate_fn=detection_collate,
                                             num_workers=args.n_cpu)

    evaluator = COCOAPIEvaluator(data_dir=data_dir,
                                 img_size=cfg['min_dim'],
                                 device=device,
                                 transform=BaseTransform(
                                     cfg['min_dim'], MEANS))

    # optimizer setup
    lr = args.lr
    optimizer = optim.RMSprop(model.parameters(), lr=args.lr)

    step_index = 0
    # each part of loss weight
    obj_w = 1.0
    cla_w = 1.0
    box_w = 2.0

    # start training loop
    iteration = 0
    for epoch in range(cfg['max_epoch']):
        batch_iterator = iter(dataloader)

        # step LR schedule: decay by gamma at each milestone epoch
        if epoch in cfg['lr_epoch']:
            step_index += 1
            lr = adjust_learning_rate(optimizer, args.gamma, step_index)

        # COCO evaluation
        if (epoch + 1) % args.eval_epoch == 0:
            model.trainable = False
            ap50_95, ap50 = evaluator.evaluate(model)
            print('ap50 : ', ap50)
            # fixed label: this value is AP@[.5:.95] (was printed as 'ap90_95')
            print('ap50_95 : ', ap50_95)
            model.trainable = True
            model.train()
            writer.add_scalar('val/COCOAP50', ap50, epoch + 1)
            writer.add_scalar('val/COCOAP50_95', ap50_95, epoch + 1)

        for images, targets in batch_iterator:
            iteration += 1

            # multi-scale trick: pick a new input size every 10 iterations
            if iteration % 10 == 0 and args.multi_scale == 1:
                ms_ind = random.sample(ms_inds, 1)[0]
                input_size = cfg['multi_scale'][int(ms_ind)]

            # resize the whole batch to the current input size
            if args.multi_scale == 1:
                images = torch.nn.functional.interpolate(images,
                                                         size=input_size,
                                                         mode='bilinear',
                                                         align_corners=True)

            # build training targets for the selected model version
            targets = [label.tolist() for label in targets]
            if args.version == 'yolo_v2':
                # fixed: was `yolo_net.stride` — `yolo_net` is not defined in
                # this function; the trained model is `model`
                targets = tools.gt_creator(input_size,
                                           model.stride,
                                           args.num_classes,
                                           targets,
                                           name='COCO')
            elif args.version == 'yolo_v3':
                targets = tools.multi_gt_creator(input_size,
                                                 model.stride,
                                                 args.num_classes,
                                                 targets,
                                                 name='COCO')

            targets = torch.tensor(targets).float().to(device)

            t0 = time.time()
            out = model(images.to(device))
            obj_loss, class_loss, box_loss = tools.loss(
                out,
                targets,
                num_classes=args.num_classes,
                use_focal=use_focal,
                obj=args.obj,
                noobj=args.noobj)
            total_loss = obj_w * obj_loss + cla_w * class_loss + box_w * box_loss

            # viz loss
            writer.add_scalar('object loss', obj_loss.item(), iteration)
            writer.add_scalar('class loss', class_loss.item(), iteration)
            writer.add_scalar('local loss', box_loss.item(), iteration)
            writer.add_scalar('total loss', total_loss.item(), iteration)

            # backprop: zero the gradients on every step.  The original only
            # called zero_grad() once per epoch, so gradients silently
            # accumulated across all batches of an epoch.
            optimizer.zero_grad()
            total_loss.backward()
            optimizer.step()
            t1 = time.time()

            if iteration % 10 == 0:
                print('timer: %.4f sec.' % (t1 - t0))
                print('Epoch[%d / %d]' % (epoch+1, cfg['max_epoch']) + ' || iter ' + repr(iteration) + \
                      ' || Loss: %.4f ||' % (total_loss.item()) + ' || lr: %.8f ||' % (lr) + ' || input size: %d ||' % input_size[0], end=' ')

        # checkpoint every 10 epochs
        if (epoch + 1) % 10 == 0:
            print('Saving state, epoch:', epoch + 1)
            # fixed: save `model` (was the undefined `yolo_net`)
            torch.save(
                model.state_dict(), save_folder + '/' + args.version + '_' +
                repr(epoch + 1) + '.pth')
def train(net, device):
    """Train a YOLO-family detector on MSCOCO with cosine/step LR schedules.

    Parameters:
        net: the detector module to train; moved to ``device`` and put in
            train mode inside this function.
        device: torch.device used for the model, images and targets.

    Relies on the module-level ``args``, ``cfg``, ``data_dir`` and ``hr``
    (hence the ``global`` statement), plus project helpers such as
    ``COCODataset``, ``SSDAugmentation``, ``tools``, ``COCOAPIEvaluator``,
    ``cos_lr``, ``warmup_strategy`` and ``adjust_learning_rate``.
    """
    global cfg, hr
    # set GPU

    # optional focal loss for the objectness branch
    use_focal = False
    if args.use_focal:
        print("Let's use focal loss for objectness !!!")
        use_focal = True

    if args.multi_scale:
        print('Let us use the multi-scale trick.')
        ms_inds = range(len(cfg['multi_scale']))
        dataset = COCODataset(
                    data_dir=data_dir,
                    img_size=608,
                    transform=SSDAugmentation([608, 608], mean=(0.406, 0.456, 0.485), std=(0.225, 0.224, 0.229)),
                    debug=args.debug)
    else:
        dataset = COCODataset(
                    data_dir=data_dir,
                    img_size=cfg['min_dim'][0],
                    transform=SSDAugmentation(cfg['min_dim'], mean=(0.406, 0.456, 0.485), std=(0.225, 0.224, 0.229)),
                    debug=args.debug)
    
    print("Setting Arguments.. : ", args)
    print("----------------------------------------------------------")
    print('Loading the MSCOCO dataset...')
    print('Training model on:', dataset.name)
    print('The dataset size:', len(dataset))
    print('The obj weight : ', args.obj)
    print('The noobj weight : ', args.noobj)
    print("----------------------------------------------------------")

    input_size = cfg['min_dim']
    batch_size = args.batch_size

    # NOTE(review): save path is string concatenation, not os.path.join —
    # kept as-is so existing checkpoint layouts are untouched
    os.makedirs(args.save_folder + args.version, exist_ok=True)

    # using tfboard
    from tensorboardX import SummaryWriter
    c_time = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
    log_path = 'log/coco/' + c_time
    os.makedirs(log_path, exist_ok=True)

    writer = SummaryWriter(log_path)

    if args.high_resolution == 1:
        hr = True

    print('Let us train yolo-v2 on the MSCOCO dataset ......')
    
    model = net
    model.to(device).train()

    dataloader = torch.utils.data.DataLoader(
                    dataset, 
                    batch_size=batch_size, 
                    shuffle=True, 
                    collate_fn=detection_collate,
                    num_workers=args.n_cpu)

    evaluator = COCOAPIEvaluator(
                    data_dir=data_dir,
                    img_size=cfg['min_dim'],
                    device=device,
                    transform=BaseTransform(cfg['min_dim'], MEANS)
                    )

    # optimizer setup
    lr = args.lr
    optimizer = optim.RMSprop(model.parameters(), lr=args.lr)

    step_index = 0
    epoch_size = len(dataset) // args.batch_size
    # each part of loss weight
    obj_w = 1.0
    cla_w = 1.0
    box_w = 1.0

    # start training loop
    iteration = 0
    t0 = time.time()

    for epoch in range(cfg['max_epoch']):
        batch_iterator = iter(dataloader)

        # use cos lr: anneal between epoch 20 and (max_epoch - 20)
        if args.cos and epoch > 20 and epoch <= cfg['max_epoch'] - 20:
            lr = cos_lr(optimizer, epoch, cfg['max_epoch'])
        elif args.cos and epoch > cfg['max_epoch'] - 20:
            # hold at the floor LR for the last 20 epochs
            lr = 0.00001
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
        # use step lr
        else:
            if epoch in cfg['lr_epoch']:
                step_index += 1
                lr = adjust_learning_rate(optimizer, args.gamma, step_index)
    
        # COCO evaluation
        if (epoch + 1) % args.eval_epoch == 0:
            model.trainable = False
            ap50_95, ap50 = evaluator.evaluate(model)
            print('ap50 : ', ap50)
            print('ap50_95 : ', ap50_95)
            model.trainable = True
            model.train()
            writer.add_scalar('val/COCOAP50', ap50, epoch + 1)
            writer.add_scalar('val/COCOAP50_95', ap50_95, epoch + 1)

        for images, targets in batch_iterator:
            # WarmUp strategy for learning rate
            # NOTE(review): this parses as `args.no_warm_up != 'yes'`, i.e.
            # warmup runs unless the flag is the literal string 'yes' —
            # confirm that is the intended flag convention
            if not args.no_warm_up == 'yes':
                if epoch < args.wp_epoch:
                    lr = warmup_strategy(optimizer, epoch_size, iteration)

            iteration += 1
        
            # multi-scale trick: pick a new input size every 10 iterations
            if iteration % 10 == 0 and args.multi_scale:
                ms_ind = random.sample(ms_inds, 1)[0]
                input_size = cfg['multi_scale'][int(ms_ind)]
            
            # resize the whole batch to the current input size
            if args.multi_scale:
                images = torch.nn.functional.interpolate(images, size=input_size, mode='bilinear', align_corners=True)

            # build training targets for the selected model version
            targets = [label.tolist() for label in targets]
            if args.version == 'yolo_v2' or args.version == 'tiny_yolo_v2':
                # fixed: was `yolo_net.stride` — `yolo_net` is not defined in
                # this function; the trained model is `model`
                targets = tools.gt_creator(input_size, model.stride, targets, name='COCO')
            elif args.version == 'yolo_v3' or args.version == 'tiny_yolo_v3':
                targets = tools.multi_gt_creator(input_size, model.stride, targets, name='COCO')

            targets = torch.tensor(targets).float().to(device)

            out = model(images.to(device))

            optimizer.zero_grad()

            obj_loss, class_loss, box_loss = tools.loss(out, targets, num_classes=args.num_classes, 
                                                        use_focal=use_focal,
                                                        obj=args.obj,
                                                        noobj=args.noobj)
            total_loss = obj_w * obj_loss + cla_w * class_loss + box_w * box_loss

            # viz loss
            writer.add_scalar('object loss', obj_loss.item(), iteration)
            writer.add_scalar('class loss', class_loss.item(), iteration)
            writer.add_scalar('local loss', box_loss.item(), iteration)
            writer.add_scalar('total loss', total_loss.item(), iteration)
            # backprop
            total_loss.backward()        
            optimizer.step()

            if iteration % 10 == 0:
                t1 = time.time()
                print('[Epoch %d/%d][Iter %d][lr %.8f]'
                    '[Loss: obj %.2f || cls %.2f || bbox %.2f || total %.2f || imgsize %d || time: %.2f]'
                        % (epoch+1, cfg['max_epoch'], iteration, lr,
                            obj_loss.item(), class_loss.item(), box_loss.item(), total_loss.item(), input_size[0], t1-t0),
                        flush=True)

                t0 = time.time()


        # checkpoint every 10 epochs
        if (epoch + 1) % 10 == 0:
            print('Saving state, epoch:', epoch + 1)
            # fixed: save `model` (was the undefined `yolo_net`)
            torch.save(model.state_dict(), os.path.join(args.save_folder + args.version, 
                        args.version + '_' + repr(epoch + 1) + '.pth')
                        )
Example #4
0
def train():
    """Train a YOLO-Nano detector on VOC or COCO.

    All configuration comes from command-line arguments (``parse_args``).
    Builds the dataset/evaluator/dataloader via project helpers, constructs
    the model, then runs the epoch loop with LR warmup, step LR schedule,
    the multi-scale trick and optional EMA, evaluating and checkpointing
    every ``eval_epoch`` epochs.
    """
    args = parse_args()

    # checkpoint directory: <save_folder>/<dataset>/<version>/
    path_to_save = os.path.join(args.save_folder, args.dataset, args.version)
    os.makedirs(path_to_save, exist_ok=True)

    # cuda
    if args.cuda:
        print('use cuda')
        cudnn.benchmark = True
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    # multi-scale: both sizes start at img_size; train_size is re-randomized
    # inside the loop when --multi_scale is set
    train_size = val_size = args.img_size

    cfg = config.train_cfg
    # dataset and evaluator
    print("Setting Arguments.. : ", args)
    print("----------------------------------------------------------")
    print('Loading the dataset...')

    # dataset and evaluator
    dataset, evaluator, num_classes = build_dataset(args, train_size, val_size,
                                                    device)
    # dataloader
    dataloader = build_dataloader(args, dataset, detection_collate)

    print('Training model on:', args.dataset)
    print('The dataset size:', len(dataset))
    print("----------------------------------------------------------")

    # anchor boxes depend on the dataset statistics
    if args.dataset == 'voc':
        anchor_size = config.MULTI_ANCHOR_SIZE
    elif args.dataset == 'coco':
        anchor_size = config.MULTI_ANCHOR_SIZE_COCO

    # build model
    if args.version == 'yolo_nano':
        from models.yolo_nano import YOLONano
        backbone = '1.0x'
        net = YOLONano(device=device,
                       input_size=train_size,
                       num_classes=num_classes,
                       trainable=True,
                       anchor_size=anchor_size,
                       backbone=backbone)
        print('Let us train yolo_nano ......')

    else:
        print('Unknown model version !!!')
        exit()

    model = net
    model = model.to(device)

    # compute FLOPs and Params (needs eval mode; switched back right after)
    model.trainable = False
    model.eval()
    FLOPs_and_Params(model=model, size=train_size, device=device)
    model.trainable = True
    model.train()

    # EMA: exponential moving average of the weights (optional)
    ema = ModelEMA(model) if args.ema else None

    # use tfboard
    if args.tfboard:
        print('use tensorboard')
        from torch.utils.tensorboard import SummaryWriter
        c_time = time.strftime('%Y-%m-%d %H:%M:%S',
                               time.localtime(time.time()))
        log_path = os.path.join('log/coco/', args.version, c_time)
        os.makedirs(log_path, exist_ok=True)

        writer = SummaryWriter(log_path)

    # keep training
    if args.resume is not None:
        print('keep training model: %s' % (args.resume))
        model.load_state_dict(torch.load(args.resume, map_location=device))

    # optimizer setup
    base_lr = args.lr
    tmp_lr = base_lr
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=0.9,
                          weight_decay=5e-4)

    max_epoch = args.max_epoch
    epoch_size = len(dataset) // args.batch_size
    # warmup flag: flipped off once, the first iteration after wp_epoch
    warmup = True

    # start training loop
    t0 = time.time()

    for epoch in range(args.start_epoch, max_epoch):

        # use step lr
        if epoch in args.lr_epoch:
            tmp_lr = tmp_lr * 0.1
            set_lr(optimizer, tmp_lr)

        for iter_i, (images, targets) in enumerate(dataloader):
            ni = iter_i + epoch * epoch_size
            # warmup: quartic ramp from 0 to base_lr over the first wp_epoch epochs
            if epoch < args.wp_epoch and warmup:
                nw = args.wp_epoch * epoch_size
                tmp_lr = base_lr * pow(ni / nw, 4)
                set_lr(optimizer, tmp_lr)

            elif epoch == args.wp_epoch and iter_i == 0 and warmup:
                # warmup is over
                warmup = False
                tmp_lr = base_lr
                set_lr(optimizer, tmp_lr)

            # multi-scale trick: every 10 iters pick a new size in {320..608},
            # multiples of 32 (the network stride)
            if iter_i % 10 == 0 and iter_i > 0 and args.multi_scale:
                # randomly choose a new size
                train_size = random.randint(10, 19) * 32
                model.set_grid(train_size)
            if args.multi_scale:
                # interpolate the batch to the current training size
                images = torch.nn.functional.interpolate(images,
                                                         size=train_size,
                                                         mode='bilinear',
                                                         align_corners=False)

            # to plain Python lists so multi_gt_creator can consume them
            targets = [label.tolist() for label in targets]
            # make train label
            targets = tools.multi_gt_creator(train_size,
                                             net.stride,
                                             targets,
                                             anchor_size=anchor_size)

            # to device
            # NOTE(review): `.to(device)` assumes multi_gt_creator returns a
            # tensor here (other examples wrap it in torch.tensor) — confirm
            images = images.to(device).float()
            targets = targets.to(device).float()

            # forward and loss (model computes its own losses in training mode)
            conf_loss, cls_loss, bbox_loss, iou_loss = model(images,
                                                             target=targets)

            # total loss
            total_loss = conf_loss + cls_loss + bbox_loss + iou_loss

            # check loss: skip the update entirely on a NaN loss
            if torch.isnan(total_loss):
                continue

            # Backward and Optimize
            total_loss.backward()
            optimizer.step()
            optimizer.zero_grad()

            # ema
            if args.ema:
                ema.update(model)

            # display
            if iter_i % 10 == 0:
                if args.tfboard:
                    # viz loss
                    writer.add_scalar('obj loss', conf_loss.item(),
                                      iter_i + epoch * epoch_size)
                    writer.add_scalar('cls loss', cls_loss.item(),
                                      iter_i + epoch * epoch_size)
                    writer.add_scalar('box loss', bbox_loss.item(),
                                      iter_i + epoch * epoch_size)
                    writer.add_scalar('iou loss', iou_loss.item(),
                                      iter_i + epoch * epoch_size)

                t1 = time.time()
                print(
                    '[Epoch %d/%d][Iter %d/%d][lr %.6f]'
                    '[Loss: obj %.2f || cls %.2f || bbox %.2f || iou %.2f || total %.2f || size %d || time: %.2f]'
                    %
                    (epoch + 1, max_epoch, iter_i, epoch_size, tmp_lr,
                     conf_loss.item(), cls_loss.item(), bbox_loss.item(),
                     iou_loss.item(), total_loss.item(), train_size, t1 - t0),
                    flush=True)

                t0 = time.time()

        # evaluation
        if (epoch + 1) % args.eval_epoch == 0:
            model.trainable = False
            model.set_grid(val_size)
            model.eval()

            # evaluate
            evaluator.evaluate(model)

            # convert to training mode.
            model.trainable = True
            model.set_grid(train_size)
            model.train()

            # save model
            # NOTE(review): saves the raw model weights, not the EMA weights,
            # even when --ema is set — confirm this is intended
            print('Saving state, epoch:', epoch + 1)
            torch.save(
                model.state_dict(),
                os.path.join(path_to_save,
                             args.version + '_' + repr(epoch + 1) + '.pth'))
Example #5
0
def train():
    """Train a YOLO-family detector on VOC or COCO.

    Parses CLI args, builds the dataset/evaluator pair for the chosen
    dataset, constructs the requested model variant, then runs an SGD
    training loop with optional LR warm-up, cosine or step LR decay,
    multi-scale training, TensorBoard logging, periodic evaluation, and
    a checkpoint every 10 epochs.

    Relies on module-level names (argument parser, dataset classes,
    anchor-size constants, `tools`, `set_lr`, ...) defined elsewhere in
    the surrounding script.
    """
    args = parse_args()

    path_to_save = os.path.join(args.save_folder, args.dataset, args.version)
    os.makedirs(path_to_save, exist_ok=True)

    # use hi-res backbone
    if args.high_resolution:
        print('use hi-res backbone')
        hr = True
    else:
        hr = False

    # cuda
    if args.cuda:
        print('use cuda')
        cudnn.benchmark = True
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    # multi-scale: train at a larger base size; validation stays at 416.
    if args.multi_scale:
        print('use the multi-scale trick ...')
        train_size = [640, 640]
        val_size = [416, 416]
    else:
        train_size = [416, 416]
        val_size = [416, 416]

    cfg = train_cfg
    # dataset and evaluator
    print("Setting Arguments.. : ", args)
    print("----------------------------------------------------------")
    print('Loading the dataset...')

    if args.dataset == 'voc':
        data_dir = VOC_ROOT
        num_classes = 20
        dataset = VOCDetection(root=data_dir,
                               transform=SSDAugmentation(train_size))

        evaluator = VOCAPIEvaluator(data_root=data_dir,
                                    img_size=val_size,
                                    device=device,
                                    transform=BaseTransform(val_size),
                                    labelmap=VOC_CLASSES)

    elif args.dataset == 'coco':
        data_dir = coco_root
        num_classes = 80
        dataset = COCODataset(data_dir=data_dir,
                              img_size=train_size[0],
                              transform=SSDAugmentation(train_size),
                              debug=args.debug)

        evaluator = COCOAPIEvaluator(data_dir=data_dir,
                                     img_size=val_size,
                                     device=device,
                                     transform=BaseTransform(val_size))

    else:
        print('unknow dataset !! Only support voc and coco !!')
        exit(0)

    print('Training model on:', dataset.name)
    print('The dataset size:', len(dataset))
    print("----------------------------------------------------------")

    # dataloader
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=args.batch_size,
                                             shuffle=True,
                                             collate_fn=detection_collate,
                                             num_workers=args.num_workers,
                                             pin_memory=True)

    # build model: each branch picks the matching anchor set for the dataset.
    if args.version == 'yolo_v2':
        from models.yolo_v2 import myYOLOv2
        anchor_size = ANCHOR_SIZE if args.dataset == 'voc' else ANCHOR_SIZE_COCO

        yolo_net = myYOLOv2(device,
                            input_size=train_size,
                            num_classes=num_classes,
                            trainable=True,
                            anchor_size=anchor_size,
                            hr=hr)
        print('Let us train yolo_v2 on the %s dataset ......' % (args.dataset))

    elif args.version == 'yolo_v3':
        from models.yolo_v3 import myYOLOv3
        anchor_size = MULTI_ANCHOR_SIZE if args.dataset == 'voc' else MULTI_ANCHOR_SIZE_COCO

        yolo_net = myYOLOv3(device,
                            input_size=train_size,
                            num_classes=num_classes,
                            trainable=True,
                            anchor_size=anchor_size,
                            hr=hr)
        print('Let us train yolo_v3 on the %s dataset ......' % (args.dataset))

    elif args.version == 'yolo_v3_spp':
        from models.yolo_v3_spp import myYOLOv3Spp
        anchor_size = MULTI_ANCHOR_SIZE if args.dataset == 'voc' else MULTI_ANCHOR_SIZE_COCO

        yolo_net = myYOLOv3Spp(device,
                               input_size=train_size,
                               num_classes=num_classes,
                               trainable=True,
                               anchor_size=anchor_size,
                               hr=hr)
        print('Let us train yolo_v3_spp on the %s dataset ......' %
              (args.dataset))

    elif args.version == 'slim_yolo_v2':
        from models.slim_yolo_v2 import SlimYOLOv2
        anchor_size = ANCHOR_SIZE if args.dataset == 'voc' else ANCHOR_SIZE_COCO

        yolo_net = SlimYOLOv2(device,
                              input_size=train_size,
                              num_classes=num_classes,
                              trainable=True,
                              anchor_size=anchor_size,
                              hr=hr)
        print('Let us train slim_yolo_v2 on the %s dataset ......' %
              (args.dataset))

    elif args.version == 'tiny_yolo_v3':
        from models.tiny_yolo_v3 import YOLOv3tiny
        anchor_size = TINY_MULTI_ANCHOR_SIZE if args.dataset == 'voc' else TINY_MULTI_ANCHOR_SIZE_COCO

        yolo_net = YOLOv3tiny(device,
                              input_size=train_size,
                              num_classes=num_classes,
                              trainable=True,
                              anchor_size=anchor_size,
                              hr=hr)
        print('Let us train tiny_yolo_v3 on the %s dataset ......' %
              (args.dataset))

    else:
        print('Unknown version !!!')
        exit()

    model = yolo_net
    model.to(device).train()

    # use tfboard
    # NOTE(review): the log directory is hard-coded under 'log/coco/' even
    # when args.dataset == 'voc' — confirm whether this is intentional.
    if args.tfboard:
        print('use tensorboard')
        from torch.utils.tensorboard import SummaryWriter
        c_time = time.strftime('%Y-%m-%d %H:%M:%S',
                               time.localtime(time.time()))
        log_path = os.path.join('log/coco/', args.version, c_time)
        os.makedirs(log_path, exist_ok=True)

        writer = SummaryWriter(log_path)

    # keep training: resume weights from a checkpoint if given.
    if args.resume is not None:
        print('keep training model: %s' % (args.resume))
        model.load_state_dict(torch.load(args.resume, map_location=device))

    # optimizer setup
    base_lr = args.lr
    tmp_lr = base_lr
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    max_epoch = cfg['max_epoch']
    epoch_size = len(dataset) // args.batch_size

    # start training loop
    t0 = time.time()

    for epoch in range(args.start_epoch, max_epoch):

        # use cos lr: cosine anneal between epoch 20 and max_epoch - 20,
        # then hold at the floor of 1e-5 for the final 20 epochs.
        if args.cos and epoch > 20 and epoch <= max_epoch - 20:
            # use cos lr
            tmp_lr = 0.00001 + 0.5 * (base_lr - 0.00001) * (
                1 + math.cos(math.pi * (epoch - 20) * 1. / (max_epoch - 20)))
            set_lr(optimizer, tmp_lr)

        elif args.cos and epoch > max_epoch - 20:
            tmp_lr = 0.00001
            set_lr(optimizer, tmp_lr)

        # use step lr
        else:
            if epoch in cfg['lr_epoch']:
                tmp_lr = tmp_lr * 0.1
                set_lr(optimizer, tmp_lr)

        for iter_i, (images, targets) in enumerate(dataloader):
            # WarmUp strategy for learning rate: quartic ramp from 0 to
            # base_lr over the first wp_epoch epochs.
            if not args.no_warm_up:
                if epoch < args.wp_epoch:
                    tmp_lr = base_lr * pow((iter_i + epoch * epoch_size) * 1. /
                                           (args.wp_epoch * epoch_size), 4)
                    # tmp_lr = 1e-6 + (base_lr-1e-6) * (iter_i+epoch*epoch_size) / (epoch_size * (args.wp_epoch))
                    set_lr(optimizer, tmp_lr)

                elif epoch == args.wp_epoch and iter_i == 0:
                    tmp_lr = base_lr
                    set_lr(optimizer, tmp_lr)

            # to device
            images = images.to(device)

            # multi-scale trick: every 10 iterations pick a new square size
            # in {320, 352, ..., 608} (multiples of 32) and resize the batch.
            if iter_i % 10 == 0 and iter_i > 0 and args.multi_scale:
                # randomly choose a new size
                size = random.randint(10, 19) * 32
                train_size = [size, size]
                model.set_grid(train_size)
            if args.multi_scale:
                # interpolate
                images = torch.nn.functional.interpolate(images,
                                                         size=train_size,
                                                         mode='bilinear',
                                                         align_corners=False)

            # make labels: encode the raw box lists into the grid targets
            # expected by the single-level (v2) or multi-level (v3) heads.
            targets = [label.tolist() for label in targets]
            if args.version == 'yolo_v2' or args.version == 'slim_yolo_v2':
                targets = tools.gt_creator(input_size=train_size,
                                           stride=yolo_net.stride,
                                           label_lists=targets,
                                           anchor_size=anchor_size)
            else:
                targets = tools.multi_gt_creator(input_size=train_size,
                                                 strides=yolo_net.stride,
                                                 label_lists=targets,
                                                 anchor_size=anchor_size)
            targets = torch.tensor(targets).float().to(device)

            # forward and loss (the model computes its own loss in train mode)
            conf_loss, cls_loss, txtytwth_loss, total_loss = model(
                images, target=targets)

            # backprop
            total_loss.backward()
            optimizer.step()
            optimizer.zero_grad()

            # display
            if iter_i % 10 == 0:
                if args.tfboard:
                    # viz loss
                    writer.add_scalar('object loss', conf_loss.item(),
                                      iter_i + epoch * epoch_size)
                    writer.add_scalar('class loss', cls_loss.item(),
                                      iter_i + epoch * epoch_size)
                    writer.add_scalar('local loss', txtytwth_loss.item(),
                                      iter_i + epoch * epoch_size)

                t1 = time.time()
                print(
                    '[Epoch %d/%d][Iter %d/%d][lr %.6f]'
                    '[Loss: obj %.2f || cls %.2f || bbox %.2f || total %.2f || size %d || time: %.2f]'
                    % (epoch + 1, max_epoch, iter_i, epoch_size, tmp_lr,
                       conf_loss.item(), cls_loss.item(), txtytwth_loss.item(),
                       total_loss.item(), train_size[0], t1 - t0),
                    flush=True)

                t0 = time.time()

        # evaluation: switch to inference mode/grid, evaluate, switch back.
        if (epoch + 1) % args.eval_epoch == 0:
            model.trainable = False
            model.set_grid(val_size)
            model.eval()

            # evaluate
            evaluator.evaluate(model)

            # convert to training mode.
            model.trainable = True
            model.set_grid(train_size)
            model.train()

        # save model
        if (epoch + 1) % 10 == 0:
            print('Saving state, epoch:', epoch + 1)
            torch.save(
                model.state_dict(),
                os.path.join(path_to_save,
                             args.version + '_' + repr(epoch + 1) + '.pth'))
def train(model, device):
    """Train the given detector `model` on VOC, logging to TensorBoard.

    Relies on module-level names (`args`, `cfg`, `tools`, `MEANS`,
    `adjust_learning_rate`, `warmup_strategy`, ...) set up by the
    surrounding script.  Saves a checkpoint every 10 epochs.

    BUGFIX: the original referenced an undefined global ``yolo_net`` for
    the model stride and for checkpointing; the trained model is the local
    ``net``, which is used consistently here instead.
    """
    global cfg, hr

    # set GPU
    if not os.path.exists(args.save_folder):
        os.mkdir(args.save_folder)

    use_focal = False
    if args.use_focal == 1:
        print("Let's use focal loss for objectness !!!")
        use_focal = True

    if args.multi_scale == 1:
        print('Let us use the multi-scale trick.')
        ms_inds = range(len(cfg['multi_scale']))
        dataset = VOCDetection(root=args.dataset_root,
                               transform=SSDAugmentation([608, 608], MEANS))

    else:
        dataset = VOCDetection(root=args.dataset_root,
                               transform=SSDAugmentation(
                                   cfg['min_dim'], MEANS))

    from torch.utils.tensorboard import SummaryWriter
    c_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    log_path = 'log/yolo_v2/voc2007/'  # + c_time
    if not os.path.exists(log_path):
        os.mkdir(log_path)

    writer = SummaryWriter(log_path)

    print(
        "----------------------------------------Object Detection--------------------------------------------"
    )
    print("Let's train OD network !")
    net = model
    net = net.to(device)

    # optimizer = optim.Adam(net.parameters())
    lr = args.lr
    optimizer = optim.SGD(net.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    # optimizer = optim.RMSprop(net.parameters(), lr=args.lr)

    # loss counters
    print("----------------------------------------------------------")
    print('Loading the dataset...')
    print('Training on:', dataset.name)
    print('The dataset size:', len(dataset))
    print('The obj weight : ', args.obj)
    print('The noobj weight : ', args.noobj)
    print("----------------------------------------------------------")

    input_size = cfg['min_dim']
    step_index = 0
    epoch_size = len(dataset) // args.batch_size
    # each part of loss weight
    obj_w = 1.0
    cla_w = 1.0
    box_w = 2.0

    data_loader = data.DataLoader(dataset,
                                  args.batch_size,
                                  num_workers=args.num_workers,
                                  shuffle=True,
                                  collate_fn=detection_collate,
                                  pin_memory=True)
    # create batch iterator
    iteration = 0

    # start training
    for epoch in range(cfg['max_epoch']):
        batch_iterator = iter(data_loader)

        # No WarmUp strategy or WarmUp stage has finished.
        if epoch in cfg['lr_epoch']:
            step_index += 1
            lr = adjust_learning_rate(optimizer, args.gamma, step_index)

        for images, targets in batch_iterator:
            # WarmUp strategy for learning rate
            if args.warm_up == 'yes':
                if epoch < args.wp_epoch:
                    lr = warmup_strategy(optimizer, epoch_size, iteration)
            iteration += 1

            # multi-scale trick: every 10 iterations pick a new input size
            # from the configured scale list.
            if iteration % 10 == 0 and args.multi_scale == 1:
                ms_ind = random.sample(ms_inds, 1)[0]
                input_size = cfg['multi_scale'][int(ms_ind)]

            # multi scale
            if args.multi_scale == 1:
                images = torch.nn.functional.interpolate(images,
                                                         size=input_size,
                                                         mode='bilinear',
                                                         align_corners=True)

            # encode raw box lists into grid targets for the chosen head.
            targets = [label.tolist() for label in targets]
            if args.version == 'yolo_v2' or args.version == 'tiny_yolo_v2':
                targets = tools.gt_creator(input_size, net.stride,
                                           args.num_classes, targets)
            elif args.version == 'yolo_v3' or args.version == 'tiny_yolo_v3':
                targets = tools.multi_gt_creator(input_size, net.stride,
                                                 args.num_classes, targets)

            targets = torch.tensor(targets).float().to(device)

            # forward
            t0 = time.time()
            out = net(images.to(device))

            optimizer.zero_grad()

            obj_loss, class_loss, box_loss = tools.loss(
                out,
                targets,
                num_classes=args.num_classes,
                use_focal=use_focal,
                obj=args.obj,
                noobj=args.noobj)
            # print(obj_loss.item(), class_loss.item(), box_loss.item())
            total_loss = obj_w * obj_loss + cla_w * class_loss + box_w * box_loss
            # viz loss
            writer.add_scalar('object loss', obj_loss.item(), iteration)
            writer.add_scalar('class loss', class_loss.item(), iteration)
            writer.add_scalar('local loss', box_loss.item(), iteration)
            # backprop
            total_loss.backward()
            optimizer.step()
            t1 = time.time()

            if iteration % 10 == 0:
                print('timer: %.4f sec.' % (t1 - t0))
                # print(obj_loss.item(), class_loss.item(), box_loss.item())
                print('Epoch[%d / %d]' % (epoch+1, cfg['max_epoch']) + ' || iter ' + repr(iteration) + \
                        ' || Loss: %.4f ||' % (total_loss.item()) + ' || lr: %.8f ||' % (lr) + ' || input size: %d ||' % input_size[0], end=' ')

        if (epoch + 1) % 10 == 0:
            print('Saving state, epoch:', epoch + 1)
            torch.save(
                net.state_dict(), args.save_folder + '/' + args.version +
                '_' + repr(epoch + 1) + '.pth')
Example #7
0
def train():
    """Train a YOLO-family detector on MSCOCO.

    Parses CLI args, builds the COCO dataset and a (tiny-)YOLO v2/v3
    model, then runs an SGD training loop with optional LR warm-up,
    cosine or step LR decay, multi-scale training (resetting the dataset
    transform each time the input size changes), TensorBoard logging,
    periodic COCO AP evaluation, and a checkpoint every 10 epochs.

    Relies on module-level names (argument parser, dataset classes,
    `coco_ab`, `tools`, `set_lr`, ...) defined elsewhere in the script.
    """
    args = parse_args()
    data_dir = args.dataset_root

    path_to_save = os.path.join(args.save_folder, args.version)
    os.makedirs(path_to_save, exist_ok=True)

    hr = False
    if args.high_resolution:
        print('use hi-res backbone')
        hr = True

    cfg = coco_ab

    if args.cuda:
        print('use cuda')
        cudnn.benchmark = True
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    if args.multi_scale:
        print('Let us use the multi-scale trick.')
        input_size = [608, 608]
        dataset = COCODataset(data_dir=data_dir,
                              img_size=608,
                              transform=SSDAugmentation([608, 608],
                                                        mean=(0.406, 0.456,
                                                              0.485),
                                                        std=(0.225, 0.224,
                                                             0.229)),
                              debug=args.debug)
    else:
        input_size = cfg['min_dim']
        dataset = COCODataset(data_dir=data_dir,
                              img_size=cfg['min_dim'][0],
                              transform=SSDAugmentation(cfg['min_dim'],
                                                        mean=(0.406, 0.456,
                                                              0.485),
                                                        std=(0.225, 0.224,
                                                             0.229)),
                              debug=args.debug)

    # build model
    if args.version == 'yolo_v2':
        from models.yolo_v2 import myYOLOv2
        total_anchor_size = tools.get_total_anchor_size(name='COCO')

        yolo_net = myYOLOv2(device,
                            input_size=input_size,
                            num_classes=args.num_classes,
                            trainable=True,
                            anchor_size=total_anchor_size,
                            hr=hr)
        print('Let us train yolo-v2 on the COCO dataset ......')

    elif args.version == 'yolo_v3':
        from models.yolo_v3 import myYOLOv3
        total_anchor_size = tools.get_total_anchor_size(multi_level=True,
                                                        name='COCO')

        yolo_net = myYOLOv3(device,
                            input_size=input_size,
                            num_classes=args.num_classes,
                            trainable=True,
                            anchor_size=total_anchor_size,
                            hr=hr)
        print('Let us train yolo-v3 on the COCO dataset ......')

    elif args.version == 'tiny_yolo_v2':
        from models.tiny_yolo_v2 import YOLOv2tiny
        total_anchor_size = tools.get_total_anchor_size(name='COCO')

        yolo_net = YOLOv2tiny(device,
                              input_size=input_size,
                              num_classes=args.num_classes,
                              trainable=True,
                              anchor_size=total_anchor_size,
                              hr=hr)
        print('Let us train tiny-yolo-v2 on the COCO dataset ......')

    elif args.version == 'tiny_yolo_v3':
        from models.tiny_yolo_v3 import YOLOv3tiny
        total_anchor_size = tools.get_total_anchor_size(multi_level=True,
                                                        name='COCO')

        yolo_net = YOLOv3tiny(device,
                              input_size=input_size,
                              num_classes=args.num_classes,
                              trainable=True,
                              anchor_size=total_anchor_size,
                              hr=hr)
        print('Let us train tiny-yolo-v3 on the COCO dataset ......')

    else:
        print('Unknown version !!!')
        exit()

    print("Setting Arguments.. : ", args)
    print("----------------------------------------------------------")
    print('Loading the MSCOCO dataset...')
    print('Training model on:', dataset.name)
    print('The dataset size:', len(dataset))
    print("----------------------------------------------------------")

    # use tfboard
    if args.tfboard:
        print('use tensorboard')
        from torch.utils.tensorboard import SummaryWriter
        c_time = time.strftime('%Y-%m-%d %H:%M:%S',
                               time.localtime(time.time()))
        log_path = os.path.join('log/coco/', args.version, c_time)
        os.makedirs(log_path, exist_ok=True)

        writer = SummaryWriter(log_path)

    # NOTE(review): this banner always says "yolo-v2" regardless of
    # args.version — cosmetic only.
    print('Let us train yolo-v2 on the MSCOCO dataset ......')

    model = yolo_net
    model.to(device).train()

    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=args.batch_size,
                                             shuffle=True,
                                             collate_fn=detection_collate,
                                             num_workers=args.num_workers)

    evaluator = COCOAPIEvaluator(data_dir=data_dir,
                                 img_size=cfg['min_dim'],
                                 device=device,
                                 transform=BaseTransform(cfg['min_dim'],
                                                         mean=(0.406, 0.456,
                                                               0.485),
                                                         std=(0.225, 0.224,
                                                              0.229)))

    # optimizer setup
    base_lr = args.lr
    tmp_lr = base_lr
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    max_epoch = cfg['max_epoch']
    epoch_size = len(dataset) // args.batch_size

    # start training loop
    t0 = time.time()

    for epoch in range(max_epoch):

        # use cos lr: cosine anneal between epoch 20 and max_epoch - 20,
        # then hold at the floor of 1e-5 for the final 20 epochs.
        if args.cos and epoch > 20 and epoch <= max_epoch - 20:
            # use cos lr
            tmp_lr = 0.00001 + 0.5 * (base_lr - 0.00001) * (
                1 + math.cos(math.pi * (epoch - 20) * 1. / (max_epoch - 20)))
            set_lr(optimizer, tmp_lr)

        elif args.cos and epoch > max_epoch - 20:
            tmp_lr = 0.00001
            set_lr(optimizer, tmp_lr)

        # use step lr
        else:
            if epoch in cfg['lr_epoch']:
                tmp_lr = tmp_lr * 0.1
                set_lr(optimizer, tmp_lr)

        # COCO evaluation
        # NOTE(review): evaluation runs at the START of the epoch, so an
        # epoch that is a multiple of eval_epoch evaluates the weights
        # from the PREVIOUS epoch's training pass.
        if (epoch + 1) % args.eval_epoch == 0:
            model.trainable = False
            model.set_grid(cfg['min_dim'])
            # evaluate
            ap50_95, ap50 = evaluator.evaluate(model)
            print('ap50 : ', ap50)
            print('ap50_95 : ', ap50_95)
            # convert to training mode.
            model.trainable = True
            model.set_grid(input_size)
            model.train()
            if args.tfboard:
                writer.add_scalar('val/COCOAP50', ap50, epoch + 1)
                writer.add_scalar('val/COCOAP50_95', ap50_95, epoch + 1)

        for iter_i, (images, targets) in enumerate(dataloader):
            # WarmUp strategy for learning rate: linear ramp from 1e-6 to
            # base_lr over the first wp_epoch epochs.
            if not args.no_warm_up:
                if epoch < args.wp_epoch:
                    # tmp_lr = base_lr * pow((iter_i+epoch*epoch_size)*1. / (args.wp_epoch*epoch_size), 4)
                    tmp_lr = 1e-6 + (base_lr - 1e-6) * (
                        iter_i + epoch * epoch_size) / (epoch_size *
                                                        (args.wp_epoch))
                    set_lr(optimizer, tmp_lr)

                elif epoch == args.wp_epoch and iter_i == 0:
                    tmp_lr = base_lr
                    set_lr(optimizer, tmp_lr)

            # encode raw box lists into grid targets for the chosen head.
            targets = [label.tolist() for label in targets]
            if args.version == 'yolo_v2' or args.version == 'tiny_yolo_v2':
                targets = tools.gt_creator(input_size,
                                           yolo_net.stride,
                                           targets,
                                           name='COCO')
            elif args.version == 'yolo_v3' or args.version == 'tiny_yolo_v3':
                targets = tools.multi_gt_creator(input_size,
                                                 yolo_net.stride,
                                                 targets,
                                                 name='COCO')

            # to device
            images = images.to(device)
            targets = torch.tensor(targets).float().to(device)

            # forward and loss (the model computes its own loss in train mode)
            conf_loss, cls_loss, txtytwth_loss, total_loss = model(
                images, target=targets)

            # backprop
            total_loss.backward()
            optimizer.step()
            optimizer.zero_grad()

            if args.tfboard:
                # viz loss
                writer.add_scalar('object loss', conf_loss.item(),
                                  iter_i + epoch * epoch_size)
                writer.add_scalar('class loss', cls_loss.item(),
                                  iter_i + epoch * epoch_size)
                writer.add_scalar('local loss', txtytwth_loss.item(),
                                  iter_i + epoch * epoch_size)

            if iter_i % 10 == 0:

                t1 = time.time()
                print(
                    '[Epoch %d/%d][Iter %d/%d][lr %.6f]'
                    '[Loss: obj %.2f || cls %.2f || bbox %.2f || total %.2f || size %d || time: %.2f]'
                    % (epoch + 1, max_epoch, iter_i, epoch_size, tmp_lr,
                       conf_loss.item(), cls_loss.item(), txtytwth_loss.item(),
                       total_loss.item(), input_size[0], t1 - t0),
                    flush=True)

                t0 = time.time()

            # multi-scale trick: every 10 iterations pick a new square size
            # in {320, 352, ..., 608} and rebuild the dataset transform.
            if iter_i % 10 == 0 and iter_i > 0 and args.multi_scale:
                # ms_ind = random.sample(ms_inds, 1)[0]
                # input_size = cfg['multi_scale'][int(ms_ind)]
                size = random.randint(10, 19) * 32
                input_size = [size, size]
                model.set_grid(input_size)

                # change input dim
                # But this operation will report bugs when we use more workers in data loader, so I have to use 0 workers.
                # I don't know how to make it suit more workers, and I'm trying to solve this question.
                dataloader.dataset.reset_transform(
                    SSDAugmentation(input_size,
                                    mean=(0.406, 0.456, 0.485),
                                    std=(0.225, 0.224, 0.229)))

        if (epoch + 1) % 10 == 0:
            print('Saving state, epoch:', epoch + 1)
            torch.save(
                model.state_dict(),
                os.path.join(path_to_save,
                             args.version + '_' + repr(epoch + 1) + '.pth'))
Example #8
0
def train(model):
    """Train the given YOLO-v1-style detector `model` on VOC.

    Relies on module-level names (`args`, `cfg`, `hr`, `use_anchor`,
    `tools`, `MEANS`, `get_device`, `adjust_learning_rate`,
    `warmup_strategy`, ...) set up by the surrounding script.
    Saves a checkpoint every 10 epochs.

    BUGFIXES vs. the original:
    - ``lr`` was printed every 10 iterations but never defined in this
      function, raising NameError; it is now initialized from ``args.lr``
      and updated from ``adjust_learning_rate``'s return value (the
      sibling training script captures it the same way).
    - the model stride and checkpoint state were taken from an undefined
      global ``yolo_net``; the local ``net`` is used instead.
    """
    global cfg, hr, use_anchor

    # set GPU
    if not os.path.exists(args.save_folder):
        os.mkdir(args.save_folder)
    device = get_device()

    use_focal = False
    if args.use_focal == 1:
        print("Let's use focal loss for objectness !!!")
        use_focal = True

    dataset = VOCDetection(root=args.dataset_root,
                           transform=SSDAugmentation(cfg['min_dim'], MEANS))

    from torch.utils.tensorboard import SummaryWriter
    log_path = 'log/'
    if not os.path.exists(log_path):
        os.mkdir(log_path)

    writer = SummaryWriter(log_path)

    print(
        "----------------------------------------Object Detection--------------------------------------------"
    )
    print("Let's train OD network !")
    net = model
    net = net.to(device)

    # optimizer = optim.Adam(net.parameters())
    lr = args.lr
    optimizer = optim.SGD(net.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    # loss counters
    print('Loading the dataset...')
    print('Training on:', dataset.name)
    print('The dataset size:', len(dataset))

    step_index = 0
    epoch_size = len(dataset) // args.batch_size
    # each part of loss weight
    obj_w = 1.0
    cla_w = 1.0
    box_w = 5.0

    data_loader = data.DataLoader(dataset,
                                  args.batch_size,
                                  num_workers=args.num_workers,
                                  shuffle=True,
                                  collate_fn=detection_collate,
                                  pin_memory=True)
    # create batch iterator
    iteration = 0

    # start training
    for epoch in range(cfg['max_epoch']):
        batch_iterator = iter(data_loader)

        # No WarmUp strategy or WarmUp stage has finished.
        if epoch in cfg['lr_epoch']:
            step_index += 1
            lr = adjust_learning_rate(optimizer, args.gamma, step_index)

        for images, targets in batch_iterator:
            # WarmUp strategy for learning rate
            if args.warm_up == 'yes':
                if epoch < args.wp_epoch:
                    warmup_strategy(optimizer, args.gamma, epoch, epoch_size,
                                    iteration)
            iteration += 1
            # load train data
            # images, targets = next(batch_iterator)
            # encode raw box lists into grid targets for the chosen head.
            targets = [label.tolist() for label in targets]
            if args.version == 'yolo_v1_ms':
                targets = tools.multi_gt_creator(
                    input_size=cfg['min_dim'],
                    strides=net.stride,
                    scale_thresholds=cfg['scale_thresh'],
                    num_classes=args.num_classes,
                    label_lists=targets,
                    use_anchor=use_anchor)
            elif args.version == 'yolo_anchor_ms':
                targets = tools.multi_gt_creator(input_size=cfg['min_dim'],
                                                 strides=net.stride,
                                                 scale_thresholds=None,
                                                 num_classes=args.num_classes,
                                                 label_lists=targets,
                                                 use_anchor=use_anchor)
            else:
                targets = tools.gt_creator(cfg['min_dim'],
                                           net.stride,
                                           args.num_classes,
                                           targets,
                                           use_anchor=use_anchor)

            targets = torch.tensor(targets).float().to(device)

            # forward
            t0 = time.time()
            out = net(images.to(device))

            optimizer.zero_grad()

            obj_loss, class_loss, box_loss = tools.loss(out,
                                                        targets,
                                                        args.num_classes,
                                                        use_anchor=use_anchor,
                                                        use_focal=use_focal)
            # print(obj_loss.item(), class_loss.item(), box_loss.item())
            total_loss = obj_w * obj_loss + cla_w * class_loss + box_w * box_loss
            # viz loss
            writer.add_scalar('object loss', obj_loss.item(), iteration)
            writer.add_scalar('class loss', class_loss.item(), iteration)
            writer.add_scalar('local loss', box_loss.item(), iteration)
            # backprop
            total_loss.backward()
            optimizer.step()
            t1 = time.time()

            if iteration % 10 == 0:
                print('timer: %.4f sec.' % (t1 - t0))
                # NOTE: during warm-up the printed lr may lag the actual
                # optimizer lr, since warmup_strategy's return is not
                # captured here (its signature differs from the sibling
                # script's) — TODO confirm its return value.
                print('iter ' + repr(iteration) + ' || Loss: %.4f ||' %
                      (total_loss.item()) + ' || lr: %.8f ||' % (lr),
                      end=' ')

        if (epoch + 1) % 10 == 0:
            print('Saving state, epoch:', epoch + 1)
            torch.save(
                net.state_dict(), args.save_folder + '/' + args.version +
                '_' + repr(epoch + 1) + '.pth')
def train(model, device):
    """Train a YOLO-style detector on the VOC dataset.

    Args:
        model: the detection network to train. Must expose ``.stride``
            (consumed by ``tools.gt_creator`` / ``tools.multi_gt_creator``)
            and ``.state_dict()`` for checkpointing.
        device: ``torch.device`` that the model, images and targets are
            moved to.

    Side effects:
        - Reads the module-level ``args`` namespace and globals ``cfg``/``hr``.
        - Writes TensorBoard logs under ``log/voc/<timestamp>``.
        - Saves a checkpoint every 10 epochs under
          ``args.save_folder + args.version``.

    NOTE(review): the original body referenced ``yolo_net`` (undefined in
    this scope — ``NameError`` at runtime); it now consistently uses ``net``.
    """
    global cfg, hr

    # Checkpoint directory (note: plain concatenation, kept for
    # backward-compatible paths with existing checkpoints).
    os.makedirs(args.save_folder + args.version, exist_ok=True)

    # Optional focal loss for the objectness branch.
    use_focal = False
    if args.use_focal:
        print("Let's use focal loss for objectness !!!")
        use_focal = True

    # Dataset: when multi-scale training is enabled the augmentation is
    # built at 608x608 and images are later resized per-iteration.
    if args.multi_scale:
        print('Let us use the multi-scale trick.')
        ms_inds = range(len(cfg['multi_scale']))
        dataset = VOCDetection(root=args.dataset_root,
                               transform=SSDAugmentation([608, 608],
                                                         mean=(0.406, 0.456,
                                                               0.485),
                                                         std=(0.225, 0.224,
                                                              0.229)))

    else:
        dataset = VOCDetection(root=args.dataset_root,
                               transform=SSDAugmentation(cfg['min_dim'],
                                                         mean=(0.406, 0.456,
                                                               0.485),
                                                         std=(0.225, 0.224,
                                                              0.229)))

    # TensorBoard writer, one run directory per launch timestamp.
    from torch.utils.tensorboard import SummaryWriter
    c_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    log_path = 'log/voc/' + c_time
    os.makedirs(log_path, exist_ok=True)

    writer = SummaryWriter(log_path)

    print(
        "----------------------------------------Object Detection--------------------------------------------"
    )
    print("Let's train OD network !")
    net = model
    net = net.to(device)

    lr = args.lr
    optimizer = optim.SGD(net.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    # loss counters
    print("----------------------------------------------------------")
    print('Loading the dataset...')
    print('Training on:', dataset.name)
    print('The dataset size:', len(dataset))
    print('The obj weight : ', args.obj)
    print('The noobj weight : ', args.noobj)
    print("----------------------------------------------------------")

    input_size = cfg['min_dim']
    step_index = 0
    epoch_size = len(dataset) // args.batch_size
    # Relative weights for the three loss terms.
    obj_w = 1.0
    cla_w = 1.0
    box_w = 1.0

    data_loader = data.DataLoader(dataset,
                                  args.batch_size,
                                  num_workers=args.num_workers,
                                  shuffle=True,
                                  collate_fn=detection_collate,
                                  pin_memory=True)
    # create batch iterator
    iteration = 0
    t0 = time.time()

    # start training
    for epoch in range(cfg['max_epoch']):
        batch_iterator = iter(data_loader)

        # Learning-rate schedule: cosine annealing in the middle of
        # training, a small constant LR for the last 20 epochs, otherwise
        # the classic step schedule at cfg['lr_epoch'] milestones.
        if args.cos and epoch > 20 and epoch <= cfg['max_epoch'] - 20:
            lr = cos_lr(optimizer, epoch, cfg['max_epoch'])
        elif args.cos and epoch > cfg['max_epoch'] - 20:
            lr = 0.00001
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
        else:
            if epoch in cfg['lr_epoch']:
                step_index += 1
                lr = adjust_learning_rate(optimizer, args.gamma, step_index)

        for images, targets in batch_iterator:
            # Warm-up: ramp the LR during the first wp_epoch epochs unless
            # explicitly disabled with --no_warm_up yes.
            if args.no_warm_up != 'yes':
                if epoch < args.wp_epoch:
                    lr = warmup_strategy(optimizer, epoch_size, iteration)

            iteration += 1

            # Multi-scale trick: pick a new input resolution every 10
            # iterations ...
            if iteration % 10 == 0 and args.multi_scale:
                ms_ind = random.sample(ms_inds, 1)[0]
                input_size = cfg['multi_scale'][int(ms_ind)]

            # ... and resize the current batch to that resolution.
            if args.multi_scale:
                images = torch.nn.functional.interpolate(images,
                                                         size=input_size,
                                                         mode='bilinear',
                                                         align_corners=True)

            # Build the training targets for the chosen model family.
            # BUGFIX: the original referenced the undefined name
            # ``yolo_net`` here — the model in scope is ``net``.
            targets = [label.tolist() for label in targets]
            if args.version == 'yolo_v2' or args.version == 'tiny_yolo_v2':
                targets = tools.gt_creator(input_size, net.stride,
                                           targets)
            elif args.version == 'yolo_v3' or args.version == 'tiny_yolo_v3':
                targets = tools.multi_gt_creator(input_size, net.stride,
                                                 targets)

            targets = torch.tensor(targets).float().to(device)

            # forward
            out = net(images.to(device))

            optimizer.zero_grad()

            obj_loss, class_loss, box_loss = tools.loss(
                out,
                targets,
                num_classes=args.num_classes,
                use_focal=use_focal,
                obj=args.obj,
                noobj=args.noobj)
            total_loss = obj_w * obj_loss + cla_w * class_loss + box_w * box_loss
            # viz loss
            writer.add_scalar('object loss', obj_loss.item(), iteration)
            writer.add_scalar('class loss', class_loss.item(), iteration)
            writer.add_scalar('local loss', box_loss.item(), iteration)
            # backprop
            total_loss.backward()
            optimizer.step()

            if iteration % 10 == 0:
                t1 = time.time()
                print(
                    '[Epoch %d/%d][Iter %d][lr %.8f]'
                    '[Loss: obj %.2f || cls %.2f || bbox %.2f || total %.2f || imgsize %d || time: %.2f]'
                    % (epoch + 1, cfg['max_epoch'], iteration, lr,
                       obj_loss.item(), class_loss.item(), box_loss.item(),
                       total_loss.item(), input_size[0], t1 - t0),
                    flush=True)

                t0 = time.time()

        # Checkpoint every 10 epochs.
        # BUGFIX: save ``net`` (the trained model) — ``yolo_net`` is
        # undefined in this function.
        if (epoch + 1) % 10 == 0:
            print('Saving state, epoch:', epoch + 1)
            torch.save(
                net.state_dict(),
                os.path.join(args.save_folder + args.version,
                             args.version + '_' + repr(epoch + 1) + '.pth'))