예제 #1
0
def train():
    """Run the full training loop, checkpointing periodically.

    Relies on module-level globals (net, optimizer, criterion, priors,
    args, cfg, ...) set up by the surrounding script.
    """
    # net.train()
    epoch = args.resume_epoch
    print('Loading Dataset...')

    dataset = WiderFaceDetection(training_dataset, preproc(img_dim, rgb_mean))

    epoch_size = math.ceil(len(dataset) / batch_size)
    max_iter = max_epoch * epoch_size

    # LR decay milestones expressed in iterations.
    stepvalues = (cfg['decay1'] * epoch_size, cfg['decay2'] * epoch_size)
    step_index = 0

    start_iter = args.resume_epoch * epoch_size if args.resume_epoch > 0 else 0

    for iteration in range(start_iter, max_iter):
        if iteration % epoch_size == 0:
            # Epoch boundary: rebuild a freshly shuffled batch iterator.
            loader = data.DataLoader(dataset,
                                     batch_size,
                                     shuffle=True,
                                     num_workers=num_workers,
                                     collate_fn=detection_collate)
            batch_iterator = iter(loader)
            # Periodic checkpoints: every 10 epochs, every 5 past decay1.
            if (epoch % 10 == 0 and epoch > 0) or (epoch % 5 == 0
                                                   and epoch > cfg['decay1']):
                ckpt_path = (save_folder + cfg['name'] + '_epoch_' +
                             str(epoch) + '.pth')
                torch.save(net.state_dict(), ckpt_path)
            epoch += 1

        load_t0 = time.time()
        if iteration in stepvalues:
            step_index += 1
        lr = adjust_learning_rate(optimizer, gamma, epoch, step_index,
                                  iteration, epoch_size)

        # Fetch the next batch and move it to the GPU.
        images, targets = next(batch_iterator)
        images = images.cuda()
        targets = [anno.cuda() for anno in targets]

        # Forward pass.
        out = net(images)

        # Backward pass and parameter update.
        optimizer.zero_grad()
        loss_l, loss_c, loss_landm = criterion(out, priors, targets)
        loss = cfg['loc_weight'] * loss_l + loss_c + loss_landm
        loss.backward()
        optimizer.step()
        load_t1 = time.time()
        batch_time = load_t1 - load_t0
        eta = int(batch_time * (max_iter - iteration))
        print(
            'Epoch:{}/{} || Epochiter: {}/{} || Iter: {}/{} || Loc: {:.4f} Cla: {:.4f} Landm: {:.4f} || LR: {:.8f} || Batchtime: {:.4f} s || ETA: {}'
            .format(epoch, max_epoch, (iteration % epoch_size) + 1, epoch_size,
                    iteration + 1, max_iter, loss_l.item(), loss_c.item(),
                    loss_landm.item(), lr, batch_time,
                    str(datetime.timedelta(seconds=eta))))

    torch.save(net.state_dict(), save_folder + cfg['name'] + '_Final.pth')
예제 #2
0
def train():
    """Train the detector on VOC or COCO.

    Relies on module-level globals (net, optimizer, criterion, priors,
    args, ...). Rebuilds a shuffled DataLoader every epoch, decays the LR
    at dataset-specific milestones, checkpoints periodically and saves a
    final model.
    """
    net.train()
    # loss counters, reset at every epoch boundary
    loc_loss = 0  # epoch
    conf_loss = 0
    epoch = 0 + args.resume_epoch
    print('Loading Dataset...')

    if args.dataset == 'VOC':
        dataset = VOCDetection(VOCroot, train_sets, preproc(
            img_dim, rgb_means, p), AnnotationTransform())
    elif args.dataset == 'COCO':
        dataset = COCODetection(COCOroot, train_sets, preproc(
            img_dim, rgb_means, p))
    else:
        print('Only VOC and COCO are supported now!')
        return

    epoch_size = len(dataset) // args.batch_size
    max_iter = args.max_epoch * epoch_size

    # LR decay milestones (in iterations) differ per dataset.
    stepvalues_VOC = (150 * epoch_size, 200 * epoch_size, 250 * epoch_size)
    stepvalues_COCO = (90 * epoch_size, 120 * epoch_size, 140 * epoch_size)
    stepvalues = stepvalues_COCO if args.dataset == 'COCO' else stepvalues_VOC
    print('Training', args.version, 'on', dataset.name)
    step_index = 0

    if args.resume_epoch > 0:
        start_iter = args.resume_epoch * epoch_size
    else:
        start_iter = 0

    lr = args.lr
    for iteration in range(start_iter, max_iter):
        if iteration % epoch_size == 0:
            # create batch iterator (reshuffled every epoch)
            batch_iterator = iter(data.DataLoader(dataset, batch_size,
                                                  shuffle=True, num_workers=args.num_workers, collate_fn=detection_collate))
            loc_loss = 0
            conf_loss = 0
            if (epoch % 10 == 0 and epoch > 0) or (epoch % 5 == 0 and epoch > 200):
                torch.save(net.state_dict(), args.save_folder+args.version+'_'+args.dataset + '_epoches_'+
                           repr(epoch) + '.pth')
            epoch += 1

        load_t0 = time.time()
        if iteration in stepvalues:
            step_index += 1
        lr = adjust_learning_rate(optimizer, args.gamma, epoch, step_index, iteration, epoch_size)

        # load train data
        images, targets = next(batch_iterator)

        # FIX: `Variable(..., volatile=True)` and `tensor.data[0]` were removed
        # in PyTorch >= 0.4/0.5 and raise at runtime; plain tensors and
        # `.item()` are the supported API (and what example #3 in this file
        # already uses).
        if args.cuda:
            images = images.cuda()
            targets = [anno.cuda() for anno in targets]

        # forward
        t0 = time.time()
        out = net(images)
        # backprop
        optimizer.zero_grad()
        loss_l, loss_c = criterion(out, priors, targets)
        loss = loss_l + loss_c
        loss.backward()
        optimizer.step()
        t1 = time.time()
        loc_loss += loss_l.item()
        conf_loss += loss_c.item()
        load_t1 = time.time()
        if iteration % 10 == 0:
            print('Epoch:' + repr(epoch) + ' || epochiter: ' + repr(iteration % epoch_size) + '/' + repr(epoch_size)
                  + '|| Totel iter ' +
                  repr(iteration) + ' || L: %.4f C: %.4f||' % (
                loss_l.item(), loss_c.item()) +
                'Batch time: %.4f sec. ||' % (load_t1 - load_t0) + 'LR: %.8f' % (lr))

    torch.save(net.state_dict(), args.save_folder +
               'Final_' + args.version +'_' + args.dataset+ '.pth')
예제 #3
0
def train():
    """Run the VOC training loop; behavior matches the original exactly."""
    net.train()
    # running loss accumulators, reset at every epoch boundary
    loc_loss = 0  # epoch
    conf_loss = 0
    epoch = 0
    print('Loading Dataset...')

    dataset = VOCDetection(VOCroot, train_sets, preproc(img_dim, rgb_means, p),
                           AnnotationTransform())

    epoch_size = len(dataset) // args.batch_size
    max_iter = args.max_epoch * epoch_size

    # LR decay milestones in iterations.
    stepvalues = (60 * epoch_size, 80 * epoch_size)
    step_index = 0
    start_iter = 0

    lr = args.lr
    for iteration in range(start_iter, max_iter):
        at_epoch_start = iteration % epoch_size == 0
        if at_epoch_start:
            # Rebuild a shuffled batch iterator at every epoch boundary.
            loader = data.DataLoader(dataset,
                                     batch_size,
                                     shuffle=True,
                                     num_workers=8,
                                     collate_fn=detection_collate)
            batch_iterator = iter(loader)
            loc_loss = 0
            conf_loss = 0
            epoch += 1

        load_t0 = time.time()
        if iteration in stepvalues:
            step_index += 1
        lr = adjust_learning_rate(optimizer, 0.1, epoch, step_index, iteration,
                                  epoch_size)

        images, targets = next(batch_iterator)

        images = Variable(images.cuda())
        targets = [Variable(anno.cuda()) for anno in targets]

        # forward
        t0 = time.time()
        out = net(images)
        # backward pass and parameter update
        optimizer.zero_grad()
        loss_l, loss_c = criterion(out, priors, targets)
        loss = loss_l + loss_c
        loss.backward()
        optimizer.step()
        t1 = time.time()
        loc_loss += loss_l.item()
        conf_loss += loss_c.item()
        load_t1 = time.time()

        # Log the three loss curves (writer is presumably a TensorBoard
        # SummaryWriter — confirm against the visualize_* helpers).
        visualize_total_loss(writer, loss.item(), iteration)
        visualize_loc_loss(writer, loss_l.item(), iteration)
        visualize_conf_loss(writer, loss_c.item(), iteration)

        if iteration % 10 == 0:
            print('Epoch:' + repr(epoch) + ' || epochiter: ' +
                  repr(iteration % epoch_size) + '/' + repr(epoch_size) +
                  '|| Totel iter ' + repr(iteration) +
                  ' || L: %.4f C: %.4f||' % (loss_l.item(), loss_c.item()) +
                  'Batch time: %.4f sec. ||' % (load_t1 - load_t0) +
                  'LR: %.8f' % (lr))

        # Checkpoint at epoch boundaries late in training.
        if at_epoch_start and epoch > 80 and epoch % 5 == 0:
            torch.save(net.state_dict(),
                       args.save_folder + 'epoches_' + repr(epoch) + '.pth')
예제 #4
0
def train():
    """Train on VOC or COCO, mirroring console logs into f_writer.

    Relies on module-level globals (net, optimizer, criterion, priors,
    args, f_writer, ...). Saves periodic checkpoints and a final model,
    then closes the log file.
    """
    net.train()
    # loss counters, reset at every epoch boundary
    loc_loss = 0  # epoch
    conf_loss = 0
    epoch = 0 + args.resume_epoch
    print('Loading Dataset...')
    f_writer.write('Loading Dataset...\n')

    if args.dataset == 'VOC':
        dataset = VOCDetection(VOCroot, train_sets,
                               preproc(img_dim, rgb_means, p),
                               AnnotationTransform())
    elif args.dataset == 'COCO':
        dataset = COCODetection(COCOroot, train_sets,
                                preproc(img_dim, rgb_means, p))
    else:
        print('Only VOC and COCO are supported now!')
        return

    epoch_size = len(dataset) // args.batch_size
    max_iter = args.max_epoch * epoch_size

    # LR decay milestones (in iterations) differ per dataset.
    stepvalues_VOC = (150 * epoch_size, 200 * epoch_size, 250 * epoch_size)
    stepvalues_COCO = (90 * epoch_size, 120 * epoch_size, 140 * epoch_size)
    stepvalues = stepvalues_COCO if args.dataset == 'COCO' else stepvalues_VOC
    print('Training', args.version, 'on', dataset.name)
    # FIX: add separating spaces — the original wrote e.g. "TrainingRFBonVOC".
    f_writer.write('Training ' + args.version + ' on ' + dataset.name + '\n')
    step_index = 0

    if args.resume_epoch > 0:
        start_iter = args.resume_epoch * epoch_size
    else:
        start_iter = 0

    lr = args.lr
    for iteration in range(start_iter, max_iter):
        if iteration % epoch_size == 0:
            # create batch iterator (reshuffled every epoch)
            batch_iterator = iter(
                data.DataLoader(dataset,
                                batch_size,
                                shuffle=True,
                                num_workers=args.num_workers,
                                collate_fn=detection_collate))
            loc_loss = 0
            conf_loss = 0
            if (epoch % 40 == 0 and epoch > 0) or (epoch % 10 == 0
                                                   and epoch > 200):
                torch.save(
                    net.state_dict(),
                    args.save_folder + args.version + '_' + args.dataset +
                    '_epoches_' + repr(epoch) + '.pth' + args.extra)
            epoch += 1

        load_t0 = time.time()
        if iteration in stepvalues:
            step_index += 1
        lr = adjust_learning_rate(optimizer, args.gamma, epoch, step_index,
                                  iteration, epoch_size)

        # load train data
        images, targets = next(batch_iterator)

        # FIX: `Variable(..., volatile=True)` and `tensor.data[0]` were removed
        # in PyTorch >= 0.4/0.5 and raise at runtime; plain tensors and
        # `.item()` are the supported API.
        if args.cuda:
            images = images.cuda()
            targets = [anno.cuda() for anno in targets]

        # forward
        t0 = time.time()
        out = net(images)
        # backprop
        optimizer.zero_grad()
        # third return value of criterion is deliberately discarded here
        loss_l, loss_c, _ = criterion(out, priors, targets)
        loss = loss_l + loss_c
        loss.backward()
        optimizer.step()
        t1 = time.time()
        loc_loss += loss_l.item()
        conf_loss += loss_c.item()
        load_t1 = time.time()
        if iteration % 10 == 0:
            # Build the log line once; console and file outputs were
            # previously duplicated character-for-character.
            msg = ('Epoch:' + repr(epoch) + ' || epochiter: ' +
                   repr(iteration % epoch_size) + '/' + repr(epoch_size) +
                   '|| Totel iter ' + repr(iteration) +
                   ' || L: %.4f C: %.4f||' % (loss_l.item(), loss_c.item()) +
                   'Batch time: %.4f sec. ||' % (load_t1 - load_t0) +
                   'LR: %.8f' % (lr))
            print(msg)
            f_writer.write(msg + '\n')

    torch.save(
        net.state_dict(), args.save_folder + 'Final_' + args.version + '_' +
        args.dataset + '.pth' + args.extra)
    f_writer.write('training finished!\n')
    f_writer.close()
예제 #5
0
def train():
    """FaceBoxes-style training loop with attention-mask supervision.

    Relies on module-level globals (net, optimizer, criterion, criterion_m,
    priors, args, cfg, device, ...). Behavior is identical to the original.
    """
    net.train()
    epoch = args.resume_epoch
    print('Loading Dataset...')

    dataset = VOCDetection(args.training_dataset, preproc(img_dim, rgb_means),
                           AnnotationTransform())

    epoch_size = math.ceil(len(dataset) / args.batch_size)
    max_iter = args.max_epoch * epoch_size

    # LR decay milestones in iterations.
    stepvalues = (200 * epoch_size, 250 * epoch_size)
    step_index = 0

    start_iter = args.resume_epoch * epoch_size if args.resume_epoch > 0 else 0

    for iteration in range(start_iter, max_iter):
        if iteration % epoch_size == 0:
            # Epoch boundary: new shuffled iterator, possibly a checkpoint.
            loader = data.DataLoader(dataset,
                                     batch_size,
                                     shuffle=True,
                                     num_workers=args.num_workers,
                                     collate_fn=detection_collate)
            batch_iterator = iter(loader)
            if (epoch % 10 == 0 and epoch > 0) or (epoch % 5 == 0
                                                   and epoch > 200):
                ckpt = (args.save_folder + 'FaceBoxes_epoch_' + repr(epoch) +
                        '.pth')
                torch.save(net.state_dict(), ckpt)
            epoch += 1

        load_t0 = time.time()
        if iteration in stepvalues:
            step_index += 1
        lr = adjust_learning_rate(optimizer, args.gamma, epoch, step_index,
                                  iteration, epoch_size)

        # Next batch: images, box/label targets, and attention masks.
        images, targets, masks = next(batch_iterator)
        images = images.to(device)
        targets = [anno.to(device) for anno in targets]
        masks = [mask.to(device) for mask in masks]

        # Forward: `att` are predicted attention maps, `out` the detections.
        att, out = net(images)

        # Backward pass and parameter update.
        optimizer.zero_grad()
        # Mask loss summed over the three attention levels.
        loss_m = criterion_m(att[0], masks[0])
        for level in range(1, 3):
            loss_m += criterion_m(att[level], masks[level])
        loss_l, loss_c = criterion(out, priors, targets)
        loss = cfg['loc_weight'] * loss_l + loss_c + cfg['mask_weight'] * loss_m
        loss.backward()
        optimizer.step()
        load_t1 = time.time()

        msg = ('Epoch:' + repr(epoch) + '/' + repr(args.max_epoch) +
               ' || epochiter: ' + repr(iteration % epoch_size) + '/' +
               repr(epoch_size) + '|| Totel iter ' + repr(iteration) +
               ' || L: %.4f C: %.4f M: %.4f||' %
               (cfg['loc_weight'] * loss_l.item(), loss_c.item(),
                cfg['mask_weight'] * loss_m.item()) +
               'Batch time: %.4f sec. ||' % (load_t1 - load_t0) +
               'LR: %.8f' % (lr))
        print(msg)

    torch.save(net.state_dict(), args.save_folder + 'Final_FaceBoxes.pth')
예제 #6
0
def main():
    """Entry point: parse args, build the model, loss criteria and loaders,
    then run the epoch loop of train / periodic eval / checkpointing.

    Relies on module-level names (cfg_dict, datasets_dict, model_builder,
    train, eval_net, save_checkpoint, ...) defined elsewhere in the project.
    """
    global args
    args = arg_parse()
    save_folder = args.save_folder
    batch_size = args.batch_size
    bgr_means = (104, 117, 123)
    weight_decay = 0.0005
    p = 0.6           # data-augmentation probability passed to preproc
    gamma = 0.1       # LR decay factor
    momentum = 0.9
    dataset_name = args.dataset
    size = args.size
    thresh = args.confidence_threshold
    # "refine_*" versions use the two-stage (ARM + ODM) loss pair.
    use_refine = args.version.split("_")[0] == "refine"
    if dataset_name[0] == "V":
        cfg = cfg_dict["VOC"][args.version][str(size)]
        trainvalDataset = VOCDetection
        dataroot = VOCroot
        targetTransform = AnnotationTransform()
        valSet = datasets_dict["VOC2007"]
        top_k = 200
    else:
        cfg = cfg_dict["COCO"][args.version][str(size)]
        trainvalDataset = COCODetection
        dataroot = COCOroot
        targetTransform = None
        valSet = datasets_dict["COCOval"]
        top_k = 300
    num_classes = cfg['num_classes']
    start_epoch = args.resume_epoch
    epoch_step = cfg["epoch_step"]
    end_epoch = cfg["end_epoch"]
    # FIX: makedirs(exist_ok=True) is race-free and handles nested paths;
    # the original exists()+mkdir could still raise if created concurrently.
    os.makedirs(save_folder, exist_ok=True)
    if args.cuda and torch.cuda.is_available():
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    else:
        torch.set_default_tensor_type('torch.FloatTensor')
    net = model_builder(args.version, cfg, "train", int(size), num_classes,
                        args.channel_size)
    print(net)

    # FIX: identity comparison with None (PEP 8), not `== None`.
    if args.resume_net is None:
        net.load_weights(pretrained_model[args.version])
    else:
        # Strip a possible DataParallel 'module.' prefix from checkpoint keys.
        state_dict = torch.load(args.resume_net)
        from collections import OrderedDict
        new_state_dict = OrderedDict()
        for k, v in state_dict.items():
            name = k[7:] if k[:7] == 'module.' else k
            new_state_dict[name] = v
        net.load_state_dict(new_state_dict)
        print('Loading resume network...')
    if args.ngpu > 1:
        net = torch.nn.DataParallel(net)
    if args.cuda:
        net.cuda()
        cudnn.benchmark = True
    optimizer = optim.SGD(net.parameters(),
                          lr=args.lr,
                          momentum=momentum,
                          weight_decay=weight_decay)
    criterion = list()
    if use_refine:
        detector = Detect(num_classes, 0, cfg, use_arm=use_refine)
        arm_criterion = RefineMultiBoxLoss(2, 0.5, True, 0, True, 3, 0.5,
                                           False, args.cuda)
        odm_criterion = RefineMultiBoxLoss(num_classes, 0.5, True, 0, True, 3,
                                           0.5, False, 0.01, args.cuda)
        criterion.append(arm_criterion)
        criterion.append(odm_criterion)
    else:
        detector = Detect(num_classes, 0, cfg, use_arm=use_refine)
        ssd_criterion = MultiBoxLoss(num_classes, 0.5, True, 0, True, 3, 0.5,
                                     False, args.cuda)
        criterion.append(ssd_criterion)
    TrainTransform = preproc(cfg["img_wh"], bgr_means, p)
    ValTransform = BaseTransform(cfg["img_wh"], bgr_means, (2, 0, 1))

    val_dataset = trainvalDataset(dataroot, valSet, ValTransform,
                                  targetTransform, dataset_name)
    val_loader = data.DataLoader(val_dataset,
                                 batch_size,
                                 shuffle=False,
                                 num_workers=args.num_workers,
                                 collate_fn=detection_collate)

    for epoch in range(start_epoch + 1, end_epoch + 1):
        train_dataset = trainvalDataset(dataroot, datasets_dict[dataset_name],
                                        TrainTransform, targetTransform,
                                        dataset_name)
        epoch_size = len(train_dataset)
        train_loader = data.DataLoader(train_dataset,
                                       batch_size,
                                       shuffle=True,
                                       num_workers=args.num_workers,
                                       collate_fn=detection_collate)
        train(train_loader, net, criterion, optimizer, epoch, epoch_step,
              gamma, use_refine)
        if (epoch % 10 == 0) or (epoch % 5 == 0 and epoch >= 200):
            save_checkpoint(net, epoch, size)
        # Evaluate on the validation split late in training.
        if (epoch >= 200 and epoch % 10 == 0):
            eval_net(val_dataset,
                     val_loader,
                     net,
                     detector,
                     cfg,
                     ValTransform,
                     top_k,
                     thresh=thresh,
                     batch_size=batch_size)
    # Final evaluation and checkpoint after the last epoch.
    eval_net(val_dataset,
             val_loader,
             net,
             detector,
             cfg,
             ValTransform,
             top_k,
             thresh=thresh,
             batch_size=batch_size)
    save_checkpoint(net, end_epoch, size)
                      lr=args.lr,
                      momentum=args.momentum,
                      weight_decay=args.weight_decay)
# optimizer = optim.RMSprop(net.parameters(), lr=args.lr,alpha = 0.9, eps=1e-08,
#                     momentum=args.momentum, weight_decay=args.weight_decay)

# Multi-task detection loss (localization + classification).
# NOTE(review): positional args are presumably (num_classes, overlap_thresh,
# prior_for_matching, bkg_label, neg_mining, neg_pos_ratio, neg_overlap,
# encode_target) — confirm against the MultiBoxLoss definition.
criterion = MultiBoxLoss(num_classes, 0.5, True, 0, True, 3, 0.5, False)
priorbox = PriorBox(cfg)
# Priors are constant anchor boxes; build them outside the autograd graph.
with torch.no_grad():
    priors = priorbox.forward()
# dataset selection: VOC or COCO, chosen via --dataset
print('Loading Dataset...')
if args.dataset == 'VOC':
    testset = VOCDetection(VOCroot, [('2007', 'test')], None,
                           AnnotationTransform())
    train_dataset = VOCDetection(
        VOCroot, train_sets, preproc(img_dim, rgb_means, p=p, rgb_std=rgb_std),
        AnnotationTransform())
elif args.dataset == 'COCO':
    testset = COCODetection(COCOroot, [('2014', 'minival')], None)
    train_dataset = COCODetection(
        COCOroot, train_sets, preproc(img_dim, rgb_means, p=p,
                                      rgb_std=rgb_std))
else:
    print('Only VOC and COCO are supported now!')
    exit()


def train():
    net.train()
    # loss counters
    loc_loss = 0  # epoch
예제 #8
0
# Post-processing decoder that turns raw network outputs into detections.
detector = Detect(num_classes, 0, cfg)
optimizer = optim.SGD(net.parameters(), lr=args.lr,
                      momentum=args.momentum, weight_decay=args.weight_decay)
# optimizer = optim.RMSprop(net.parameters(), lr=args.lr,alpha = 0.9, eps=1e-08,
#                      momentum=args.momentum, weight_decay=args.weight_decay)

# Multi-task detection loss (localization + classification).
# NOTE(review): positional args are presumably (num_classes, overlap_thresh,
# prior_for_matching, bkg_label, neg_mining, neg_pos_ratio, neg_overlap,
# encode_target) — confirm against the MultiBoxLoss definition.
criterion = MultiBoxLoss(num_classes, 0.5, True, 0, True, 3, 0.5, False)
priorbox = PriorBox(cfg)
# Priors are constant anchor boxes; build them outside the autograd graph.
with torch.no_grad():
    priors = priorbox.forward()
# dataset selection: VOC or COCO, chosen via --dataset
print('Loading Dataset...')
if args.dataset == 'VOC':
    testset = VOCDetection(
        VOCroot, [('2007', 'test')], None, AnnotationTransform())
    train_dataset = VOCDetection(VOCroot, train_sets, preproc(
        img_dim, rgb_means, rgb_std, p), AnnotationTransform())
elif args.dataset == 'COCO':
    testset = COCODetection(
        #COCOroot, [('2014', 'minival')], None)
        COCOroot, [('2015', 'test-dev')], None)
    train_dataset = COCODetection(COCOroot, train_sets, preproc(
        img_dim, rgb_means, rgb_std, p))
else:
    print('Only VOC and COCO are supported now!')
    exit()


def train():
    net.train()
    # loss counters
    loc_loss = 0  # epoch
예제 #9
0
                      momentum=args.momentum,
                      weight_decay=args.weight_decay)
# optimizer = optim.RMSprop(net.parameters(), lr=args.lr,alpha = 0.9, eps=1e-08,
#                      momentum=args.momentum, weight_decay=args.weight_decay)

# Multi-task detection loss (localization + classification).
criterion = MultiBoxLoss(num_classes, 0.5, True, 0, True, 3, 0.5, False)
priorbox = PriorBox(cfg)
# NOTE(review): `volatile=True` is a pre-0.4 PyTorch idiom for no-grad eval;
# on PyTorch >= 0.4 this should be `with torch.no_grad(): priors = priorbox.forward()`.
priors = Variable(priorbox.forward(), volatile=True)
# dataset

if args.dataset == 'VOC':
    print('Loading Dataset...')
    testset = VOCDetection(VOCroot, [('2007', 'test')], None,
                           AnnotationTransform())
    train_dataset = VOCDetection(VOCroot, train_sets,
                                 preproc(img_dim, rgb_means, rgb_std, p),
                                 AnnotationTransform())
elif args.dataset == 'COCO':
    print('Loading Dataset...')
    if args.classes == 'youtube_bb':
        testset = COCODetection(COCOroot, [('2017', 'val')],
                                None,
                                classes='youtube_bb')
        train_dataset = COCODetection(COCOroot,
                                      train_sets,
                                      preproc(img_dim, rgb_means, rgb_std, p),
                                      classes='youtube_bb',
                                      box_num=args.box_num)
    elif args.classes == 'youtube_bb_sub':
        testset = COCODetection(COCOroot, [('2017', 'val')],
                                None,