Code example #1
    def setup(self):
        if not os.path.exists(self.system_dict["params"]["save_folder"]):
            os.mkdir(self.system_dict["params"]["save_folder"])
        self.system_dict["local"]["cfg"] = None
        if self.system_dict["params"]["network"] == "mobile0.25":
            self.system_dict["local"]["cfg"] = cfg_mnet
        elif self.system_dict["params"]["network"] == "resnet50":
            self.system_dict["local"]["cfg"] = cfg_re50

        self.system_dict["local"]["cfg"]["gpu_train"] = self.system_dict[
            "params"]["use_gpu"]
        self.system_dict["local"]["cfg"]["batch_size"] = self.system_dict[
            "params"]["batch_size"]
        self.system_dict["local"]["cfg"]["ngpu"] = self.system_dict["params"][
            "num_gpu"]
        self.system_dict["local"]["cfg"]["epoch"] = self.system_dict["params"][
            "num_epochs"]

        rgb_mean = (104, 117, 123)  # bgr order
        img_dim = self.system_dict["local"]["cfg"]['image_size']
        print('Loading Dataset...')
        self.system_dict["local"]["dataset"] = DataLoaderWithoutLandmarks(
            self.system_dict["params"]["img_dir"],
            self.system_dict["params"]["anno_file"],
            preproc(img_dim, rgb_mean))
        print("Done...")
Code example #2
File: core.py  Project: lvjunxiu/M2Det
def get_dataloader(cfg, dataset, setname='train_sets'):
    _preproc = preproc(cfg.model.input_size, cfg.model.rgb_means, cfg.model.p)
    Dataloader_function = {'VOC': VOCDetection, 'COCO': COCODetection}
    _Dataloader_function = Dataloader_function[dataset]
    if setname == 'train_sets':
        dataset = _Dataloader_function(
            cfg.COCOroot if dataset == 'COCO' else cfg.VOCroot,
            getattr(cfg.dataset, dataset)[setname], _preproc)
    else:
        dataset = _Dataloader_function(
            cfg.COCOroot if dataset == 'COCO' else cfg.VOCroot,
            getattr(cfg.dataset, dataset)[setname], None)
    return dataset
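
Assuming an M2Det-style config object exposing the attributes referenced above (cfg.model.input_size, cfg.model.rgb_means, cfg.model.p, cfg.VOCroot, cfg.dataset.VOC, ...), the helper could be used as follows; the batch size and worker count here are illustrative, not taken from the repository:

train_dataset = get_dataloader(cfg, 'VOC', setname='train_sets')
train_loader = data.DataLoader(train_dataset,
                               batch_size=16,
                               shuffle=True,
                               num_workers=4,
                               collate_fn=detection_collate)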
Code example #3
def load_data(cfg, phase):
    if phase == 'train':
        dataset = dataset_map[cfg.dataset](cfg.dataset_dir, cfg.train_sets,
                                           preproc(cfg.image_size[0],
                                                   cfg.rgb_means, cfg.rgb_std,
                                                   cfg.zoom_p))
        data_loader = data.DataLoader(dataset,
                                      cfg.train_batch_size,
                                      num_workers=cfg.num_workers,
                                      shuffle=True,
                                      collate_fn=detection_collate,
                                      pin_memory=True)
    if phase == 'eval':
        dataset = dataset_map[cfg.dataset](cfg.dataset_dir, cfg.test_sets,
                                           None)
        data_loader = data.DataLoader(dataset,
                                      cfg.test_batch_size,
                                      num_workers=cfg.num_workers,
                                      shuffle=False,
                                      collate_fn=detection_collate,
                                      pin_memory=True)
    if phase == 'test':
        dataset = dataset_map[cfg.dataset](cfg.dataset_dir, cfg.test_sets,
                                           None)
        data_loader = data.DataLoader(dataset,
                                      cfg.test_batch_size,
                                      num_workers=cfg.num_workers,
                                      shuffle=False,
                                      collate_fn=detection_collate,
                                      pin_memory=True)
    if phase == 'visualize':
        dataset = dataset_map[cfg.dataset](cfg.dataset_dir, cfg.test_sets,
                                           None)
        data_loader = data.DataLoader(dataset,
                                      cfg.test_batch_size,
                                      num_workers=cfg.num_workers,
                                      shuffle=False,
                                      collate_fn=detection_collate,
                                      pin_memory=True)
    return data_loader
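
The 'eval', 'test' and 'visualize' branches above build identical loaders, and an unrecognized phase would leave data_loader undefined; a behavior-equivalent consolidation (a sketch using the same cfg fields) is:

def load_data(cfg, phase):
    if phase == 'train':
        dataset = dataset_map[cfg.dataset](cfg.dataset_dir, cfg.train_sets,
                                           preproc(cfg.image_size[0], cfg.rgb_means,
                                                   cfg.rgb_std, cfg.zoom_p))
        return data.DataLoader(dataset, cfg.train_batch_size,
                               num_workers=cfg.num_workers, shuffle=True,
                               collate_fn=detection_collate, pin_memory=True)
    elif phase in ('eval', 'test', 'visualize'):
        dataset = dataset_map[cfg.dataset](cfg.dataset_dir, cfg.test_sets, None)
        return data.DataLoader(dataset, cfg.test_batch_size,
                               num_workers=cfg.num_workers, shuffle=False,
                               collate_fn=detection_collate, pin_memory=True)
    raise ValueError('unknown phase: {}'.format(phase))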
Code example #4
File: wider_voc.py  Project: 124451/first_headpose
def detection_collate(batch):
    """Custom collate fn for batches of images that carry a varying number
    of object annotations (bounding boxes).

    Returns a tuple containing:
        1) (tensor) batch of images stacked on their 0 dim
        2) (list of tensors) annotations for a given image are stacked on 0 dim
    """
    targets = []
    imgs = []
    for _, sample in enumerate(batch):
        for _, tup in enumerate(sample):
            if torch.is_tensor(tup):
                print("tup:", tup.shape)
                imgs.append(tup)
            elif isinstance(tup, np.ndarray):
                annos = torch.from_numpy(tup).float()
                print("annos:", annos.shape, annos)
                targets.append(annos)
    return (torch.stack(imgs, 0), targets)


if __name__ == '__main__':
    #dataset = VOCDetection('/home/codingbo/FaceBoxes.PyTorch-master/data/WIDER_FACE', preproc(img_dim, rgb_means), AnnotationTransform())
    img_dim = 1024
    rgb_means = (104, 117, 123)  # bgr order
    dataset_2 = Face4k('/home/codingbo/WORK/data/4K-Face',
                       preproc(img_dim, rgb_means))
    batch_iterator = iter(
        data.DataLoader(dataset_2,
                        2,
                        shuffle=True,
                        num_workers=1,
                        collate_fn=detection_collate))
    images, targets = next(batch_iterator)
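
A custom collate_fn is needed here because each image carries a different number of ground-truth boxes, so the annotations cannot be stacked into a single tensor: images are stacked along dim 0 while the targets remain a Python list. Under that assumption the result of the call above looks roughly like this (batch size 2, boxes stored as x1, y1, x2, y2, label):

# images:  torch.Size([2, 3, 1024, 1024])
# targets: list of 2 tensors, each of shape [num_boxes_i, 5]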
Code example #5
File: train.py  Project: jianzhnie/FaceBoxes
def train():
    net.train()
    epoch = 0 + args.resume_epoch
    print('Loading Dataset...')

    dataset = VOCDetection(training_dataset, preproc(img_dim, rgb_mean),
                           AnnotationTransform())

    epoch_size = math.ceil(len(dataset) / batch_size)
    max_iter = max_epoch * epoch_size

    stepvalues = (70 * epoch_size, 100 * epoch_size, 120 * epoch_size)
    step_index = 0

    if args.resume_epoch > 0:
        start_iter = args.resume_epoch * epoch_size
    else:
        start_iter = 0

    for iteration in range(start_iter, max_iter):
        if iteration % epoch_size == 0:
            # create batch iterator
            batch_iterator = iter(
                data.DataLoader(dataset,
                                batch_size,
                                shuffle=True,
                                num_workers=num_workers,
                                collate_fn=detection_collate))
            if (epoch % 5 == 0 and epoch > 0) or (epoch % 2 == 0
                                                  and epoch > 50):
                torch.save(
                    net.state_dict(),
                    os.path.join(save_folder,
                                 'FaceBoxes_epoch_' + str(epoch) + '.pth'))
            epoch += 1

        load_t0 = time.time()
        if iteration in stepvalues:
            step_index += 1
        lr = adjust_learning_rate(optimizer, gamma, epoch, step_index,
                                  iteration, epoch_size)

        # load train data
        images, targets = next(batch_iterator)
        images = images.to(device)
        targets = [anno.to(device) for anno in targets]

        # forward
        out = net(images)

        # backprop
        optimizer.zero_grad()
        loss_l, loss_c = criterion(out, priors, targets)
        loss = cfg['loc_weight'] * loss_l + loss_c
        loss.backward()
        optimizer.step()
        load_t1 = time.time()
        batch_time = load_t1 - load_t0
        eta = int(batch_time * (max_iter - iteration))
        print(
            'Epoch:{}/{} || Epochiter: {}/{} || Iter: {}/{} || L: {:.4f} C: {:.4f} || LR: {:.8f} || Batchtime: {:.4f} s || ETA: {}'
            .format(epoch, max_epoch, (iteration % epoch_size) + 1,
                    epoch_size, iteration + 1, max_iter, loss_l.item(),
                    loss_c.item(), lr, batch_time,
                    str(datetime.timedelta(seconds=eta))))

    torch.save(net.state_dict(),
               os.path.join(save_folder, 'Final_FaceBoxes.pth'))
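
adjust_learning_rate is not shown in this excerpt; FaceBoxes-style training scripts usually combine a short linear warmup with step decay at the stepvalues milestones. A minimal sketch under that assumption (the warmup length and the initial_lr name are illustrative):

def adjust_learning_rate(optimizer, gamma, epoch, step_index, iteration, epoch_size):
    warmup_epochs = 5
    if epoch < warmup_epochs:
        # linear warmup from a tiny lr up to the base learning rate
        lr = 1e-6 + (initial_lr - 1e-6) * iteration / (epoch_size * warmup_epochs)
    else:
        # step decay: shrink by gamma at every milestone passed so far
        lr = initial_lr * (gamma ** step_index)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr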
Code example #6
File: main.py  Project: jianzhnie/FaceBoxes
def main():
    global args
    global minmum_loss
    args.gpu = 0
    args.world_size = 1

    if args.distributed:
        args.gpu = args.local_rank % torch.cuda.device_count()
        torch.cuda.set_device(args.gpu)
        torch.distributed.init_process_group(backend='nccl',
                                             init_method='env://')
        args.world_size = torch.distributed.get_world_size()

    args.total_batch_size = args.world_size * args.batch_size

    model = FaceBoxes('train', args.num_classes)
    print("Printing net...")

    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model)

    model = model.cuda()

    # optimizer and loss function
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    criterion = MultiBoxLoss(num_classes=args.num_classes,
                             overlap_thresh=0.35,
                             prior_for_matching=True,
                             bkg_label=0,
                             neg_mining=True,
                             neg_pos=7,
                             neg_overlap=0.35,
                             encode_target=False)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(
                args.resume,
                map_location=lambda storage, loc: storage.cuda(args.gpu))
            args.start_epoch = checkpoint['epoch']
            minmum_loss = checkpoint['minmum_loss']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # Data loading code
    print('Loading Dataset...')
    dataset = VOCDetection(args.training_dataset,
                           preproc(args.img_dim, args.rgb_mean),
                           AnnotationTransform())
    train_loader = data.DataLoader(dataset,
                                   args.batch_size,
                                   num_workers=args.num_workers,
                                   shuffle=True,
                                   collate_fn=detection_collate,
                                   pin_memory=True)

    priorbox = PriorBox(cfg, image_size=(args.img_dim, args.img_dim))
    with torch.no_grad():
        priors = priorbox.forward()
        priors = priors.cuda()

    for epoch in range(args.start_epoch, args.epochs):
        # train for one epoch
        end = time.time()
        loss = train(train_loader, model, priors, criterion, optimizer, epoch)
        if args.local_rank == 0:
            is_best = loss < minmum_loss
            minmum_loss = min(loss, minmum_loss)
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    # key name must match what the resume branch above reads
                    'minmum_loss': minmum_loss,
                    'optimizer': optimizer.state_dict(),
                }, is_best, epoch)
        epoch_time = time.time() - end
        print('Epoch %s time cost %f' % (epoch, epoch_time))
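
save_checkpoint is defined elsewhere in the project; a plausible minimal version consistent with how it is called above (the output file names are assumptions) would be:

import os
import shutil

import torch


def save_checkpoint(state, is_best, epoch, save_folder='weights/'):
    filename = os.path.join(save_folder, 'FaceBoxes_epoch_{}.pth'.format(epoch))
    torch.save(state, filename)
    if is_best:
        # keep a separate copy of the best checkpoint seen so far
        shutil.copyfile(filename, os.path.join(save_folder, 'FaceBoxes_best.pth'))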
Code example #7
        if head == 'module.':
            name = k[7:]  # remove `module.`
        else:
            name = k
        new_state_dict[name] = v
    net.load_state_dict(new_state_dict)
    net.eval()
    print('Finished loading model!')
    print(net)
    if args.cuda:
        net = net.cuda()
        cudnn.benchmark = True
    else:
        net = net.cpu()
    # evaluation
    #top_k = (300, 200)[args.dataset == 'COCO']
    top_k = 200
    detector = Detect(num_classes, 0, cfg)
    save_folder = os.path.join(args.save_folder, args.dataset)
    rgb_means = ((104, 117, 123), (103.94, 116.78,
                                   123.68))[args.version == 'RFB_mobile']
    basetransform = preproc(512, rgb_means, -2)
    test_net(num_classes,
             save_folder,
             net,
             detector,
             args.cuda,
             basetransform,
             top_k,
             thresh=0.01)
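
The excerpt above starts partway through loading a checkpoint: the surrounding loop typically strips the leading `module.` prefix that nn.DataParallel adds to every key. A sketch of the full pattern, assuming the checkpoint path comes from args.trained_model:

from collections import OrderedDict

import torch

state_dict = torch.load(args.trained_model, map_location='cpu')
new_state_dict = OrderedDict()
for k, v in state_dict.items():
    head = k[:7]
    if head == 'module.':
        name = k[7:]  # remove `module.`
    else:
        name = k
    new_state_dict[name] = v
net.load_state_dict(new_state_dict)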