Example #1
0
def main(model='res_unet.ResUNet', res=(256, )):
    """Smoke-test a factory-built model by feeding random square inputs.

    Args:
        model: Factory string resolved by ``face_tran``'s ``obj_factory``.
        res: Tuple of square input resolutions. One entry feeds a single
            tensor; several entries feed a list of tensors, iterating ``res``
            in reverse order (mirroring the original behavior).
    """
    import torch
    from face_tran.utils.obj_factory import obj_factory
    model = obj_factory(model)
    if len(res) == 1:
        # Bug fix: `res` is a tuple — the original passed the tuple itself to
        # torch.rand (a TypeError); use the scalar resolution instead.
        img = torch.rand(1, model.in_nc, res[0], res[0])
        pred = model(img)
        print(pred.shape)
    else:
        # One random input per resolution, from last entry to first.
        img = [torch.rand(1, model.in_nc, res[-i], res[-i])
               for i in range(1, len(res) + 1)]
        pred = model(img)
        print(pred.shape)
def main(root_path, img_list, pil_transforms):
    """Preview every image in an ``ImageListDataset`` with OpenCV.

    Shows each image in a window titled ``'img'``, prints its label, and waits
    for a key press before advancing.

    Args:
        root_path: Dataset root directory.
        img_list: Image-list specification forwarded to ``ImageListDataset``.
        pil_transforms: Optional iterable of transform factory strings.
    """
    import cv2
    import numpy as np  # bug fix: `np` was used below without being imported
    import torchvision.transforms as transforms
    from face_tran.utils.obj_factory import obj_factory
    pil_transforms = [obj_factory(t) for t in pil_transforms
                      ] if pil_transforms is not None else []
    pil_transforms = transforms.Compose(pil_transforms)
    dataset = ImageListDataset(root_path, img_list, transform=pil_transforms)
    for img, label in dataset:
        # PIL yields RGB; OpenCV expects BGR, so reverse the channel axis.
        img = np.array(img)[:, :, ::-1].copy()
        cv2.imshow('img', img)
        print('label = ' + str(label))
        cv2.waitKey(0)
def main(root_path, split, pil_transforms):
    """Visually inspect CelebaHQ samples one by one with OpenCV.

    Each image is shown in a window titled ``'img'`` with its label printed to
    stdout; advancing requires a key press.

    Args:
        root_path: CelebaHQ root directory.
        split: Dataset split name forwarded to ``CelebaHQ``.
        pil_transforms: Optional iterable of transform factory strings.
    """
    import cv2
    import torchvision.transforms as transforms
    import numpy as np
    from face_tran.utils.obj_factory import obj_factory

    transform_list = [] if pil_transforms is None else [
        obj_factory(t) for t in pil_transforms
    ]
    composed = transforms.Compose(transform_list)
    dataset = CelebaHQ(root=root_path, split=split, pil_transforms=composed)
    for img, label in dataset:
        # Convert the PIL RGB image to OpenCV's BGR channel order.
        bgr = np.array(img)[:, :, ::-1].copy()
        cv2.imshow('img', bgr)
        print('label = ' + str(label))
        cv2.waitKey(0)
    def __init__(self, det_model_path=None, lms_model_path=None,  seg_model_path=None,
                 gpus=None, cpu_only=None, max_size=640, batch_size=8,
                 conf_threshold=0.5, nms_threshold=0.4, verbose=0):
        """Configure the face pipeline and load any requested models.

        Args:
            det_model_path: Path to a TorchScript face detection model, or None to skip.
            lms_model_path: Path to a TorchScript face landmarks model, or None to skip.
            seg_model_path: Path to a face segmentation model, or None to skip. A
                ``.pth`` file is treated as a training checkpoint holding an
                ``arch`` factory string plus a ``state_dict``; any other file is
                loaded as TorchScript.
            gpus: Device selection forwarded to ``tran_utils.set_device``.
            cpu_only: Forwarded to ``tran_utils.set_device`` to force CPU execution.
            max_size: Stored verbatim; presumably a maximum image side length in
                pixels — TODO confirm against callers.
            batch_size: Stored verbatim for later batched inference.
            conf_threshold: Stored detection confidence threshold.
            nms_threshold: Stored non-maximum-suppression threshold.
            verbose: Stored verbosity level.

        Raises:
            RuntimeError: If a loaded model object is None.
        """

        self.max_size = max_size
        self.batch_size = batch_size
        self.conf_threshold = conf_threshold
        self.nms_threshold = nms_threshold
        self.verbose = verbose
        # Resolve the torch device once; all models are mapped onto it below.
        self.device = tran_utils.set_device(gpus, cpu_only)

        # Load face detection model
        if det_model_path is not None:
            print('Loading face detection model: "' + os.path.basename(det_model_path) + '"...')
            self.detection_net = torch.jit.load(det_model_path, map_location=self.device)
            # NOTE(review): torch.jit.load raises on failure rather than
            # returning None, so this guard is likely never triggered.
            if self.detection_net is None:
                raise RuntimeError('Failed to load face detection model!')

        # Load face landmarks model
        if lms_model_path is not None:
            print('Loading face landmarks model: "' + os.path.basename(lms_model_path) + '"...')
            self.landmarks_net = torch.jit.load(lms_model_path, map_location=self.device)
            if self.landmarks_net is None:
                raise RuntimeError('Failed to load face landmarks model!')

        # Load face segmentation model
        if seg_model_path is not None:
            print('Loading face segmentation model: "' + os.path.basename(seg_model_path) + '"...')
            if seg_model_path.endswith('.pth'):
                # Checkpoint path: rebuild the architecture from its factory
                # string, then restore the trained weights.
                checkpoint = torch.load(seg_model_path)
                self.segmentation_net = obj_factory(checkpoint['arch']).to(self.device)
                self.segmentation_net.load_state_dict(checkpoint['state_dict'])
            else:
                self.segmentation_net = torch.jit.load(seg_model_path, map_location=self.device)
            if self.segmentation_net is None:
                raise RuntimeError('Failed to load face segmentation model!')
            # Inference only: freeze dropout / batch-norm behavior.
            self.segmentation_net.eval()
def main(exp_dir='/data/experiments', pretrained_model=None, train_dir=None, val_dir=None, workers=4, iterations=None, epochs=180, start_epoch=0,
         batch_size=64, learning_rate=0.1, momentum=0.9, weight_decay=1e-4, resume_dir=None, seed=None,
         gpus=None, tensorboard=False,
         train_dataset=None, val_dataset=None,
         optimizer='optim.SGD(lr=0.1,momentum=0.9,weight_decay=1e-4)',
         scheduler='lr_scheduler.StepLR(step_size=30,gamma=0.1)',
         log_freq=20, pil_transforms=None, tensor_transforms=None, arch='resnet18'):
    """Train an image classifier and checkpoint the best top-1 precision.

    Builds datasets and loaders from factory strings, optionally resumes from
    ``<checkpoint_dir>/model_latest.pth``, then runs the train/validate loop,
    saving the latest checkpoint each epoch (plus a "best" copy when top-1
    precision improves) via ``utils.save_checkpoint``.

    Args:
        exp_dir: Existing experiment directory for checkpoints/logs.
        pretrained_model: Optional path to initial weights, loaded non-strict.
        train_dir / val_dir: Dataset root directories handed to the factories.
        workers: DataLoader worker processes.
        iterations: If set, randomly sub-sample this many items per epoch.
        epochs / start_epoch: Epoch range for the training loop.
        batch_size: Mini-batch size for both loaders.
        learning_rate / momentum / weight_decay / seed: Accepted for CLI
            compatibility but unused here — optimizer settings come from the
            ``optimizer`` factory string.
        resume_dir: Directory to resume from (defaults to ``exp_dir``).
        gpus: GPU id list; defaults to all visible devices when CUDA is available.
        tensorboard: Enable a TensorBoard ``SummaryWriter`` in ``exp_dir``.
        train_dataset / val_dataset: Dataset factory strings; the validation
            spec falls back to the training spec when unset.
        optimizer / scheduler: Factory strings for optimizer and LR scheduler.
        log_freq: Logging frequency forwarded to train/validate.
        pil_transforms / tensor_transforms: Optional transform factory strings.
        arch: Model factory string.

    Raises:
        RuntimeError: If ``exp_dir`` does not exist.
    """
    best_prec1 = 0

    # Validation
    if not os.path.isdir(exp_dir):
        raise RuntimeError('Experiment directory was not found: \'' + exp_dir + '\'')

    # Check CUDA device availability
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        gpus = list(range(torch.cuda.device_count())) if not gpus else gpus
        print('=> using GPU devices: {}'.format(', '.join(map(str, gpus))))
    else:
        gpus = None
        print('=> using CPU device')
    device = torch.device('cuda:{}'.format(gpus[0])) if gpus else torch.device('cpu')

    # Initialize loggers
    logger = SummaryWriter(log_dir=exp_dir) if tensorboard else None

    # Initialize datasets: PIL transforms run first; tensor transforms default
    # to ToTensor followed by [-1, 1] normalization.
    pil_transforms = [obj_factory(t) for t in pil_transforms] if pil_transforms is not None else []
    tensor_transforms = [obj_factory(t) for t in tensor_transforms] if tensor_transforms is not None else []
    if not tensor_transforms:
        tensor_transforms = [transforms.ToTensor(),
                             transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]
    img_transforms = transforms.Compose(pil_transforms + tensor_transforms)

    val_dataset = train_dataset if val_dataset is None else val_dataset
    train_dataset = obj_factory(train_dataset, train_dir, transform=img_transforms)
    if val_dir:
        val_dataset = obj_factory(val_dataset, val_dir, transform=img_transforms)

    # Initialize data loaders; with `iterations` set, each epoch draws a random
    # subset instead of iterating the whole dataset.
    sampler = None
    if iterations is not None:
        sampler = tutils.data.sampler.SubsetRandomSampler(np.random.choice(len(train_dataset), iterations))
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=batch_size, sampler=sampler,
        num_workers=workers, pin_memory=True, drop_last=True,
        shuffle=sampler is None)  # DataLoader forbids shuffle together with a sampler
    if val_dir:
        if iterations is not None:
            sampler = tutils.data.sampler.SubsetRandomSampler(np.random.choice(len(val_dataset), iterations))
        val_loader = torch.utils.data.DataLoader(
            val_dataset, batch_size=batch_size, sampler=sampler,
            num_workers=workers, pin_memory=True, drop_last=True,
            shuffle=sampler is None)

    # Create model
    model = obj_factory(arch)
    model.apply(utils.init_weights)
    if pretrained_model is not None:
        pretrained_weights = torch.load(pretrained_model)
        if arch.startswith('vgg'):
            # VGG checkpoints here only cover the convolutional feature extractor.
            model.features.load_state_dict(pretrained_weights, strict=False)
        else:
            model.load_state_dict(pretrained_weights, strict=False)

    # Optimizer and scheduler
    optimizer = obj_factory(optimizer, model.parameters())
    scheduler = obj_factory(scheduler, optimizer)

    # Optionally resume from a checkpoint
    checkpoint_dir = exp_dir if resume_dir is None else resume_dir
    model_path = os.path.join(checkpoint_dir, 'model_latest.pth')
    if os.path.isfile(model_path):
        print("=> loading checkpoint from '{}'".format(checkpoint_dir))
        checkpoint = torch.load(model_path)
        best_prec1 = checkpoint['best_prec1']
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])
    else:
        print("=> no checkpoint found at '{}'".format(checkpoint_dir))

    # Support multiple GPUs
    if gpus and len(gpus) > 1:
        model = nn.DataParallel(model, gpus).to(device)
    else:
        model.to(device)

    # define loss function (criterion)
    criterion = nn.CrossEntropyLoss().to(device)

    cudnn.benchmark = True

    # NOTE(review): the loop below assumes `val_dir` was provided; without it
    # `val_loader` is never defined and validate() raises NameError.
    for epoch in range(start_epoch, epochs):
        if not isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
            scheduler.step()

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, epochs, device, logger, log_freq)

        # evaluate on validation set
        val_loss, prec1 = validate(val_loader, model, criterion, epoch, epochs, device, logger, log_freq)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        utils.save_checkpoint(exp_dir, 'model', {
            'epoch': epoch + 1,
            'arch': arch,
            'state_dict': model.module.state_dict() if gpus and len(gpus) > 1 else model.state_dict(),
            # Bug fix: store the running best (was the current epoch's `prec1`),
            # so resuming restores the true best precision.
            'best_prec1': best_prec1,
            'optimizer': optimizer.state_dict(),
            'scheduler': scheduler.state_dict(),
        }, is_best)

        if isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
            scheduler.step(val_loss)
def main(exp_dir='/data/experiments',
         pretrained_dir=None,
         train_dir=None,
         val_dir=None,
         workers=4,
         iterations=None,
         epochs=90,
         start_epoch=0,
         batch_size=1,
         resume_dir=None,
         seed=None,
         gpus=None,
         tensorboard=False,
         train_dataset=None,
         val_dataset=None,
         optimizer='optim.SGD(lr=0.1,momentum=0.9,weight_decay=1e-4)',
         scheduler='lr_scheduler.StepLR(step_size=30,gamma=0.1)',
         log_freq=20,
         pil_transforms=None,
         tensor_transforms=None,
         visual_augmentations=None,
         geo_augmentations=None,
         arch='resnet18',
         pretrained=False,
         cudnn_benchmark=True):
    """Train a semantic-segmentation model and checkpoint the best Mean IoU.

    Builds paired (image, mask) transforms, weighted-sampled loaders, and a
    factory-built model; optionally loads ``<pretrained_dir>/model_best.pth``,
    then runs the train/validate loop for ``epochs`` epochs, saving a
    checkpoint (with the per-epoch report) whenever Mean IoU improves.

    Args:
        exp_dir: Existing experiment directory for checkpoints/logs.
        pretrained_dir: Directory holding ``model_best.pth`` to warm-start from.
        train_dir / val_dir: Dataset root directories handed to the factories.
        workers: DataLoader worker processes.
        iterations: Samples drawn per epoch by the weighted sampler
            (defaults to the full dataset length).
        epochs / start_epoch: Epoch range for the training loop.
        batch_size: Mini-batch size.
        resume_dir: Accepted but unused here — TODO confirm intent.
        seed: If set, seeds Python/torch RNGs and enables deterministic cudnn.
        gpus: GPU id list; defaults to all visible devices when CUDA is available.
        tensorboard: Enable a TensorBoard ``SummaryWriter`` in ``exp_dir``.
        train_dataset / val_dataset: Dataset factory strings; the validation
            spec falls back to the training spec when unset.
        optimizer / scheduler: Factory strings for optimizer and LR scheduler.
        log_freq: Logging frequency forwarded to train().
        pil_transforms: Transform factory strings composed into a
            ``seg_transforms.ComposePair`` (applied to image and mask together).
        tensor_transforms: Tensor transform factory strings; defaults to
            ToTensor + [-1, 1] normalization.
        visual_augmentations / geo_augmentations: Augmentation factory strings
            for the training dataset (geo augmentation composition is
            currently commented out).
        arch: Model factory string.
        pretrained: When False the model weights are re-initialized.
        cudnn_benchmark: Toggles ``cudnn.benchmark``.

    Raises:
        RuntimeError: If ``exp_dir`` does not exist.
    """
    best_iou = 0

    # Validation
    if not os.path.isdir(exp_dir):
        raise RuntimeError('Experiment directory was not found: \'' + exp_dir +
                           '\'')

    # Seed
    if seed is not None:
        random.seed(seed)
        torch.manual_seed(seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    # Check CUDA device availability
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        gpus = list(range(torch.cuda.device_count())) if not gpus else gpus
        print('=> using GPU devices: {}'.format(', '.join(map(str, gpus))))
    else:
        gpus = None
        print('=> using CPU device')
    device = torch.device('cuda:{}'.format(
        gpus[0])) if gpus else torch.device('cpu')

    # Initialize loggers
    logger = SummaryWriter(log_dir=exp_dir) if tensorboard else None

    # Initialize transforms
    # Paired transforms apply identically to image and segmentation mask.
    pil_transforms = [obj_factory(t) for t in pil_transforms
                      ] if type(pil_transforms) == list else pil_transforms
    pil_transforms = seg_transforms.ComposePair(pil_transforms)
    visual_augmentations = [obj_factory(t) for t in visual_augmentations
                            ] if visual_augmentations else []
    if visual_augmentations:
        visual_augmentations = transforms.Compose(visual_augmentations)
    # geo_augmentations = [obj_factory(t) for t in geo_augmentations] if visual_augmentations else []
    # if geo_augmentations:
    #     geo_augmentations = transforms.Compose(geo_augmentations)
    tensor_transforms = [obj_factory(t) for t in tensor_transforms
                         ] if tensor_transforms is not None else []
    if not tensor_transforms:
        tensor_transforms = [
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        ]
    tensor_transforms = transforms.Compose(tensor_transforms)

    # Initialize datasets
    # Validation spec falls back to the training spec; augmentations apply to
    # the training split only.
    val_dataset = train_dataset if val_dataset is None else val_dataset
    train_dataset = obj_factory(train_dataset,
                                train_dir,
                                pil_transforms=pil_transforms,
                                tensor_transforms=tensor_transforms,
                                visual_augmentations=visual_augmentations,
                                geo_augmentations=geo_augmentations)

    if val_dir:
        val_dataset = obj_factory(val_dataset,
                                  val_dir,
                                  pil_transforms=pil_transforms,
                                  tensor_transforms=tensor_transforms)
    # Initialize data loaders
    # Class-balanced sampling without replacement via the dataset's `weights`.
    if iterations is None:
        train_sampler = tutils.data.sampler.WeightedRandomSampler(
            train_dataset.weights, len(train_dataset), replacement=False)
    else:
        train_sampler = tutils.data.sampler.WeightedRandomSampler(
            train_dataset.weights, iterations, replacement=False)
    train_loader = tutils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        sampler=train_sampler,
        num_workers=workers,
        pin_memory=True,
        drop_last=True,
        shuffle=False,
    )
    if val_dir:
        val_loader = torch.utils.data.DataLoader(val_dataset,
                                                 batch_size=batch_size,
                                                 num_workers=workers)

    # Create model
    model = obj_factory(arch)

    if not pretrained:
        model.apply(utils.init_weights)
    model.to(device)

    # Optimizer and scheduler
    optimizer = obj_factory(optimizer, model.parameters())
    scheduler = obj_factory(scheduler, optimizer)

    # optionally resume from a checkpoint
    # NOTE(review): `pretrained_dir` defaults to None, which makes
    # os.path.join below raise TypeError — callers must supply it; verify.
    checkpoint_dir = pretrained_dir
    model_path = os.path.join(checkpoint_dir, 'model_best.pth')
    if os.path.isfile(model_path):
        print("=> loading checkpoint from '{}'".format(checkpoint_dir))
        checkpoint = torch.load(model_path)
        model.load_state_dict(checkpoint['state_dict'])

    else:
        print("=> no checkpoint found at '{}'".format(checkpoint_dir))
        if pretrained:
            print("=> using pre-trained model '{}'".format(arch))
        else:
            print("=> randomly initializing model...")

    # Support multiple GPUs
    if gpus and len(gpus) > 1:
        model = nn.DataParallel(model, gpus)

    # define loss function (criterion)
    criterion = nn.CrossEntropyLoss().to(device)

    # define running metrics
    running_metrics_val = seg_utils.Metrics(train_dataset.n_classes)

    # if input shape are the same for the dataset then set to True, otherwise False
    cudnn.benchmark = cudnn_benchmark

    # training/validation procedure loop
    # NOTE(review): validate() uses `val_loader`, which is only defined when
    # `val_dir` is truthy — confirm callers always pass a validation dir.
    for epoch in range(start_epoch, epochs):
        if not isinstance(scheduler, optim.lr_scheduler.ReduceLROnPlateau):
            scheduler.step()

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, device, logger,
              log_freq)

        # evaluate on validation set
        val_loss, score, report = validate(val_loader, model, criterion, epoch,
                                           epochs, device, logger,
                                           running_metrics_val)

        # remember best MeanIoU and save checkpoint
        # The odd key (trailing tab/colon) matches what validate()'s score
        # dict actually uses.
        iou = score["Mean IoU : \t"]
        is_best = iou > best_iou
        best_iou = max(iou, best_iou)
        utils.save_checkpoint(
            exp_dir, 'model', {
                'epoch':
                epoch + 1,
                'arch':
                arch,
                'state_dict':
                model.module.state_dict()
                if gpus and len(gpus) > 1 else model.state_dict(),
                'best_iou':
                best_iou,
                'optimizer':
                optimizer.state_dict(),
                'scheduler':
                scheduler.state_dict(),
                'report':
                report,
            }, is_best)

        if isinstance(scheduler, optim.lr_scheduler.ReduceLROnPlateau):
            scheduler.step(val_loss)
Example #7
0
def main(exp_dir='/data/experiments', train_dir=None, val_dir=None, workers=4, iterations=None, epochs=180, start_epoch=0,
         batch_size=64, resume_dir=None, seed=None,
         gpus=None, tensorboard=False,
         train_dataset=None, val_dataset=None,
         optimizer='optim.SGD(lr=0.1,momentum=0.9,weight_decay=1e-4)',
         scheduler='lr_scheduler.StepLR(step_size=30,gamma=0.1)',
         log_freq=20, pil_transforms=None, tensor_transforms=None, arch='resnet18'):
    """Train a multi-label classifier and checkpoint the best mean accuracy.

    Uses class-weighted random sampling on the training set, a
    ``MultiLabelSoftMarginLoss`` criterion, and ``utils.RunningScore`` metrics;
    optionally resumes from ``<checkpoint_dir>/model_latest.pth`` and saves a
    checkpoint every epoch (plus a "best" copy when mean accuracy improves).

    Args:
        exp_dir: Existing experiment directory for checkpoints/logs.
        train_dir / val_dir: Dataset root directories handed to the factories.
        workers: DataLoader worker processes.
        iterations: Samples drawn per epoch by the weighted sampler
            (defaults to the full dataset length).
        epochs / start_epoch: Epoch range for the training loop.
        batch_size: Mini-batch size for both loaders.
        resume_dir: Directory to resume from (defaults to ``exp_dir``).
        seed: If set, seeds Python/torch RNGs and enables deterministic cudnn.
        gpus: GPU id list; defaults to all visible devices when CUDA is available.
        tensorboard: Enable a TensorBoard ``SummaryWriter`` in ``exp_dir``.
        train_dataset / val_dataset: Dataset factory strings; the validation
            spec falls back to the training spec when unset.
        optimizer / scheduler: Factory strings for optimizer and LR scheduler.
        log_freq: Logging frequency forwarded to train().
        pil_transforms / tensor_transforms: Optional transform factory strings.
        arch: Model factory string.

    Raises:
        RuntimeError: If ``exp_dir`` does not exist.
    """
    best_acc_mean = 0

    # Validation
    if not os.path.isdir(exp_dir):
        raise RuntimeError('Experiment directory was not found: \'' + exp_dir + '\'')

    # Seed
    if seed is not None:
        random.seed(seed)
        torch.manual_seed(seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    # Check CUDA device availability
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        gpus = list(range(torch.cuda.device_count())) if not gpus else gpus
        print('=> using GPU devices: {}'.format(', '.join(map(str, gpus))))
    else:
        gpus = None
        print('=> using CPU device')
    device = torch.device('cuda:{}'.format(gpus[0])) if gpus else torch.device('cpu')

    # Initialize loggers
    logger = SummaryWriter(log_dir=exp_dir) if tensorboard else None

    # Initialize datasets
    # Tensor transforms default to ToTensor + [-1, 1] normalization; the
    # combined pipeline is passed as `pil_transforms` to the dataset factory.
    pil_transforms = [obj_factory(t) for t in pil_transforms] if pil_transforms is not None else []
    tensor_transforms = [obj_factory(t) for t in tensor_transforms] if tensor_transforms is not None else []
    if not tensor_transforms:
        tensor_transforms = [transforms.ToTensor(),
                             transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]
    img_transforms = transforms.Compose(pil_transforms + tensor_transforms)

    val_dataset = train_dataset if val_dataset is None else val_dataset
    train_dataset = obj_factory(train_dataset, train_dir, pil_transforms=img_transforms)
    if val_dir:
        val_dataset = obj_factory(val_dataset, val_dir, pil_transforms=img_transforms)

    # Initialize data loaders
    # Weighted sampling (with replacement, the sampler default) balances
    # classes via the dataset's `weights` attribute.
    if iterations is None:
        train_sampler = tutils.data.sampler.WeightedRandomSampler(train_dataset.weights, len(train_dataset))
    else:
        train_sampler = tutils.data.sampler.WeightedRandomSampler(train_dataset.weights, iterations)
    train_loader = tutils.data.DataLoader(train_dataset, batch_size=batch_size, sampler=train_sampler,
                                          num_workers=workers, pin_memory=True, drop_last=True, shuffle=False)
    if val_dir:
        val_loader = tutils.data.DataLoader(val_dataset, batch_size=batch_size,
                                            num_workers=workers, pin_memory=True, drop_last=True, shuffle=False)

    # Create model
    model = obj_factory(arch, num_classes=train_dataset.classes).to(device)

    # Optimizer and scheduler
    optimizer = obj_factory(optimizer, model.parameters())
    scheduler = obj_factory(scheduler, optimizer)

    # Optionally resume from a checkpoint
    checkpoint_dir = exp_dir if resume_dir is None else resume_dir
    model_path = os.path.join(checkpoint_dir, 'model_latest.pth')
    if os.path.isfile(model_path):
        print("=> loading checkpoint from '{}'".format(checkpoint_dir))
        checkpoint = torch.load(model_path)
        best_acc_mean = checkpoint['best_acc_mean']
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])
    else:
        print("=> no checkpoint found at '{}'".format(checkpoint_dir))

    # Support multiple GPUs
    if gpus and len(gpus) > 1:
        model = nn.DataParallel(model, gpus)

    # define loss function (criterion)
    criterion = nn.MultiLabelSoftMarginLoss().to(device)

    running_metrics_val = utils.RunningScore(train_dataset.classes)
    cudnn.benchmark = True

    # NOTE(review): the loop below assumes `val_dir` was provided; without it
    # `val_loader` is never defined and validate() raises NameError.
    for epoch in range(start_epoch, epochs):
        if not isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
            scheduler.step()

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, epochs, device, logger, log_freq)

        # evaluate on validation set
        val_loss, acc_mean = validate(val_loader, model, criterion, epoch, epochs, device, logger, running_metrics_val)

        # remember best prec@1 and save checkpoint
        is_best = acc_mean > best_acc_mean
        best_acc_mean = max(acc_mean, best_acc_mean)
        utils.save_checkpoint(exp_dir, 'model', {
            'epoch': epoch + 1,
            'arch': arch,
            'state_dict': model.module.state_dict() if gpus and len(gpus) > 1 else model.state_dict(),
            'best_acc_mean': best_acc_mean,
            'optimizer': optimizer.state_dict(),
            'scheduler': scheduler.state_dict(),
        }, is_best)

        if isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
            scheduler.step(val_loss)
        # Reset the confusion-matrix accumulator between epochs.
        running_metrics_val.reset()
def main(model='res_unet.ResUNet', res=256):
    """Build a model from its factory string and print the output shape for a
    random square input of side ``res``."""
    from face_tran.utils.obj_factory import obj_factory
    net = obj_factory(model)
    sample = torch.rand(1, net.in_nc, res, res)
    pred = net(sample)
    print(pred.shape)
def main(exp_dir='/data/experiments',
         output_dir='/data/experiments',
         val_dir=None,
         workers=4,
         iterations=None,
         batch_size=1,
         seed=None,
         gpus=None,
         tensorboard=False,
         val_dataset=None,
         pil_transforms=None,
         tensor_transforms=None,
         arch='resnet18',
         cudnn_benchmark=True,
         use_crf=False):
    """Evaluate a trained segmentation model on a validation set.

    Loads ``<exp_dir>/model_best.pth`` into a factory-built model and runs the
    module-level ``validate`` over the validation loader.

    Args:
        exp_dir: Existing experiment directory holding ``model_best.pth``.
        output_dir: Accepted for CLI compatibility but unused here.
        val_dir: Validation dataset root directory passed to the factory.
        workers: DataLoader worker processes.
        iterations: Accepted for CLI compatibility but unused here.
        batch_size: Mini-batch size for the validation loader.
        seed: If set, seeds Python/torch RNGs and enables deterministic cudnn.
        gpus: GPU id list; defaults to all visible devices when CUDA is available.
        tensorboard: Accepted for CLI compatibility but unused here.
        val_dataset: Dataset factory string.
        pil_transforms: Optional transform factory strings (or a pre-built
            transform, passed through unchanged).
        tensor_transforms: Tensor transform factory strings; defaults to
            ToTensor + [-1, 1] normalization.
        arch: Model factory string.
        cudnn_benchmark: Toggles ``cudnn.benchmark``.
        use_crf: Accepted for CLI compatibility but unused here.

    Raises:
        RuntimeError: If ``exp_dir`` does not exist.
    """

    # Validation
    if not os.path.isdir(exp_dir):
        raise RuntimeError('Experiment directory was not found: \'' + exp_dir +
                           '\'')

    # Seed
    if seed is not None:
        random.seed(seed)
        torch.manual_seed(seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    # Check CUDA device availability
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        gpus = list(range(torch.cuda.device_count())) if not gpus else gpus
        print('=> using GPU devices: {}'.format(', '.join(map(str, gpus))))
    else:
        gpus = None
        print('=> using CPU device')
    device = torch.device('cuda:{}'.format(
        gpus[0])) if gpus else torch.device('cpu')

    # Initialize datasets
    if pil_transforms is not None:
        pil_transforms = [obj_factory(t) for t in pil_transforms
                          ] if type(pil_transforms) == list else pil_transforms

    tensor_transforms = [obj_factory(t) for t in tensor_transforms
                         ] if tensor_transforms is not None else []
    if not tensor_transforms:
        tensor_transforms = [
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        ]
    tensor_transforms = transforms.Compose(tensor_transforms)

    print(val_dir, val_dataset)
    val_dataset = obj_factory(val_dataset,
                              val_dir,
                              pil_transforms=pil_transforms,
                              tensor_transforms=tensor_transforms)

    # Initialize data loaders
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=batch_size,
                                             num_workers=workers)

    # Create model
    model = obj_factory(arch)
    model.to(device)

    # Load weights
    checkpoint_dir = exp_dir
    model_path = os.path.join(checkpoint_dir, 'model_best.pth')
    if os.path.isfile(model_path):
        print("=> loading checkpoint from '{}'".format(checkpoint_dir))
        checkpoint = torch.load(model_path)
        model.load_state_dict(checkpoint['state_dict'])
    else:
        # Consistency fix: siblings report a missing checkpoint instead of
        # silently evaluating randomly-initialized weights.
        print("=> no checkpoint found at '{}'".format(checkpoint_dir))

    # Support multiple GPUs
    if gpus and len(gpus) > 1:
        model = nn.DataParallel(model, gpus)

    # if input shape are the same for the dataset then set to True, otherwise False
    cudnn.benchmark = cudnn_benchmark

    # evaluate on validation set
    validate(val_loader, model, device, val_dir, batch_size)