all_dev = parse_devices(args.devices)

    network = PSPNet(config.num_classes)
    data_setting = {
        'img_root': config.img_root_folder,
        'gt_root': config.gt_root_folder,
        'train_source': config.train_source,
        'eval_source': config.eval_source,
        'test_source': config.test_source
    }
    dataset = Cil(data_setting, 'val', None)

    if args.speed_test:
        device = all_dev[0]
        logger.info("=========DEVICE:%s SIZE:%s=========" %
                    (torch.cuda.get_device_name(device), args.input_size))
        input_size = tuple(int(x) for x in args.input_size.split('x'))
        compute_speed(network, input_size, device, args.iteration)
    elif args.summary:
        input_size = tuple(int(x) for x in args.input_size.split('x'))
        stat(network, input_size)
    else:
        with torch.no_grad():
            segmentor = SegEvaluator(dataset, config.num_classes,
                                     config.image_mean, config.image_std,
                                     network, config.eval_scale_array,
                                     config.eval_flip, all_dev, args.verbose,
                                     args.save_path)
            segmentor.run(config.snapshot_dir, args.epochs,
                          config.val_log_file, config.link_val_log_file)
def main():
    """Train a binary anti-spoofing classifier on CASIA-SURF.

    Parses CLI args, merges the YAML config's ``common`` section into them,
    builds the model/criterion/optimizer, optionally resumes from a
    checkpoint, then runs the train/validate loop, saving the best-accuracy
    checkpoint each time it improves.
    """
    global args, best_prec1, USE_GPU, device
    args = parser.parse_args()

    # safe_load avoids arbitrary Python object construction from the config
    # and is required since PyYAML 6, where yaml.load() without an explicit
    # Loader argument raises TypeError.
    with open(args.config) as f:
        config = yaml.safe_load(f)

    # Every key under "common" overrides/extends the CLI arguments.
    for k, v in config['common'].items():
        setattr(args, k, v)

    ## Set random seeds ##
    torch.manual_seed(args.random_seed)
    np.random.seed(args.random_seed)
    random.seed(args.random_seed)

    # create models
    if args.input_size != 224 or args.image_size != 256:
        image_size = args.image_size
        input_size = args.input_size
    else:
        image_size = 256
        input_size = 224
    print("Input image size: {}, test size: {}".format(image_size, input_size))

    if "model" in config.keys():
        model = models.__dict__[args.arch](**config['model'])
    else:
        model = models.__dict__[args.arch]()

    # Parse the GPU list up front: args.gpus arrives as a comma-separated
    # string ("0,1"); the original code indexed the raw string when building
    # the device, which only worked by accident for single-digit device ids.
    gpu_ids = ([int(i) for i in args.gpus.split(',')]
               if isinstance(args.gpus, str) else list(args.gpus))
    device = torch.device(
        'cuda:{}'.format(gpu_ids[0]) if torch.cuda.is_available() else "cpu")
    str_input_size = '1x3x224x224'
    if args.summary:
        # Print a layer-by-layer model summary and exit.
        input_size = tuple(int(x) for x in str_input_size.split('x'))
        stat(model, input_size)
        return
    if USE_GPU:
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.random_seed)
        args.gpus = gpu_ids
        model = torch.nn.DataParallel(model, device_ids=args.gpus)
        model.to(device)

    count_params(model)
    pytorch_total_params = sum(p.numel() for p in model.parameters())
    print('total_params', pytorch_total_params)

    # define loss function (criterion) and optimizer
    criterion = FocalLoss(device, 2, gamma=args.fl_gamma)

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    if args.speed:
        # Benchmark forward-pass speed and exit.
        input_size = tuple(int(x) for x in str_input_size.split('x'))
        iteration = 1000
        compute_speed(model, input_size, device, iteration)
        return

    # Linear LR scaling: grow the base learning rate with the batch size
    # (one multiple per 32 images).
    args.lr = args.lr * (args.batch_size // 32)

    # optionally resume from a checkpoint
    if args.resume:
        print(os.getcwd())
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # Data loading code
    # Channel statistics computed from the CASIA-SURF validation split.
    normalize = transforms.Normalize(
        mean=[0.14300402, 0.1434545, 0.14277956],
        std=[0.10050353, 0.100842826, 0.10034215])
    img_size = args.input_size

    # Keep the resize/crop ratio equivalent to the canonical 256 -> 224 setup.
    ratio = 224.0 / float(img_size)
    train_dataset = CASIA(
        args,
        transforms.Compose([
            transforms.RandomResizedCrop(img_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            ColorAugmentation(),
            normalize,
        ]),
        phase_train=True)
    val_dataset = CASIA(args,
                        transforms.Compose([
                            transforms.Resize(int(256 * ratio)),
                            transforms.CenterCrop(img_size),
                            transforms.ToTensor(),
                            normalize,
                        ]),
                        phase_train=False,
                        phase_test=args.phase_test)

    train_sampler = None
    val_sampler = None

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        shuffle=(train_sampler is None),
        num_workers=args.workers,
        pin_memory=(train_sampler is None),
        sampler=train_sampler)

    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True,
                                             sampler=val_sampler)

    if args.evaluate:
        # Evaluation-only mode: run validation once and exit.
        validate(val_loader, model, criterion, args.start_epoch)
        return
    else:
        print(model)

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)
        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion, epoch)
        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        if is_best:
            print('epoch: {} The best is {} last best is {}'.format(
                epoch, prec1, best_prec1))
        best_prec1 = max(prec1, best_prec1)

        # exist_ok avoids a race when several runs share the save directory.
        os.makedirs(args.save_path, exist_ok=True)

        if is_best:
            save_name = '{}/{}_{}_best.pth.tar'.format(args.save_path,
                                                       args.model_name, epoch)
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'best_prec1': best_prec1,
                    'optimizer': optimizer.state_dict(),
                },
                filename=save_name)
# ---- Example #3 (scrape artifact; original score: 0) ----
def main():
    """Train/evaluate an anti-spoofing model for one CASIA-SURF sub-protocol.

    The sub-protocol (4@1/4@2/4@3) and modality (ir/depth/merge) select the
    per-split normalization statistics and configure the CASIA dataset. With
    ``args.train_val`` set, validation runs each epoch and the best model is
    kept; otherwise a checkpoint is written unconditionally every epoch.
    """
    global args, best_prec1, USE_GPU, device, sub, modal
    args = parser.parse_args()
    modal = args.modal
    SUB = ['4@1', '4@2', '4@3']
    sub = SUB[args.sub - 1]

    # safe_load avoids arbitrary Python object construction from the config
    # and is required since PyYAML 6, where yaml.load() without an explicit
    # Loader argument raises TypeError.
    with open(args.config) as f:
        config = yaml.safe_load(f)

    # Every key under "common" overrides/extends the CLI arguments.
    for k, v in config['common'].items():
        setattr(args, k, v)

    ## Set random seeds ##
    torch.manual_seed(args.random_seed)
    np.random.seed(args.random_seed)
    random.seed(args.random_seed)

    # create models
    if args.input_size != 224 or args.image_size != 256:
        image_size = args.image_size
        input_size = args.input_size
    else:
        image_size = 256
        input_size = 224
    print("Input image size: {}, test size: {}".format(image_size, input_size))

    if "model" in config.keys():
        model = models.__dict__[args.arch](**config['model'])
    else:
        model = models.__dict__[args.arch]()

    # Parse the GPU list up front: args.gpus arrives as a comma-separated
    # string ("0,1"); the original code indexed the raw string when building
    # the device, which only worked by accident for single-digit device ids.
    gpu_ids = ([int(i) for i in args.gpus.split(',')]
               if isinstance(args.gpus, str) else list(args.gpus))
    device = torch.device(
        'cuda:{}'.format(gpu_ids[0]) if torch.cuda.is_available() else "cpu")
    str_input_size = '1x3x224x224'
    if args.summary:
        # Print a layer-by-layer model summary and exit.
        input_size = tuple(int(x) for x in str_input_size.split('x'))
        stat(model, input_size)
        return
    if USE_GPU:
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.random_seed)
        args.gpus = gpu_ids
        model = torch.nn.DataParallel(model, device_ids=args.gpus)
        model.to(device)

    pytorch_total_params = sum(p.numel() for p in model.parameters())
    print('total_params', pytorch_total_params)

    # define loss function (criterion) and optimizer
    criterion = FocalLoss(device, 2, gamma=args.fl_gamma)

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    if args.speed:
        # Benchmark forward-pass speed and exit.
        input_size = tuple(int(x) for x in str_input_size.split('x'))
        iteration = 1000
        compute_speed(model, input_size, device, iteration)
        return

    # optionally resume from a checkpoint
    if args.resume:
        print(os.getcwd())
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # Per sub-protocol / modality channel statistics (keyed "<sub>_<modal>").
    norm = dict()
    norm["4@1_depth"] = transforms.Normalize(
        mean=[0.70745318, 0.70745318, 0.70745318],
        std=[0.26528493, 0.26528493, 0.26528493])
    norm["4@2_depth"] = transforms.Normalize(
        mean=[0.67922, 0.67922, 0.67922],
        std=[0.2890028, 0.2890028, 0.2890028])
    norm["4@3_depth"] = transforms.Normalize(
        mean=[0.7110071, 0.7110071, 0.7110071],
        std=[0.28458922, 0.28458922, 0.28458922])
    norm["4@1_ir"] = transforms.Normalize(
        mean=[0.22784027, 0.22784027, 0.22784027],
        std=[0.10182471, 0.10182471, 0.10182471])
    norm["4@2_ir"] = transforms.Normalize(
        mean=[0.34846747, 0.34846747, 0.34846747],
        std=[0.17954783, 0.17954783, 0.17954783])
    norm["4@3_ir"] = transforms.Normalize(
        mean=[0.31522677, 0.31522677, 0.31522677],
        std=[0.16015441, 0.16015441, 0.16015441])

    img_size = args.input_size

    # "merge" trains on both modalities; otherwise just the requested one.
    if modal == 'merge':
        norm_type = ['ir', 'depth']
    else:
        norm_type = [modal]
    train_trans = dict()
    val_trans = dict()
    for n in norm_type:
        train_trans[n] = transforms.Compose([
            transforms.RandomResizedCrop(img_size),
            transforms.RandomHorizontalFlip(),
            transforms.RandomRotation(10),
            transforms.ColorJitter(0.1, 0.1),
            transforms.ToTensor(),
            norm["{}_{}".format(sub, n)],
        ])

        val_trans[n] = transforms.Compose([
            transforms.Resize([224, 224]),
            transforms.ToTensor(),
            norm["{}_{}".format(sub, n)],
        ])

    train_dataset = CASIA(
        train_trans, phase_train=True, sub=sub, modal=modal)
    val_dataset = CASIA(
        val_trans, phase_train=False, phase_test=args.phase_test,
        sub=sub, modal=modal)

    train_sampler = None
    val_sampler = None

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        shuffle=(train_sampler is None),
        num_workers=args.workers,
        pin_memory=(train_sampler is None),
        sampler=train_sampler)

    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=False,
                                             sampler=val_sampler)

    if args.evaluate:
        # Evaluation-only mode: run validation once and exit.
        validate(val_loader, model, criterion, args.start_epoch)
        return
    else:
        print(model)

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)
        # evaluate on validation set
        if args.train_val:
            prec1 = validate(val_loader, model, criterion, epoch)
            # remember best prec@1 and save checkpoints
            is_best = prec1 > best_prec1
            if is_best:
                print('epoch: {} The best is {} last best is {}'.format(
                    epoch, prec1, best_prec1))
            best_prec1 = max(prec1, best_prec1)

            # makedirs(exist_ok=True) also handles nested save paths, which
            # plain os.mkdir cannot create.
            os.makedirs(args.save_path, exist_ok=True)
            if is_best:
                save_name = '{}/{}_{}_{}_{}_best.pth.tar'.format(
                    args.save_path, sub, modal, args.model_name, epoch)
            else:
                save_name = '{}/{}_{}_{}_{}.pth.tar'.format(
                    args.save_path, sub, modal, args.model_name, epoch)
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, filename=save_name)
        else:
            # No validation requested: checkpoint every epoch unconditionally.
            os.makedirs(args.save_path, exist_ok=True)
            save_name = '{}/{}_{}_{}_{}.pth.tar'.format(
                args.save_path, sub, modal, args.model_name, epoch)
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, filename=save_name)
# ---- Example #4 (scrape artifact; original score: 0) ----
def main():
    """Train on CASIA-CeFA (loaded via ``load_cisia_csfa``).

    NOTE(review): unlike the other entry points, this one reads a global
    ``args`` without parsing CLI arguments or a YAML config here — it assumes
    ``args`` was populated before the call; confirm against the caller.
    """
    global args, best_prec1, USE_GPU, device
    ## Set random seeds ##
    torch.manual_seed(args.random_seed)
    np.random.seed(args.random_seed)
    random.seed(args.random_seed)

    # create models
    if args.input_size != 224 or args.image_size != 256:
        image_size = args.image_size
        input_size = args.input_size
    else:
        image_size = 114
        input_size = 112
    logger.Print("Input image size: {}, test size: {}".format(
        image_size, input_size))
    logger.Print(f"lr = {args.lr} batch_size = {args.batch_size}")
    logger.Print(f'protoal:{args.protoal}')

    model = Model(pretrained=False)
    # Parse the GPU list before building the device: args.gpus arrives as a
    # comma-separated string ("0,1"); indexing the raw string only worked by
    # accident for single-digit device ids.
    gpu_ids = ([int(i) for i in args.gpus.split(',')]
               if isinstance(args.gpus, str) else list(args.gpus))
    device = torch.device(
        'cuda:{}'.format(gpu_ids[0]) if torch.cuda.is_available() else "cpu")
    str_input_size = '1x3x112x112'
    if args.summary:
        # Print a layer-by-layer model summary and exit.
        input_size = tuple(int(x) for x in str_input_size.split('x'))
        stat(model, input_size)
        return
    if USE_GPU:
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.random_seed)
        # Single-device training here (no DataParallel, unlike the other
        # entry points); args.gpus is still normalized to a list of ints.
        args.gpus = gpu_ids
        model.to(device)

    pytorch_total_params = sum(p.numel() for p in model.parameters())
    logger.Print(f'total_params:{pytorch_total_params}')

    # define loss function (criterion) and optimizer
    if args.loss == "focal":
        criterion = FocalLoss(device, 2, gamma=args.fl_gamma)
    else:
        criterion = nn.CrossEntropyLoss()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    if args.speed:
        # Benchmark forward-pass speed and exit.
        input_size = tuple(int(x) for x in str_input_size.split('x'))
        iteration = 1000
        compute_speed(model, input_size, device, iteration)
        return

    # optionally resume from a checkpoint
    if args.resume:
        logger.Print(os.getcwd())
        if os.path.isfile(args.resume):
            logger.Print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            logger.Print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            logger.Print("=> no checkpoint found at '{}'".format(args.resume))

    img_size = args.input_size
    train_loader, val_loader = load_cisia_csfa(root=args.train_path,
                                               protoal=args.protoal,
                                               img_size=img_size,
                                               train_batch=args.batch_size)

    if args.evaluate:
        # Evaluation-only mode: run validation once and exit.
        validate(val_loader, model, criterion, args.start_epoch)
        return

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)
        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion, epoch)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        if is_best:
            logger.Print('epoch: {} The best is {} last best is {}'.format(
                epoch, prec1, best_prec1))
        best_prec1 = max(prec1, best_prec1)
        save_path = log_path + '/models/'
        # Bug fix: the original used "if not exists: mkdir / elif epoch % 2",
        # which skipped the checkpoint on the very epoch that created the
        # directory and coupled saving to directory existence. Ensure the
        # directory exists, then save every other epoch.
        os.makedirs(save_path, exist_ok=True)
        if epoch % 2 == 0:
            # NOTE(review): checkpoints are written under log_path, not the
            # freshly created save_path subdirectory — behavior preserved
            # as-is, but save_path looks like the intended destination;
            # confirm with the author.
            save_name = '{}/{}_{}.pth.tar'.format(log_path, args.arch, epoch)
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'best_prec1': best_prec1,
                    'optimizer': optimizer.state_dict(),
                },
                filename=save_name)