Code Example #1
def train_seg(args):
    batch_size = args.batch_size
    num_workers = args.workers
    crop_size = args.crop_size

    for k, v in args.__dict__.items():
        print(k, ':', v)

    single_model = DRNSeg(args.arch, args.classes, None, pretrained=True)
    if args.pretrained:
        if torch.cuda.is_available():
            single_model.load_state_dict(torch.load(args.pretrained))
        else:
            print("CUDA not available!")
            single_model.load_state_dict(
                torch.load(args.pretrained, map_location='cpu'))
    model = torch.nn.DataParallel(single_model).cuda()
    # TODO: Add in weights [1.0, 0.1]
    criterion = nn.NLLLoss(ignore_index=255)
    criterion.cuda()

    # Data loading code
    data_dir = args.data_dir
    info = json.load(open(join(data_dir, 'info.json'), 'r'))
    normalize = transforms.Normalize(mean=info['mean'], std=info['std'])
    t = []
    if args.random_rotate > 0:
        t.append(transforms.RandomRotate(args.random_rotate))
    if args.random_scale > 0:
        t.append(transforms.RandomScale(args.random_scale))
    t.extend([
        transforms.RandomCrop(crop_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(), normalize
    ])
    train_loader = torch.utils.data.DataLoader(SegList(data_dir,
                                                       'train',
                                                       transforms.Compose(t),
                                                       list_dir=args.list_dir),
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=num_workers,
                                               pin_memory=True,
                                               drop_last=True)
    val_loader = torch.utils.data.DataLoader(SegList(
        data_dir,
        'val',
        transforms.Compose([
            transforms.RandomCrop(crop_size),
            transforms.ToTensor(),
            normalize,
        ]),
        list_dir=args.list_dir),
                                             batch_size=batch_size,
                                             shuffle=False,
                                             num_workers=num_workers,
                                             pin_memory=True,
                                             drop_last=True)

    # define loss function (criterion) and optimizer
    optimizer = torch.optim.SGD(single_model.optim_parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    cudnn.benchmark = True
    best_prec1 = 0
    start_epoch = 0

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    if args.evaluate:
        validate(val_loader, model, criterion, eval_score=accuracy)
        return

    for epoch in range(start_epoch, args.epochs):
        lr = adjust_learning_rate(args, optimizer, epoch)
        logger.info('Epoch: [{0}]\tlr {1:.06f}'.format(epoch, lr))
        # train for one epoch
        train(train_loader,
              model,
              criterion,
              optimizer,
              epoch,
              eval_score=accuracy)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion, eval_score=accuracy)

        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        checkpoint_path = args.arch + '_checkpoint_latest.pth'
        state = {
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1
        }
        save_checkpoint(state, is_best, checkpoint_path)
        if (epoch + 1) % 1 == 0:  # keep a per-epoch history copy of the checkpoint
            history_path = args.arch + '_checkpoint_{:03d}.pth'.format(epoch + 1)
            shutil.copyfile(checkpoint_path, history_path)
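
Note: Example #1 saves its state through a save_checkpoint helper that is not shown. A minimal sketch, assuming the (state, is_best, filename) signature used above; the 'model_best.pth' destination name is an assumption:

import shutil
import torch

def save_checkpoint(state, is_best, filename='checkpoint_latest.pth'):
    # Always persist the latest state; copy it aside when it is the best so far.
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, 'model_best.pth')  # assumed destination name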
Code Example #2
def train_dehaze(args, saveDirName='.', logger=None):
    batch_size = args.batch_size
    num_workers = args.workers
    crop_size = args.crop_size

    print(' '.join(sys.argv))

    # logging hyper-parameters
    for k, v in args.__dict__.items():
        logger.info('{0}:\t{1}'.format(k, v))

    # Generators
    net = generator(3, 3)
    net = nn.DataParallel(net).cuda()

    model = net

    # Criterion for updating weights
    criterion = nn.L1Loss()
    criterion = criterion.cuda()

    # Data loading code
    data_dir = args.data_dir

    t = []

    if args.random_scale > 0:
        t.append(transforms.RandomScale(args.random_scale))

    t.append(transforms.RandomCrop(crop_size))

    if args.random_rotate > 0:
        t.append(transforms.RandomRotate(args.random_rotate))
    t.extend([
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.RandomIdentityMapping(p=0.4),
        transforms.ToTensor(),
    ])

    # DataLoaders for training/validation dataset
    train_loader = torch.utils.data.DataLoader(DehazeList(
        data_dir, 'train', transforms.Compose(t), out_name=False),
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=num_workers,
                                               pin_memory=True,
                                               drop_last=True)

    val_loader = torch.utils.data.DataLoader(DehazeList(
        data_dir,
        'val',
        transforms.Compose([
            transforms.ToTensor(),
        ]),
        out_name=True),
                                             batch_size=batch_size,
                                             shuffle=False,
                                             num_workers=num_workers,
                                             pin_memory=False,
                                             drop_last=False)

    # define loss function (criterion) and optimizer
    optimizer = torch.optim.Adam(net.parameters(),
                                 args.lr,
                                 betas=(0.5, 0.999),
                                 weight_decay=args.weight_decay)

    cudnn.benchmark = True
    best_psnr1 = 0
    start_epoch = 0

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            start_epoch = checkpoint['epoch']
            best_psnr1 = checkpoint['best_psnr1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    lr = args.lr
    for epoch in range(start_epoch, args.epochs):
        lr = adjust_learning_rate(args, optimizer, epoch, lr)
        logger.info('Epoch: [{0}]\tlr {1:.06f}'.format(epoch + 1, lr))

        train(train_loader,
              model,
              criterion,
              optimizer,
              epoch,
              eval_score=psnr,
              logger=logger)

        psnr1 = 0

        if epoch % 5 == 4:
            psnr1 = validate(val_loader, model, criterion, eval_score=psnr,
                             save_vis=True, epoch=epoch, logger=logger,
                             best_score=best_psnr1)
        else:
            psnr1 = validate(val_loader, model, criterion, eval_score=psnr,
                             epoch=epoch, logger=logger, best_score=best_psnr1)

        if epoch == 0:
            best_psnr1 = psnr1

        is_best = (psnr1 >= best_psnr1)
        best_psnr1 = max(psnr1, best_psnr1)

        checkpoint_path = saveDirName + '/' + 'checkpoint_latest.pth.tar'

        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_psnr1': best_psnr1,
            },
            is_best,
            filename=checkpoint_path)

        if (epoch + 1) % 1 == 0:
            history_path = saveDirName + '/' + 'checkpoint_{:03d}.pth.tar'.format(
                epoch + 1)
            shutil.copyfile(checkpoint_path, history_path)
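
Note: Example #2 tracks the running learning rate outside the optimizer and updates it via adjust_learning_rate(args, optimizer, epoch, lr), which is defined elsewhere. A hedged sketch of a step schedule with that signature; the args.lr_step field and the 0.5 factor are assumptions:

def adjust_learning_rate(args, optimizer, epoch, lr):
    # Hypothetical step schedule: halve the rate every args.lr_step epochs
    # and write the new value back into every parameter group.
    if epoch > 0 and epoch % args.lr_step == 0:
        lr = lr * 0.5
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr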
Code Example #3
def main():
    train_transform = torchvision.transforms.Compose([
        transforms.ConvertToRGD(),
        transforms.SubtractImage(144),
        transforms.CenterCrop(351),
        transforms.RandomRotate(0, 360),
        transforms.CenterCrop(321),
        transforms.RandomTranslate(50),
        transforms.Resize(224),
        transforms.SelectRandomPos()
    ])
    val_transform = torchvision.transforms.Compose([
        transforms.ConvertToRGD(),
        transforms.SubtractImage(144),
        transforms.CenterCrop(321),
        transforms.Resize(224)
    ])

    train_dataset = dataset.CornellGraspDataset('cornell/train',
                                                transform=train_transform)
    val_dataset = dataset.CornellGraspDataset('cornell/val',
                                              transform=val_transform)
    test_dataset = dataset.CornellGraspDataset('cornell/test',
                                               transform=val_transform)
    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              shuffle=True)
    val_loader = DataLoader(val_dataset,
                            batch_size=args.batch_size,
                            shuffle=False)
    test_loader = DataLoader(test_dataset,
                             batch_size=args.batch_size,
                             shuffle=False)

    if args.is_this_loss == 'l1':
        loss = deep_twist.models.baseline.l1_loss
    else:
        loss = deep_twist.models.baseline.softmax_l1_loss

    one_hot = True
    if args.model == 'resnet':
        model = deep_twist.models.baseline.ResNet()
    elif args.model == 'alexnet':
        model = deep_twist.models.baseline.AlexNet()
        loss = deep_twist.models.baseline.softmax_l1_loss
    elif args.model == 'alexnet_prime':
        model = deep_twist.models.baseline.AlexNetPrime()
        loss = deep_twist.models.baseline.l1_loss
        one_hot = False
    elif args.model == 'vgg16':
        model = deep_twist.models.baseline.VGG16()

    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    train_utils.train_model(args,
                            model,
                            loss,
                            train_loader,
                            val_loader,
                            test_loader,
                            optimizer,
                            one_hot=one_hot)
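
Note: Example #3 reads args.batch_size, args.lr, args.model, and args.is_this_loss from a module-level args object that is not shown. A minimal argparse sketch covering only those fields; the flag spellings and defaults are assumptions:

import argparse

def parse_args():
    # Hypothetical parser for the fields used in main().
    parser = argparse.ArgumentParser(description='Cornell grasp training')
    parser.add_argument('--batch-size', dest='batch_size', type=int, default=32)
    parser.add_argument('--lr', type=float, default=1e-4)
    parser.add_argument('--model', default='resnet',
                        choices=['resnet', 'alexnet', 'alexnet_prime', 'vgg16'])
    parser.add_argument('--is-this-loss', dest='is_this_loss', default='l1')
    return parser.parse_args()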
Code Example #4
def train_seg(args):
    batch_size = args.batch_size
    num_workers = args.workers
    crop_size = args.crop_size

    print(' '.join(sys.argv))

    for k, v in args.__dict__.items():
        print(k, ':', v)

    single_model = DRNSeg(args.arch, args.classes, None)  #,
    #pretrained=False)
    # if args.pretrained:
    #     single_model.load_state_dict(torch.load(args.pretrained))
    model = torch.nn.DataParallel(single_model).cuda()
    criterion = nn.NLLLoss(ignore_index=255)  # NLLLoss2d is deprecated; NLLLoss handles the 2-D case

    criterion.cuda()

    # Data loading code
    data_dir = args.data_dir
    # info = json.load(open(join(data_dir, 'info.json'), 'r'))
    # normalize = transforms.Normalize(mean=info['mean'],
    #                                  std=info['std'])
    t = []
    if args.random_rotate > 0:
        t.append(transforms.RandomRotate(args.random_rotate))
    if args.random_scale > 0:
        t.append(transforms.RandomScale(args.random_scale))
    # t.extend([transforms.RandomCrop(crop_size),
    #           transforms.RandomHorizontalFlip(),
    #           transforms.ToTensor(),
    #           #normalize])

    transform = transforms.ToTensor()
    dataset = CORE50(root='/home/akash/core50/data/core50_128x128',
                     scenario="ni")
    for i, train_batch in enumerate(dataset):
        # WARNING train_batch is NOT a mini-batch, but one incremental batch!
        # You can later train with SGD indexing train_x and train_y properly.
        train_x, train_y = train_batch

        print("----------- batch {0} -------------".format(i))
        print("train_x shape: {}, train_y shape: {}".format(
            train_x.shape, train_y.shape))

        helper_dataset = CORE50Helper(train_x, train_y, t, transform=transform)
        train_loader = DataLoader(helper_dataset,
                                  batch_size=1,
                                  shuffle=True,
                                  num_workers=4)

        # train_loader = torch.utils.data.DataLoader(
        #     SegList(data_dir, 'train', transforms.Compose(t),
        #             list_dir=args.list_dir),
        #     batch_size=batch_size, shuffle=True, num_workers=num_workers,
        #     pin_memory=True, drop_last=True
        # )
        # val_loader = torch.utils.data.DataLoader(
        #     SegList(data_dir, 'val', transforms.Compose([
        #         transforms.RandomCrop(crop_size),
        #         transforms.ToTensor(),
        #         normalize,
        #     ]), list_dir=args.list_dir),
        #     batch_size=batch_size, shuffle=False, num_workers=num_workers,
        #     pin_memory=True, drop_last=True
        # )

        # define loss function (criterion) and optimizer
        optimizer = torch.optim.SGD(single_model.optim_parameters(),
                                    args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)

        cudnn.benchmark = True
        best_prec1 = 0
        start_epoch = 0

        # optionally resume from a checkpoint
        if args.resume:
            if os.path.isfile(args.resume):
                print("=> loading checkpoint '{}'".format(args.resume))
                checkpoint = torch.load(args.resume)
                start_epoch = checkpoint['epoch']
                best_prec1 = checkpoint['best_prec1']
                model.load_state_dict(checkpoint['state_dict'])
                print("=> loaded checkpoint '{}' (epoch {})".format(
                    args.resume, checkpoint['epoch']))
            else:
                print("=> no checkpoint found at '{}'".format(args.resume))

        # if args.evaluate:
        #     validate(val_loader, model, criterion, eval_score=accuracy)
        #     return

        for epoch in range(start_epoch, args.epochs):
            lr = adjust_learning_rate(args, optimizer, epoch)
            logger.info('Epoch: [{0}]\tlr {1:.06f}'.format(epoch, lr))
            # train for one epoch
            writer1 = SummaryWriter()
            train(train_loader,
                  model,
                  criterion,
                  optimizer,
                  epoch,
                  writer1,
                  eval_score=accuracy)
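
Note: Example #4 wraps each incremental CORe50 batch in a CORE50Helper dataset before handing it to a DataLoader. A minimal sketch inferred from the call site CORE50Helper(train_x, train_y, t, transform=transform); the joint segmentation transforms (the t list) are stored but not applied in this sketch:

from torch.utils.data import Dataset

class CORE50Helper(Dataset):
    # Wrap one incremental batch of numpy arrays so a DataLoader can sample it.
    def __init__(self, train_x, train_y, joint_transforms=None, transform=None):
        self.x = train_x
        self.y = train_y
        self.joint_transforms = joint_transforms  # paired image/label transforms, unused here
        self.transform = transform

    def __len__(self):
        return len(self.x)

    def __getitem__(self, idx):
        img, label = self.x[idx], self.y[idx]
        if self.transform is not None:
            img = self.transform(img)
        return img, label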
Code Example #5
def train_dehaze(args, save_dir='.', logger=None):
    batch_size = args.batch_size
    num_workers = args.workers
    crop_size = args.crop_size

    # logging hyper-parameters
    for k, v in args.__dict__.items():
        logger.info('{0}:\t{1}'.format(k, v))

    # Dehazing model
    net = dehaze_net(3, 3, activation=nn.ReLU(inplace=True))
    net = nn.DataParallel(net).cuda()

    # criterion for updating weights
    criterion = dehazing_loss(coeff_l1=args.coeff_l1,
                              coeff_cl=args.coeff_cl,
                              coeff_ssim=args.coeff_ssim)
    criterion = criterion.cuda()

    # data-loading code
    data_dir = args.data_dir

    t = []

    if args.random_scale > 0:
        t.append(transforms.RandomScale(args.random_scale))

    t.append(transforms.RandomCrop(crop_size))

    if args.random_rotate > 0:
        t.append(transforms.RandomRotate(args.random_rotate))
    t.extend([
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.RandomIdentityMapping(p=args.random_identity_mapping),
        transforms.ToTensor(),
    ])

    # DataLoaders for training/validation dataset
    train_loader = torch.utils.data.DataLoader(DehazeList(
        data_dir, 'train', transforms.Compose(t), out_name=False),
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=num_workers,
                                               pin_memory=True,
                                               drop_last=True)

    val_loader = torch.utils.data.DataLoader(DehazeList(
        data_dir,
        'val',
        transforms.Compose([
            transforms.ToTensor(),
        ]),
        out_name=True),
                                             batch_size=batch_size,
                                             shuffle=False,
                                             num_workers=num_workers,
                                             pin_memory=False,
                                             drop_last=False)

    # define loss function (criterion) and optimizer
    optimizer = torch.optim.Adam(net.parameters(),
                                 args.lr,
                                 betas=(0.5, 0.999),
                                 weight_decay=args.weight_decay)

    cudnn.benchmark = True
    best_score = 0
    start_epoch = 0
    train_losses, train_scores, val_losses, val_scores = [], [], [], []

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            start_epoch = checkpoint['epoch']
            best_score = checkpoint['best_score']
            optimizer = checkpoint['optimizer']
            train_losses, train_scores, val_losses, val_scores = checkpoint[
                'training_log']
            net.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    lr = args.lr
    for epoch in range(start_epoch, args.epochs):
        lr = adjust_learning_rate(args, optimizer, epoch, lr)
        logger.info('Epoch: [{0}]\tlr {1:.06f}'.format(epoch + 1, lr))

        train_loss, train_score = train(train_loader,
                                        net,
                                        criterion,
                                        optimizer,
                                        epoch,
                                        eval_score=psnr,
                                        logger=logger)

        val_loss, val_score = 0, 0

        if epoch % 10 == 9:
            val_loss, val_score = validate(val_loader, net, criterion,
                                           eval_score=psnr, save_vis=True,
                                           output_dir=save_dir, epoch=epoch,
                                           logger=logger, best_score=best_score)
        else:
            val_loss, val_score = validate(val_loader, net, criterion,
                                           eval_score=psnr, epoch=epoch,
                                           output_dir=save_dir, logger=logger,
                                           best_score=best_score)

        train_losses.append(train_loss)
        val_losses.append(val_loss)
        train_scores.append(train_score)
        val_scores.append(val_score)

        if epoch == 0:
            best_score = val_score

        is_best = (val_score >= best_score)
        best_score = max(val_score, best_score)

        checkpoint_path = save_dir + '/' + 'checkpoint_latest.pth.tar'

        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': net.state_dict(),
                'best_score': best_score,
                'optimizer': optimizer,
                'training_log': (train_losses, train_scores,
                                 val_losses, val_scores),
            },
            is_best,
            filename=checkpoint_path)

        if epoch % 10 == 9:
            history_path = save_dir + '/' + 'checkpoint_{:03d}.pth.tar'.format(
                epoch + 1)
            shutil.copyfile(checkpoint_path, history_path)
            draw_curves(train_losses,
                        train_scores,
                        val_losses,
                        val_scores,
                        epoch + 1,
                        save_dir=save_dir + '/curves')
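
Note: Examples #2 and #5 both evaluate with eval_score=psnr, but the metric itself is defined elsewhere. A minimal PSNR sketch in PyTorch, assuming outputs and targets are already scaled to [0, max_val]:

import torch

def psnr(output, target, max_val=1.0):
    # Peak signal-to-noise ratio in dB, averaged over the whole batch.
    mse = torch.mean((output - target) ** 2)
    if mse == 0:
        return float('inf')
    return (10 * torch.log10(max_val ** 2 / mse)).item()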
Code Example #6
File: sodgan.py  Project: yongwuSHU/Advsal
def run(settings):
    settings.description = 'SODGAN with default settings.'
    settings.train_dir = env_settings().msra10k_dir
    settings.ckpt = env_settings().workspace_dir
    settings.multi_gpu = False  # multi-GPU training
    settings.iter_num = 5000  # Number of iterations; 2000 for the first training run
    settings.batch_size = 8
    settings.diter_num = 10000  # Number of discriminator iterations; 0 to train only the generator, 10000 for the second run
    settings.last_iter = 2000  # 0 for the first training run, 2000 for the second
    settings.lr = 1e-3
    settings.lr_decay = 0.9
    settings.weight_decay = 5e-4
    settings.momentum = 0.9
    settings.snapshot = '2000'  # Empty string for the first training run, '2000' for the second
    settings.num_workers = 12  # Number of workers for image loading
    settings.normalize_mean = [
        0.485, 0.456, 0.406
    ]  # Normalize mean (default pytorch ImageNet values)
    settings.normalize_std = [
        0.229, 0.224, 0.225
    ]  # Normalize std (default pytorch ImageNet values)
    settings.log_path = os.path.join(settings.ckpt,
                                     str(datetime.datetime.now()) + '.txt')

    joint_transform = transforms.Compose([
        transforms.RandomCrop(300),
        transforms.RandomHorizontallyFlip(),
        transforms.RandomRotate(10)
    ])
    img_transform = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(mean=settings.normalize_mean,
                                         std=settings.normalize_std)
    ])
    target_transform = torchvision.transforms.ToTensor()

    train_set = ImageFolder(settings.train_dir, joint_transform, img_transform,
                            target_transform)
    train_loader = DataLoader(train_set,
                              batch_size=settings.batch_size,
                              num_workers=settings.num_workers,
                              shuffle=True)
    objective = nn.BCELoss().cuda()

    net = Net().cuda().train()
    if settings.multi_gpu:
        net = MultiGPU(net, dim=0)

    # Two parameter groups: biases get twice the base learning rate and no weight decay.
    optimizer = optim.SGD([
        {'params': [param for name, param in net.named_parameters()
                    if name[-4:] == 'bias'],
         'lr': 2 * settings.lr},
        {'params': [param for name, param in net.named_parameters()
                    if name[-4:] != 'bias'],
         'lr': settings.lr,
         'weight_decay': settings.weight_decay}
    ], momentum=settings.momentum)

    if len(settings.snapshot) > 0:
        print('training resumes from ' + settings.snapshot)
        net = net.module if multigpu.is_multi_gpu(net) else net
        net.load_state_dict(
            torch.load(os.path.join(settings.ckpt,
                                    settings.snapshot + '.pth')))
        optimizer.load_state_dict(
            torch.load(
                os.path.join(settings.ckpt, settings.snapshot + '_optim.pth')))
        optimizer.param_groups[0]['lr'] = 2 * settings.lr
        optimizer.param_groups[1]['lr'] = settings.lr
    else:
        optimizer.param_groups[0]['lr'] = 2 * settings.lr
        optimizer.param_groups[1]['lr'] = settings.lr

    check_mkdir(settings.ckpt)
    open(settings.log_path, 'w').write(str(settings) + '\n\n')

    trainer = LTRTrainer(net,
                         objective,
                         optimizer,
                         train_loader,
                         settings,
                         t=0)
    trainer.train(settings.diter_num)  # 0 for first training
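
Note: Example #6 sets settings.lr_decay = 0.9 and splits the parameters into a bias group (2x learning rate) and a weight group, but the decay itself happens inside LTRTrainer, which is not shown. A hedged sketch of the per-iteration polynomial decay commonly paired with this setup; the function name and curr_iter argument are assumptions:

def poly_decay_lr(optimizer, settings, curr_iter):
    # Polynomial decay toward zero over settings.iter_num iterations;
    # the bias group (index 0) keeps its doubled learning rate.
    scale = (1 - float(curr_iter) / settings.iter_num) ** settings.lr_decay
    optimizer.param_groups[0]['lr'] = 2 * settings.lr * scale
    optimizer.param_groups[1]['lr'] = settings.lr * scale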