Example #1
    def __init__(self, device, logger, opts):
        self.logger = logger
        self.device = device
        self.opts = opts

        self.model = make_model(opts)
        self.model = self.model.to(device)
        self.distributed = False

        logger.info(f"[!] Model made with{'out' if opts.no_pretrained else ''} pre-trained")

        logger.debug(self.model)

        # Set up the optimizer: one parameter group each for the backbone, head, and classifier
        params = []
        params.append({"params": filter(lambda p: p.requires_grad, self.model.body.parameters()),
                       'weight_decay': opts.weight_decay})

        params.append({"params": filter(lambda p: p.requires_grad, self.model.head.parameters()),
                       'weight_decay': opts.weight_decay})

        params.append({"params": filter(lambda p: p.requires_grad, self.model.cls.parameters()),
                       'weight_decay': opts.weight_decay})

        self.optimizer = torch.optim.SGD(params, lr=opts.lr, momentum=0.9, nesterov=True)
        self.scheduler = get_scheduler(opts, self.optimizer)
        logger.debug("Optimizer:\n%s" % self.optimizer)

        self.criterion = nn.CrossEntropyLoss(ignore_index=255, reduction='none')
        self.reduction = HardNegativeMining() if opts.hnm else MeanReduction()
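
HardNegativeMining and MeanReduction are referenced above but not defined on this page. A minimal sketch of what such reductions could look like, assuming the per-pixel losses come from the reduction='none' criterion above and that 255 marks ignored pixels (the class names match the call site; the bodies are an assumption, not the repository's code):

import torch

class MeanReduction:
    """Average the per-element losses, skipping ignored targets (label 255)."""
    def __call__(self, loss, target):
        return loss[target != 255].mean()

class HardNegativeMining:
    """Keep only the hardest fraction of the losses and average those."""
    def __init__(self, perc=0.25):
        self.perc = perc

    def __call__(self, loss, target):
        loss = loss.flatten()
        k = max(1, int(self.perc * loss.numel()))
        hardest, _ = torch.topk(loss, k)
        return hardest.mean()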
Example #2
def main():
    global args, config

    args = parser.parse_args()

    with open(args.config) as rPtr:
        config = EasyDict(yaml.safe_load(rPtr))  # safe_load: yaml.load() now requires an explicit Loader

    config.save_path = os.path.dirname(args.config)

    # Random seed
    torch.manual_seed(config.seed)
    torch.cuda.manual_seed(config.seed)
    np.random.seed(config.seed)
    random.seed(config.seed)

    # Datasets
    train_transform = transforms.Compose([
        transforms.RandomCrop((32, 32), padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.491, 0.482, 0.447), (0.247, 0.243, 0.262))
    ])
    val_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.491, 0.482, 0.447), (0.247, 0.243, 0.262))
    ])

    trainset = Datasets.CIFAR10(root='data', train=True, download=True, transform=train_transform)
    trainloader = Data.DataLoader(trainset, batch_size=config.batch_size, shuffle=True, num_workers=config.workers)

    testset = Datasets.CIFAR10(root='data', train=False, download=True, transform=val_transform)
    testloader = Data.DataLoader(testset, batch_size=config.batch_size, shuffle=False, num_workers=config.workers)

    # Model
    model = resnet32()
    model = model.cuda()

    # Optimizer
    criterion = nn.CrossEntropyLoss()

    params = model.parameters()
    if config.nobiasdecay:
        print("Applying the no-bias-decay heuristic.")
        # Groups biases and normalization parameters without weight decay (a sketch follows this example).
        params = Nobiasdecay(model)

    optimizer = optim.SGD(params, lr=config.lr_scheduler.base_lr, momentum=config.momentum,
            weight_decay=config.weight_decay)

    # LR scheduler
    lr_scheduler = get_scheduler(optimizer, config.lr_scheduler)

    global PCA, Writer
    PCA = PerClassAccuracy(num_classes=config.num_classes)
    Writer = SummaryWriter(config.save_path + '/events')
    for iter_idx in range(config.max_iter):
        train(model, iter_idx, criterion, lr_scheduler, optimizer, trainloader)
        val(model, iter_idx, criterion, testloader)

    Writer.close()
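
Nobiasdecay is used in Example #2 but not defined in this snippet. The usual no-bias-decay heuristic returns two parameter groups so that biases and 1-D normalization parameters skip weight decay while everything else inherits the optimizer's default; a minimal sketch under that assumption (the project's real helper may differ):

def Nobiasdecay(model):
    """Split parameters into a decayed group and an undecayed group."""
    decay, no_decay = [], []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        # Biases and BatchNorm/LayerNorm scales and shifts are 1-D tensors.
        if param.ndim <= 1 or name.endswith('.bias'):
            no_decay.append(param)
        else:
            decay.append(param)
    # The first group inherits the weight_decay passed to optim.SGD above;
    # the second explicitly disables it.
    return [{'params': decay}, {'params': no_decay, 'weight_decay': 0.0}]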
Example #3
    def cread_optimiter(self, config):
        # Build the optimizer, attach it to the LR-scheduler config, then create the scheduler.
        print("creating optimizer")
        self.optimizer = build_optimizer(config, self.model.parameters())
        self.config.lr_scheduler['optimizer'] = self.optimizer
        self.lr_scheduler = get_scheduler(self.config.lr_scheduler)
Example #4
def main(opt):
    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
    Dataset = get_dataset(opt.dataset, opt.task)
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)

    logger = Logger(opt)

    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')

    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv)
    optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    start_epoch = 0
    if opt.load_model != '':
        model, optimizer, start_epoch = load_model(
            model, opt.load_model, optimizer, opt.resume, opt.lr, opt.lr_step)

    print('Setting up data...')
    val_loader = torch.utils.data.DataLoader(
        Dataset(opt, 'val', opt.test_on_subset),
        batch_size=opt.batch_size * 2,
        shuffle=False,
        num_workers=opt.num_workers,
        pin_memory=True
    )

    train_loader = torch.utils.data.DataLoader(
        Dataset(opt, 'train'),
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=opt.num_workers,
        pin_memory=True,
        drop_last=True
    )

    num_iters = len(train_loader) if opt.num_iters < 0 else opt.num_iters

    if opt.lr_type == 'STEP':
        scheduler_config = {
            'type': 'STEP',
            'lr_steps': [step * num_iters for step in opt.lr_step],
            'lr_mults': opt.lr_mults,
            'base_lr': opt.lr,
            'warmup_steps': opt.warmup_step,
            'warmup_lr': opt.warmup_lr,
            'max_iter': opt.num_epochs * num_iters,
            'last_iter': start_epoch * num_iters - 1
        }
    elif opt.lr_type == 'COSINE':
        scheduler_config = {
            'type': 'COSINE',
            'base_lr': opt.lr,
            'warmup_steps': opt.warmup_step,
            'warmup_lr': opt.warmup_lr,
            'min_lr': 0.0,
            'max_iter': opt.num_epochs * num_iters,
            'last_iter': start_epoch * num_iters - 1
        }
    else:
        raise ValueError("lr_type should be STEP or COSINE.")

    lr_scheduler = get_scheduler(optimizer=optimizer, config=scheduler_config)

    Trainer = train_factory[opt.task]
    trainer = Trainer(opt, model, optimizer, logger, lr_scheduler=lr_scheduler)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

    if opt.test:
        _, preds = trainer.val(0, val_loader)
        val_loader.dataset.run_eval(preds, opt.save_dir)
        return

    print('Starting training...')
    best = 1e10
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        mark = epoch if opt.save_all else 'last'

        # FIXME: regenerate the dataloader every epoch so that frames are randomly re-sampled
        train_loader = torch.utils.data.DataLoader(
            Dataset(opt, 'train'),
            batch_size=opt.batch_size,
            shuffle=True,
            num_workers=opt.num_workers,
            pin_memory=True,
            drop_last=True
        )

        log_dict_train, _ = trainer.train(epoch, train_loader)
        logger.write('epoch: {} |'.format(epoch))
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_average/{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                       epoch, model, optimizer)
            with torch.no_grad():
                log_dict_val, preds = trainer.val(epoch, val_loader)
            for k, v in log_dict_val.items():
                logger.scalar_summary('val_average/{}'.format(k), v, epoch)
                logger.write('{} {:8f} | '.format(k, v))
            if log_dict_val[opt.metric] < best:
                best = log_dict_val[opt.metric]
                save_model(os.path.join(opt.save_dir, 'model_best.pth'),
                           epoch, model)
        else:
            save_model(os.path.join(opt.save_dir, 'model_last.pth'),
                       epoch, model, optimizer)
        logger.write('\n')
    logger.close()
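
Every example on this page calls a get_scheduler helper whose implementation is not shown, and the call signatures differ between projects. For the STEP/COSINE config dict built in Example #4, a rough sketch of such a helper could be built on torch.optim.lr_scheduler.LambdaLR; the config keys below are taken from that dict, but the logic itself is an assumption, not the repository's actual code:

import math
from bisect import bisect_right
from torch.optim.lr_scheduler import LambdaLR

def get_scheduler(optimizer, config):
    """Return an iteration-based LR scheduler driven by a STEP/COSINE config dict."""
    base_lr = config['base_lr']
    warmup_steps = config.get('warmup_steps', 0)
    warmup_lr = config.get('warmup_lr', base_lr)

    def step_factor(it):
        # Multiply the base LR by the accumulated lr_mults of the milestones already passed.
        passed = bisect_right(config['lr_steps'], it)
        factor = 1.0
        for mult in config['lr_mults'][:passed]:
            factor *= mult
        return factor

    def cosine_factor(it):
        min_lr = config.get('min_lr', 0.0)
        progress = (it - warmup_steps) / max(1, config['max_iter'] - warmup_steps)
        lr = min_lr + 0.5 * (base_lr - min_lr) * (1.0 + math.cos(math.pi * progress))
        return lr / base_lr

    def lr_lambda(it):
        if it < warmup_steps:
            # Linear warmup from warmup_lr up to base_lr, expressed as a factor of base_lr.
            lr = warmup_lr + (base_lr - warmup_lr) * it / max(1, warmup_steps)
            return lr / base_lr
        return step_factor(it) if config['type'] == 'STEP' else cosine_factor(it)

    # Resuming from config['last_iter'] would additionally require 'initial_lr' in each
    # param group, so this sketch always starts from scratch.
    return LambdaLR(optimizer, lr_lambda)

With this shape, calling scheduler.step() once per training iteration advances the schedule through warmup and then the chosen decay.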