Code Example #1 (score: 0)
def main():
    """Evaluate a classification model on a dataset, reporting loss and
    top-1 / top-5 accuracy (plus the corresponding error rates at the end).
    """
    args = parser.parse_args()

    # DPN models benefit from test-time pooling when evaluated above their
    # native 224x224 resolution, unless explicitly disabled.
    test_time_pool = False
    if 'dpn' in args.model and args.img_size > 224 and not args.no_test_pool:
        test_time_pool = True

    # create model (ImageNet-1k head)
    num_classes = 1000
    model = model_factory.create_model(args.model,
                                       num_classes=num_classes,
                                       pretrained=args.pretrained,
                                       test_time_pool=test_time_pool)

    # generator expression avoids materializing a throwaway list of counts
    print('Model %s created, param count: %d' %
          (args.model, sum(m.numel() for m in model.parameters())))

    # optionally resume from a checkpoint; accept either a full training
    # checkpoint dict or a bare state_dict
    if args.restore_checkpoint and os.path.isfile(args.restore_checkpoint):
        print("=> loading checkpoint '{}'".format(args.restore_checkpoint))
        checkpoint = torch.load(args.restore_checkpoint)
        if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
            model.load_state_dict(checkpoint['state_dict'])
        else:
            model.load_state_dict(checkpoint)
        print("=> loaded checkpoint '{}'".format(args.restore_checkpoint))
    elif not args.pretrained:
        # neither a checkpoint nor pretrained weights: nothing to evaluate
        print("=> no checkpoint found at '{}'".format(args.restore_checkpoint))
        exit(1)

    if args.multi_gpu:
        model = torch.nn.DataParallel(model).cuda()
    else:
        model = model.cuda()

    # define loss function (criterion)
    criterion = nn.CrossEntropyLoss().cuda()

    cudnn.benchmark = True

    transforms = model_factory.get_transforms_eval(args.model, args.img_size)

    dataset = Dataset(args.data, transforms)

    loader = data.DataLoader(dataset,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=args.workers,
                             pin_memory=True)

    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()
    end = time.time()
    with torch.no_grad():
        # `images` instead of `input` to avoid shadowing the builtin
        for i, (images, target) in enumerate(loader):
            target = target.cuda()
            images = images.cuda()

            # compute output
            output = model(images)
            loss = criterion(output, target)

            # measure accuracy and record loss; no .data needed inside no_grad
            prec1, prec5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(prec1.item(), images.size(0))
            top5.update(prec5.item(), images.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                          i,
                          len(loader),
                          batch_time=batch_time,
                          loss=losses,
                          top1=top1,
                          top5=top5))

    print(
        ' * Prec@1 {top1.avg:.3f} ({top1a:.3f}) Prec@5 {top5.avg:.3f} ({top5a:.3f})'
        .format(top1=top1,
                top1a=100 - top1.avg,
                top5=top5,
                top5a=100. - top5.avg))
Code Example #2 (score: 0)
def main():
    """Run inference over a dataset and write each image's top-5 predicted
    class ids to ``top5_ids.csv`` in ``args.output_dir``.
    """
    args = parser.parse_args()

    # create model (ImageNet-1k head)
    num_classes = 1000
    model = model_factory.create_model(
        args.model,
        num_classes=num_classes,
        pretrained=args.pretrained,
        test_time_pool=args.test_time_pool)

    # resume from a checkpoint; accept either a full training checkpoint
    # dict or a bare state_dict
    if args.restore_checkpoint and os.path.isfile(args.restore_checkpoint):
        print("=> loading checkpoint '{}'".format(args.restore_checkpoint))
        checkpoint = torch.load(args.restore_checkpoint)
        if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
            model.load_state_dict(checkpoint['state_dict'])
        else:
            model.load_state_dict(checkpoint)
        print("=> loaded checkpoint '{}'".format(args.restore_checkpoint))
    elif not args.pretrained:
        # neither a checkpoint nor pretrained weights: nothing to predict with
        print("=> no checkpoint found at '{}'".format(args.restore_checkpoint))
        exit(1)

    if args.multi_gpu:
        model = torch.nn.DataParallel(model).cuda()
    else:
        model = model.cuda()

    transforms = model_factory.get_transforms_eval(
        args.model,
        args.img_size)

    dataset = Dataset(
        args.data,
        transforms)

    loader = data.DataLoader(
        dataset,
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    model.eval()

    batch_time = AverageMeter()
    end = time.time()
    top5_ids = []
    # torch.no_grad() replaces the removed `Variable(..., volatile=True)` API
    with torch.no_grad():
        for batch_idx, (images, _) in enumerate(loader):
            images = images.cuda()
            output = model(images)
            # topk returns (values, indices); keep only the class indices
            top5 = output.topk(5)[1]
            top5_ids.append(top5.cpu().numpy())

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if batch_idx % args.print_freq == 0:
                print('Predict: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})'.format(
                          batch_idx, len(loader), batch_time=batch_time))

    top5_ids = np.concatenate(top5_ids, axis=0).squeeze()

    # one CSV row per image: filename followed by its five class ids
    with open(os.path.join(args.output_dir, 'top5_ids.csv'), 'w') as out_file:
        filenames = dataset.filenames()
        for filename, label in zip(filenames, top5_ids):
            filename = os.path.basename(filename)
            out_file.write('{0},{1},{2},{3},{4},{5}\n'.format(
                filename, label[0], label[1], label[2], label[3], label[4]))
Code Example #3 (score: 0)
def main():
    """Predict top-5 class ids for every image in a dataset and dump them
    to ``top5_ids.csv`` under ``args.output_dir``.
    """
    args = parser.parse_args()

    # create model (ImageNet-1k head)
    num_classes = 1000
    model = model_factory.create_model(args.model,
                                       num_classes=num_classes,
                                       pretrained=args.pretrained,
                                       test_time_pool=args.test_time_pool)

    # resume from a checkpoint; accept either a full training checkpoint
    # dict or a bare state_dict
    if args.restore_checkpoint and os.path.isfile(args.restore_checkpoint):
        print("=> loading checkpoint '{}'".format(args.restore_checkpoint))
        checkpoint = torch.load(args.restore_checkpoint)
        if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
            model.load_state_dict(checkpoint['state_dict'])
        else:
            model.load_state_dict(checkpoint)
        print("=> loaded checkpoint '{}'".format(args.restore_checkpoint))
    elif not args.pretrained:
        # neither a checkpoint nor pretrained weights: nothing to predict with
        print("=> no checkpoint found at '{}'".format(args.restore_checkpoint))
        exit(1)

    if args.multi_gpu:
        model = torch.nn.DataParallel(model).cuda()
    else:
        model = model.cuda()

    transforms = model_factory.get_transforms_eval(args.model, args.img_size)

    dataset = Dataset(args.data, transforms)

    loader = data.DataLoader(dataset,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=args.workers,
                             pin_memory=True)

    model.eval()

    batch_time = AverageMeter()
    end = time.time()
    top5_ids = []
    # torch.no_grad() replaces the removed `Variable(..., volatile=True)` API
    with torch.no_grad():
        for batch_idx, (images, _) in enumerate(loader):
            images = images.cuda()
            output = model(images)
            # topk returns (values, indices); keep only the class indices
            top5 = output.topk(5)[1]
            top5_ids.append(top5.cpu().numpy())

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if batch_idx % args.print_freq == 0:
                print('Predict: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})'.format(
                          batch_idx, len(loader), batch_time=batch_time))

    top5_ids = np.concatenate(top5_ids, axis=0).squeeze()

    # one CSV row per image: filename followed by its five class ids
    with open(os.path.join(args.output_dir, 'top5_ids.csv'),
              'w') as out_file:
        filenames = dataset.filenames()
        for filename, label in zip(filenames, top5_ids):
            filename = os.path.basename(filename)
            out_file.write('{0},{1},{2},{3},{4},{5}\n'.format(
                filename, label[0], label[1], label[2], label[3], label[4]))
Code Example #4 (score: 0)
File: validate.py  Project: wangdingkang/RoadDetector
def main():
    """Validate a classification model, reporting loss and top-1/top-5
    accuracy.

    Fixes for modern Python/PyTorch: ``.cuda(async=True)`` is a Python 3
    syntax error (``async`` became a keyword) and is now
    ``non_blocking=True``; ``Variable(..., volatile=True)`` is replaced by
    ``torch.no_grad()``; ``loss.data[0]`` / ``prec1[0]`` indexing of
    0-dim tensors is replaced by ``.item()``.
    """
    args = parser.parse_args()

    # DPN models benefit from test-time pooling when evaluated above their
    # native 224x224 resolution, unless explicitly disabled.
    test_time_pool = False
    if 'dpn' in args.model and args.img_size > 224 and not args.no_test_pool:
        test_time_pool = True

    # create model (ImageNet-1k head)
    num_classes = 1000
    model = model_factory.create_model(
        args.model,
        num_classes=num_classes,
        pretrained=args.pretrained,
        test_time_pool=test_time_pool)

    print('Model %s created, param count: %d' %
          (args.model, sum(m.numel() for m in model.parameters())))

    # optionally resume from a checkpoint; accept either a full training
    # checkpoint dict or a bare state_dict
    if args.restore_checkpoint and os.path.isfile(args.restore_checkpoint):
        print("=> loading checkpoint '{}'".format(args.restore_checkpoint))
        checkpoint = torch.load(args.restore_checkpoint)
        if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
            model.load_state_dict(checkpoint['state_dict'])
        else:
            model.load_state_dict(checkpoint)
        print("=> loaded checkpoint '{}'".format(args.restore_checkpoint))
    elif not args.pretrained:
        print("=> no checkpoint found at '{}'".format(args.restore_checkpoint))
        exit(1)

    if args.multi_gpu:
        model = torch.nn.DataParallel(model).cuda()
    else:
        model = model.cuda()

    # define loss function (criterion)
    criterion = nn.CrossEntropyLoss().cuda()

    cudnn.benchmark = True

    transforms = model_factory.get_transforms_eval(
        args.model,
        args.img_size)

    dataset = Dataset(
        args.data,
        transforms)

    loader = data.DataLoader(
        dataset,
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    with torch.no_grad():
        for i, (images, target) in enumerate(loader):
            # non_blocking overlaps host->device copy with compute
            # (works with pin_memory=True on the loader)
            target = target.cuda(non_blocking=True)
            images = images.cuda(non_blocking=True)

            # compute output
            output = model(images)
            loss = criterion(output, target)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(prec1.item(), images.size(0))
            top5.update(prec5.item(), images.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                          i, len(loader), batch_time=batch_time, loss=losses,
                          top1=top1, top5=top5))

    print(' * Prec@1 {top1.avg:.3f} ({top1a:.3f}) Prec@5 {top5.avg:.3f} ({top5a:.3f})'.format(
        top1=top1, top1a=100 - top1.avg, top5=top5, top5a=100. - top5.avg))
Code Example #5 (score: 0)
def main():
    """Grid-search candidate learning rates for a single-logit (binary)
    classifier, training one model per candidate lr.

    Bug fix: the training setup (GPU placement, criterion, trainer, run)
    was previously nested inside the final ``else`` branch of the
    checkpoint/pretrained handling, so training only ever ran when neither
    a checkpoint nor pretrained weights were available. It now runs once
    for every candidate learning rate.
    """
    args = parser.parse_args()

    transforms = model_factory.get_transforms_eval(
        args.model,
        args.img_size)

    train_dataset = Dataset(
        data_json=data_path + "train_1.json",
        with_label=True,
        transform=None)

    val_dataset = Dataset(
        data_json=data_path + "val_1.json",
        with_label=True,
        transform=None)

    train_dataloader = data.DataLoader(
        train_dataset,
        batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True)

    val_dataloader = data.DataLoader(
        val_dataset,
        batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True)

    # len() is the idiomatic spelling of __len__()
    num_train = len(train_dataloader)

    log_file = open(
        args.output_dir + str(args.model) + "_grid_search_lr" +
        "_bs" + str(args.batch_size) + "_size" + str(args.img_size) + ".log",
        "a+")

    # configuration shared by every grid-search run; 'lr_schedule' is
    # overwritten per candidate lr inside the loop
    config = {
        'train_batch_size': args.batch_size, 'val_batch_size': 100,
        'arch': args.model, 'pretrained': args.pretrained,
        'optimizer': 'Adam', 'lr_schedule_idx': args.lr_schedule, 'lr_schedule': get_lr_schedule(args.lr_schedule), 'weight_decay': 0,
        'resume': None,
        'start_epoch': 0, 'epochs': 150,
        'print_freq': args.print_freq, 'validate_freq': num_train - 1, 'save_freq': num_train + 1,
        'log_file': log_file
    }

    lr_list = [[1e-2], [9e-3], [8e-3], [7e-3], [6e-3], [5e-3], [4e-3], [3e-3], [2e-3], [1e-3]]

    for lr in lr_list:

        print("\ntest lr" + str(lr), file=config['log_file'], flush=True)
        config["lr_schedule"] = lr

        # create a fresh model for this lr (single-logit binary head)
        num_classes = 1
        model = model_factory.create_model(
            args.model, num_classes=num_classes,
            pretrained=False, test_time_pool=args.test_time_pool)

        # resume from a checkpoint, or warm-start from pretrained weights
        if args.restore_checkpoint and os.path.isfile(args.restore_checkpoint):
            print("=> loading checkpoint '{}'".format(args.restore_checkpoint))
            checkpoint = torch.load(args.restore_checkpoint)

            print('Epoch: [{0}] iter: [{1}]\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                      checkpoint['epoch'], checkpoint['iter'],
                      loss=checkpoint['loss'],
                      top1=checkpoint['top1'],
                      top5=checkpoint['top5']))

            config = checkpoint['config']
            # set to resume mode
            config['resume'] = args.restore_checkpoint
            print(config)

            config['log_file'] = open(
                args.output_dir + str(config['arch']) +
                "_lr" + str(config['lr_schedule_idx']) +
                "_bs" + str(config['train_batch_size']) +
                "_size" + str(config['img_size']) + ".log", "a+")
        elif args.pretrained:
            print("using pretrained model")
            # strip any suffix (e.g. "resnet50_xxx" -> "resnet50") to load
            # the matching ImageNet-pretrained backbone
            original_model = args.model.split('_')[0]
            pretrained_model = model_factory.create_model(
                original_model, num_classes=1000,
                pretrained=args.pretrained, test_time_pool=args.test_time_pool)

            pretrained_state = pretrained_model.state_dict()
            model_state = model.state_dict()

            # copy all weights except the final classifier layer, whose
            # shape differs (1000 classes vs 1 logit)
            fc_layer_name = 'fc'
            if args.model.startswith('dpn'):
                fc_layer_name = 'classifier'

            for name, state in pretrained_state.items():
                if not name.startswith(fc_layer_name):
                    model_state[name].copy_(state)
        else:
            print("please use pretrained model")

        # training setup runs for every candidate lr (previously this was
        # unreachable except in the no-checkpoint/no-pretrained case)
        if args.multi_gpu:
            model = torch.nn.DataParallel(model).cuda()
        else:
            model = model.cuda()

        # define loss function (criterion); BCELoss matches the 1-logit head
        criterion = torch.nn.BCELoss().cuda()

        # get trainer and run this lr's training
        trainer = get_trainer(train_dataloader, val_dataloader, model, criterion, config)
        trainer.run()