validation_dataset = ChestXrayDataSetMultiLabel(DATA_DIR,
                                                VALIDATION_IMAGE_LIST,
                                                transform=transform_val)
validation_dataloader = DataLoader(dataset=validation_dataset,
                                   batch_size=BATCH_SIZE,
                                   shuffle=False,
                                   num_workers=4,
                                   pin_memory=True,
                                   drop_last=True)

test_dataset = ChestXrayDataSetMultiLabel(
    data_dir=DATA_DIR,
    image_list_file=TEST_IMAGE_LIST,
    transform=transforms.Compose([
        transforms.Resize(400),
        transforms.TenCrop(299),
        transforms.Lambda(lambda crops: torch.stack(
            [transforms.ToTensor()(crop) for crop in crops])),
        transforms.Lambda(lambda crops: torch.stack([
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            (crop) for crop in crops
        ]))
    ]))

test_dataloader = DataLoader(dataset=test_dataset,
                             batch_size=BATCH_SIZE,
                             shuffle=False,
                             num_workers=4,
                             pin_memory=True,
                             drop_last=True)
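Here and in the later examples, TenCrop produces batches of shape [bs, 10, C, H, W]. A minimal evaluation sketch for the loaders above (the `model` is an assumption, not part of this snippet): the crops are folded into the batch dimension and the per-crop outputs averaged back.

model.eval()
with torch.no_grad():
    for images, targets in test_dataloader:
        bs, n_crops, c, h, w = images.size()               # [bs, 10, C, H, W]
        outputs = model(images.view(-1, c, h, w))          # fold crops into the batch
        crop_mean = outputs.view(bs, n_crops, -1).mean(1)  # average the ten crops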
Example #2
preprocess_hflip = transforms.Compose([
    transforms.Resize(image_size),
    transforms.CenterCrop(input_size),
    HorizontalFlip(),
    transforms.ToTensor(), normalize
])


def temp_trans(torch_image):
    torch_image = transforms.ToTensor()(torch_image)
    return normalize(torch_image)


preprocess_tencrop = transforms.Compose([
    transforms.Resize(image_size),
    transforms.TenCrop(input_size),
    transforms.Lambda(
        lambda crops: torch.stack([temp_trans(crop) for crop in crops])),
    # transforms.ToTensor(),
    # normalize
])


def main():

    TTA2_preprocess = [preprocess, preprocess_hflip]
    TTA10_preprocess = [preprocess_tencrop]
    TTA12_preprocess = [preprocess, preprocess_hflip, preprocess_tencrop]
    id = 0
    print("testing {}.....".format(ckp_path))
Example #3
         transforms.ToTensor(),
         normalize,
     ]),
 'val_em': transforms.Compose([
         # transforms.Resize(290),
         # transforms.RandomAffine(degrees=0, shear=10),
         # transforms.CenterCrop(250),
         transforms.RandomResizedCrop(224, scale=(0.7, 1)),
         transforms.ColorJitter(brightness=0.4, contrast=0.4),
         # transforms.RandomHorizontalFlip(),
         transforms.ToTensor(),
         normalize,
     ]),
 'test_v1': transforms.Compose([
         transforms.Resize(180),
         transforms.TenCrop(160),
         # TenCrop returns a tuple of PIL crops, so ToTensor/normalize must be
         # applied per crop and the results stacked into one tensor
         transforms.Lambda(lambda crops: torch.stack(
             [normalize(transforms.ToTensor()(crop)) for crop in crops])),
     ]),
 'test_v2': transforms.Compose([
         transforms.Resize(250),
         transforms.TenCrop(224),
         transforms.Lambda(lambda crops: torch.stack(
             [normalize(transforms.ToTensor()(crop)) for crop in crops])),
     ]),
 'rtest_v1': transforms.Compose([
         transforms.Resize(180),
         transforms.TenCrop(160),
         transforms.Lambda(lambda crops: torch.stack(
             [normalize(transforms.ToTensor()(crop)) for crop in crops])),
     ]),
Example #4
def main():
    best_prec1 = 0

    if not os.path.isdir(args.save_dir):
        os.makedirs(args.save_dir)
    # log = open(os.path.join(args.save_dir, '{}.{}.log'.format(args.arch,args.prefix)), 'a')

    # create model
    print("=> creating model '{}'\n".format(args.arch))
    model = imagenet_models.__dict__[args.arch](1000)
    print("=> Model : {}\n".format(args.arch))
    # print("=> Model : {}\n".format(model))
    # print("=> parameter : {}\n".format(args))

    if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume,
                                    map_location=lambda storage, loc: storage)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            del checkpoint['state_dict'][
                'module.conv1.weights']  # fix this bug
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            for state in optimizer.state.values():
                for k, v in state.items():
                    if torch.is_tensor(v):
                        state[k] = v.cuda()

            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val-pytorch')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               sampler=None)

    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    val_loader_10 = torch.utils.data.DataLoader(datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.TenCrop(224),
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop) for crop in crops])),
            transforms.Lambda(
                lambda crops: torch.stack([normalize(crop) for crop in crops]))
        ])),
                                                batch_size=16,
                                                shuffle=False,
                                                num_workers=args.workers,
                                                pin_memory=True)

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    best_p1, best_p5, best_c10_p1, best_c10_p5 = 0, 0, 0, 0
    filename = os.path.join(
        args.save_dir,
        'checkpoint.{}.{}.pth.tar'.format(args.arch, args.prefix))
    bestname = os.path.join(
        args.save_dir, 'best.{}.{}.pth.tar'.format(args.arch, args.prefix))

    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                     milestones=milestones,
                                                     gamma=0.1)

    start_time = time.time()
    epoch_time = AverageMeter()
    for epoch in range(args.start_epoch, args.epochs):
        need_hour, need_mins, need_secs = convert_secs2time(
            epoch_time.val * (args.epochs - epoch))
        need_time = '[Need: {:02d}:{:02d}:{:02d}]'.format(
            need_hour, need_mins, need_secs)
        print(' [{:s}] :: {:3d}/{:3d} ----- [{:s}] {:s}'.format(
            args.arch, epoch, args.epochs, time_string(), need_time))

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # step the LR scheduler after training (required order since PyTorch 1.1)
        scheduler.step()

        # evaluate on validation set
        prec1, prec5 = validate(val_loader, model, criterion, epoch)
        c10_p1, c10_p5 = validate_10crop(val_loader_10, model, criterion,
                                         epoch)

        # store the best
        best_p1 = max(prec1, best_p1)
        best_p5 = max(prec5, best_p5)
        best_c10_p1 = max(c10_p1, best_c10_p1)
        best_c10_p5 = max(c10_p5, best_c10_p5)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best, filename, bestname)
        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    # loss_history, top1_history, top5_history
    print('training loss', loss_history)
    print('training top1 accuracy', top1_history)
    print('training top5 accuracy', top5_history)
    print('testing top1 accuracy', test_top1_history)
    print('testing top5 accuracy', test_top5_history)
    print('10-crop top1', crop10_top1_history)
    print('10-crop top5', crop10_top5_history)
    print('Best single-crop top 1: {0:.2f}\n'
          'Best single-crop top 5: {1:.2f}\n'
          'Best 10-crop top 1: {2:.2f}\n'
          'Best 10-crop top 5: {3:.2f}\n'.format(best_p1, best_p5, best_c10_p1,
                                                 best_c10_p5))
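validate_10crop is not shown in this snippet; a minimal sketch of what it presumably does, reusing AverageMeter from above (the `accuracy` top-k helper is an assumption):

def validate_10crop(loader, model, criterion, epoch):
    model.eval()
    top1, top5 = AverageMeter(), AverageMeter()
    with torch.no_grad():
        for inp, target in loader:
            target = target.cuda()
            bs, n_crops, c, h, w = inp.size()
            output = model(inp.view(-1, c, h, w).cuda())
            output = output.view(bs, n_crops, -1).mean(1)  # average the ten crops
            prec1, prec5 = accuracy(output, target, topk=(1, 5))
            top1.update(prec1.item(), bs)
            top5.update(prec5.item(), bs)
    return top1.avg, top5.avg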
Example #5

cut_size = 44
total_epoch = 250

path = os.path.join(opt.dataset + '_' + opt.model)

# Data
print('==> Preparing data..')
transform_train = transforms.Compose([
    transforms.RandomCrop(44),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
])

transform_test = transforms.Compose([
    transforms.TenCrop(cut_size),
    transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),
])

trainset = FER2013(split='Training', transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=opt.bs, shuffle=True, num_workers=1)
PublicTestset = FER2013(split='PublicTest', transform=transform_test)
PublicTestloader = torch.utils.data.DataLoader(PublicTestset, batch_size=opt.bs, shuffle=False, num_workers=1)
PrivateTestset = FER2013(split='PrivateTest', transform=transform_test)
PrivateTestloader = torch.utils.data.DataLoader(PrivateTestset, batch_size=opt.bs, shuffle=False, num_workers=1)

# Model
if opt.model == 'VGG19':
    net = VGG('VGG19')
elif opt.model  == 'Resnet18':
    net = ResNet18()
Example #6
def main():

    cudnn.benchmark = True

    # initialize and load the model
    model = DenseNet121(N_CLASSES).cuda()
    model = torch.nn.DataParallel(model).cuda()

    if os.path.isfile(CKPT_PATH):
        print("=> loading checkpoint")
        checkpoint = torch.load(CKPT_PATH)
        # remap parameter names saved with pre-0.4 torchvision DenseNets
        # (e.g. 'norm.1.weight' -> 'norm1.weight')
        state_dict = checkpoint['state_dict']
        pattern = re.compile(
                r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$'
                )
        for key in list(state_dict.keys()):
            res = pattern.match(key)
            if res:
                new_key = res.group(1) + res.group(2)
                state_dict[new_key] = state_dict[key]
                del state_dict[key]
        model.load_state_dict(state_dict)
        print("=> loaded checkpoint")
    else:
        print("=> no checkpoint found")

    normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])

    test_dataset = ChestXrayDataSet(data_dir=DATA_DIR,
                                    image_list_file=TEST_IMAGE_LIST,
                                    transform=transforms.Compose([
                                        transforms.Resize(256),
                                        transforms.TenCrop(224),
                                        transforms.Lambda
                                        (lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),
                                        transforms.Lambda
                                        (lambda crops: torch.stack([normalize(crop) for crop in crops]))
                                    ]))
    test_loader = DataLoader(dataset=test_dataset, batch_size=BATCH_SIZE,
                             shuffle=False, num_workers=8, pin_memory=True)

    # initialize the ground truth and output tensor
    gt = torch.FloatTensor()
    gt = gt.cuda()
    pred = torch.FloatTensor()
    pred = pred.cuda()

    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        for i, (inp, target) in enumerate(test_loader):
            target = target.cuda()
            gt = torch.cat((gt, target), 0)
            bs, n_crops, c, h, w = inp.size()
            input_var = inp.view(-1, c, h, w).cuda()  # Variable wrapping is unnecessary inside no_grad()
            output = model(input_var)
            output_mean = output.view(bs, n_crops, -1).mean(1)
            pred = torch.cat((pred, output_mean.data), 0)

    AUROCs = compute_AUCs(gt, pred)
    AUROC_avg = np.array(AUROCs).mean()
    print('The average AUROC is {AUROC_avg:.3f}'.format(AUROC_avg=AUROC_avg))
    for i in range(N_CLASSES):
        print('The AUROC of {} is {}'.format(CLASS_NAMES[i], AUROCs[i]))
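compute_AUCs is not included above; a minimal sketch assuming the usual per-class helper built on sklearn:

from sklearn.metrics import roc_auc_score

def compute_AUCs(gt, pred):
    gt_np, pred_np = gt.cpu().numpy(), pred.cpu().numpy()
    return [roc_auc_score(gt_np[:, i], pred_np[:, i]) for i in range(N_CLASSES)]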
Example #7
def test(test_pth):
    CLASS_NAMES = [
        'Opacity', 'Cardiomegaly', 'Pulmonary Atelectasis', 'Calcinosis',
        'Lung/hypoinflation', 'Calcified Granuloma',
        'Thoracic Vertebrae/degenerative', 'Lung/hyperdistention',
        'Spine/degenerative', 'Aorta/tortuous', 'Pleural Effusion',
        'Atherosclerosis', 'Airspace Disease', 'Granulomatous Disease',
        'Nodule', 'Scoliosis'
    ]
    # CLASS_NAMES=['abnormal']
    cudnn.benchmark = True

    net = Net().cuda()
    net = torch.nn.DataParallel(net).cuda()

    modelCheckpoint = torch.load(test_pth)
    net.load_state_dict(modelCheckpoint['state_dict'])

    normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])

    transformList = []
    transformList.append(transforms.Resize([256, 256]))
    transformList.append(transforms.TenCrop([224, 224]))
    transformList.append(
        transforms.Lambda(lambda crops: torch.stack(
            [transforms.ToTensor()(crop) for crop in crops])))
    transformList.append(
        transforms.Lambda(
            lambda crops: torch.stack([normalize(crop) for crop in crops])))
    transformSequence = transforms.Compose(transformList)

    datasetTest = DatasetGenerator(pathImageDirectory=pathDirData,
                                   pathDatasetFile=pathFileTest,
                                   transform=transformSequence)
    dataLoaderTest = DataLoader(dataset=datasetTest,
                                batch_size=32,
                                num_workers=8,
                                shuffle=False,
                                pin_memory=True)

    outGT = torch.FloatTensor().cuda()
    outPRED = torch.FloatTensor().cuda()

    net.eval()  # evaluation mode was missing; required for BatchNorm/Dropout
    with torch.no_grad():  # replaces the deprecated volatile=True flag
        for i, (input1, input2, target) in enumerate(dataLoaderTest):
            target = target.cuda()
            outGT = torch.cat((outGT, target), 0)
            bs, n_crops, c, h, w = input1.size()
            varInput1 = input1.view(-1, c, h, w).cuda()
            varInput2 = input2.view(-1, c, h, w).cuda()
            out = net(varInput1, varInput2)
            outMean = out.view(bs, n_crops, -1).mean(1)
            outPRED = torch.cat((outPRED, outMean.data), 0)

    plt_roc(outGT, outPRED)
    aurocIndividual = computeAUROC(outGT, outPRED, 16)
    aurocMean = np.array(aurocIndividual).mean()
    print('AUROC mean ', aurocMean)

    for i in range(0, len(aurocIndividual)):
        print(CLASS_NAMES[i], ' ', aurocIndividual[i])

    return
Example #8
    def __init__(self,
                 train,
                 means=None,
                 stds=None,
                 size=None,
                 resize=None,
                 scale=None,
                 ratio=None,
                 colorjitter=None,
                 random_grayscale=None,
                 random_hflip=None,
                 tencrops=False):
        means = means if means is not None else cfg.tfm_means
        stds = stds if stds is not None else cfg.tfm_stds
        size = size if size is not None else cfg.tfm_size
        resize = resize if resize is not None else cfg.tfm_resize
        scale = scale if scale is not None else cfg.tfm_scale
        ratio = ratio if ratio is not None else cfg.tfm_ratio
        colorjitter = colorjitter if colorjitter is not None else cfg.tfm_colorjitter
        random_grayscale = random_grayscale if random_grayscale is not None else cfg.tfm_random_grayscale
        random_hflip = random_hflip if random_hflip is not None else cfg.tfm_random_hflip

        self.transforms = []
        if train:
            # size transform
            if scale is not None:
                logger.debug('Training samples will be '
                             'randomly resized with scale [%s] and ratio [%s] '
                             'then cropped to size [%s]' %
                             (scale, ratio, size))
                self.transforms.append(
                    transforms.RandomResizedCrop(size=size,
                                                 scale=scale,
                                                 ratio=ratio))
            else:
                logger.debug(
                    'Training samples will be resized to [%s] and then '
                    'randomly cropped to [%s]' % (resize, size))
                self.transforms.append(transforms.Resize(size=resize))
                self.transforms.append(transforms.RandomCrop(size))
            # colorjitter
            if colorjitter is not None:
                logger.debug(
                    'Training samples will be enhanced with colorjitter: '
                    '[%s]' % str(colorjitter))
                self.transforms.append(transforms.ColorJitter(*colorjitter))
            # grayscale
            if random_grayscale > 0:
                logger.debug('Training samples will be randomly converted to '
                             'grayscale with probability [%f]' %
                             random_grayscale)
                self.transforms.append(
                    transforms.RandomGrayscale(p=random_grayscale))
            # random hflip
            if random_hflip > 0:
                logger.debug(
                    'Training samples will be randomly horizontally flipped '
                    'with probability [%f]' % random_hflip)
                self.transforms.append(
                    transforms.RandomHorizontalFlip(p=random_hflip))
        else:
            self.transforms.append(transforms.Resize(resize))
            if not tencrops:
                logger.debug(
                    'Testing samples will be resized to [%s] and then '
                    'center cropped to [%s]' % (resize, size))
                self.transforms.append(transforms.CenterCrop(size))
            else:
                self.transforms.append(transforms.TenCrop(size))
                logger.debug(
                    'Testing samples will be resized to [%s] and then '
                    'ten cropped to [%s]' % (resize, size))

        to_tensor = transforms.ToTensor()
        # to tensor and normalize
        if means is not None and stds is not None:
            logger.debug('Samples will be normalised with means: [%s] '
                         'and stds: [%s]' % (means, stds))
            normalise = transforms.Normalize(means, stds)
            if train or not tencrops:
                self.transforms.append(to_tensor)
                self.transforms.append(normalise)
            else:
                self.transforms.append(
                    transforms.Lambda(lambda crops: torch.stack(
                        [normalise(to_tensor(crop)) for crop in crops])))
        else:
            logger.debug('Samples will not be normalised')
            if train or not tencrops:
                self.transforms.append(to_tensor)
            else:
                self.transforms.append(
                    transforms.Lambda(lambda crops: torch.stack(
                        [to_tensor(crop) for crop in crops])))
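The __init__ above only accumulates self.transforms; presumably the class then composes and applies them, e.g. (a sketch, the __call__ method is an assumption):

    def __call__(self, img):
        # compose lazily so the accumulated list stays inspectable
        return transforms.Compose(self.transforms)(img)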
Example #9
import pandas as pd



pathTestData = '../../processed_data/test'

IRID_normalize = transforms.Normalize([0.511742964836, 0.243537961753, 0.0797484182405], [0.223165616204, 0.118469339976, 0.0464971614141])
IMAGENET_normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

#-------------------- SETTINGS: DATASET BUILDERS
datasetTest = DatasetGenerator(pathImageDirectory=pathTestData, transform=None)
dataLoaderTest = DataLoader(dataset=datasetTest, batch_size=1, num_workers=8, shuffle=False, pin_memory=False)

#---------------------custom transforms
transformListIRID = []
transformListIMAGENET = []


transformListIMAGENET.append(transforms.ToPILImage())
transformListIMAGENET.append(transforms.Resize(256))
transformListIMAGENET.append(transforms.TenCrop(224))
transformListIMAGENET.append(transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])))
transformListIMAGENET.append(transforms.Lambda(lambda crops: torch.stack([IMAGENET_normalize(crop) for crop in crops])))
transformIMAGENET = transforms.Compose(transformListIMAGENET)

imgs, labs, paths = next(iter(dataLoaderTest))
print(imgs.size())

# simgs = Image.fromarray(simgs)
print(transformIMAGENET(imgs.squeeze()).unsqueeze(0).size())
Example #10

    def __init__(self, dataset, valsize=640, phase='train'):
        self.valsize = valsize
        if dataset == 'NYU':
            data_dir = '/home/gzx/RGBD/NYU-DV2/nyudv2_data_label_extractor/'
            splits = sio.loadmat(data_dir + 'splits.mat')
            if phase == 'train':
                # train split plus every 5th test image
                input_list = (splits['trainNdxs'][:, 0].tolist() +
                              splits['testNdxs'][::5][:, 0].tolist())
            else:
                input_list = splits['testNdxs'][:, 0].tolist()
            self.image_list = [
                data_dir + 'data/images/' + 'img_%d.png' % (input + 5000)
                for input in input_list
            ]
            self.label_list = [
                data_dir + 'data/label40/' + 'img_%d.png' % (input + 5000)
                for input in input_list
            ]
            self.depth_list = [
                data_dir + 'data/depth/' + 'img_%d.png' % (input + 5000)
                for input in input_list
            ]
        elif dataset == 'SUN':
            sun_phase = phase if phase == 'train' else 'test'
            data_dir = '/home/gzx/RGBD/'
            input_list = range(1, 5286) if phase == 'train' else range(1, 5051)
            self.image_list = [
                data_dir + 'SUN/%s_image/' % sun_phase + 'img-%06d.jpg' % input
                for input in input_list
            ]
            self.label_list = [
                data_dir + 'SUN/sunrgbd-meta-data/labels/%s/' % sun_phase +
                'img-%06d.png' %
                (input if sun_phase == 'test' else input + 5050)
                for input in input_list
            ]
            self.depth_list = [
                data_dir + 'SUN/sunrgbd_%s_depth/' % sun_phase +
                '%d.png' % input for input in input_list
            ]
        else:
            raise ValueError('unknown dataset: %s' % dataset)
        self.dataset = dataset

        def base_transform(t='img'):
            if t == 'label':
                interpolation = Image.NEAREST
            else:
                interpolation = Image.BILINEAR
            return {
                'train':
                transforms.Compose([
                    transforms.Resize(720, interpolation=interpolation),
                    transforms.RandomRotation(15, resample=interpolation),
                    transforms.RandomResizedCrop(640,
                                                 scale=(0.8, 1.5),
                                                 ratio=(0.85, 1.2),
                                                 interpolation=interpolation),
                    transforms.RandomHorizontalFlip(),
                ]),
                'val':
                transforms.Compose([
                    transforms.Resize((int(0.75 * self.valsize), self.valsize),
                                      interpolation=interpolation),
                ]),
                'test':
                transforms.Compose([
                    transforms.Resize(720, interpolation=interpolation),
                    transforms.Resize((640, 640), interpolation=interpolation),
                ]),
            }[phase]

        img_transform = {
            'train':
            transforms.Compose([
                base_transform(),
                #transforms.ColorJitter(brightness=0.2, contrast=0.3, saturation=0.3, hue=0.05),
                transforms.ToTensor(),
                #transforms.Lambda(lambda x: x + torch.randn_like(x) * 0.02),
            ]),
            'val':
            transforms.Compose([
                base_transform(),
                transforms.ToTensor(),
            ]),
            'test':
            transforms.Compose([
                transforms.TenCrop(640),
                transforms.Lambda(lambda crops: torch.stack(
                    [transforms.ToTensor()(crop) for crop in crops])),
            ]),
        }[phase]

        def image_transform(image, rand_seed=None):
            if rand_seed:
                random.seed(rand_seed)
            image = img_transform(image).permute(1, 2, 0)
            image = (image - IMG_MEAN) / IMG_STD
            image = image.permute(2, 0, 1)
            if image.size(0) == 1:
                image = image.repeat(3, 1, 1)
            return image

        self.image_transform = image_transform

        def depth_transform(depth, rand_seed=None):
            if rand_seed:
                random.seed(rand_seed)
            depth = np.array(base_transform('label')(depth))[np.newaxis, :, :]
            return torch.from_numpy(depth.astype(np.float32))

        self.depth_transform = depth_transform

        def label_transform(label, rand_seed=None):
            if rand_seed:
                random.seed(rand_seed)
            label = np.array(
                base_transform('label')(label)).astype('uint8') - 1
            #print (label)
            if label.ndim == 3:
                label = label[:, :, 0]
            return torch.from_numpy(label.astype(np.int64))

        self.label_transform = label_transform
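The rand_seed arguments keep the image, depth and label augmentations aligned only because this torchvision version draws its randomness from Python's random module; reseeding before each call replays the same crop, rotation and flip. A hedged usage sketch from a hypothetical __getitem__ (img, dep and lab are assumed PIL inputs):

        seed = random.randint(0, 2**32 - 1)               # one shared seed per sample
        image = self.image_transform(img, rand_seed=seed)
        depth = self.depth_transform(dep, rand_seed=seed)
        label = self.label_transform(lab, rand_seed=seed)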
Example #11
def main(modelfile):
    model_xml = os.path.join('model', modelfile)
    model_bin = model_xml.replace('.xml', '.bin')

    log.info('Creating Inference Engine')
    ie = IECore()
    net = ie.read_network(model=model_xml, weights=model_bin)

    log.info('Preparing input blobs')
    input_blob = next(iter(net.input_info))
    out_blob = next(iter(net.outputs))
    net.batch_size = (args.batch_size * N_CROPS)

    n, c, h, w = net.input_info[input_blob].input_data.shape

    # for image load
    normalize = transforms.Normalize(
            [0.485, 0.456, 0.406],
            [0.229, 0.224, 0.225])

    test_dataset = ChestXrayDataSet(data_dir=DATA_DIR,
            image_list_file=TEST_IMAGE_LIST,
            transform=transforms.Compose([
                transforms.Resize(256),
                transforms.TenCrop(224),
                transforms.Lambda
                (lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),
                transforms.Lambda
                (lambda crops: torch.stack([normalize(crop) for crop in crops]))
                ]))
    
    test_loader = DataLoader(dataset=test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=False)

    gt = torch.FloatTensor()
    pred = torch.FloatTensor()
    
    # loading model to the plugin
    log.info('Loading model to the plugin')

    #config = {'CPU_THREADS_NUM': '48', 'CPU_THROUGHPUT_STREAMS': 'CPU_THROUGHPUT_AUTO'}
    config = {'CPU_THROUGHPUT_STREAMS': '%d' % args.cpu_throughput_streams}
    exec_net = ie.load_network(network=net, device_name='CPU', config=config, num_requests=args.num_requests)

    # Number of requests
    infer_requests = exec_net.requests
    print('request len', len(infer_requests))
    request_queue = InferRequestsQueue(infer_requests, out_blob)

    start_time = timeit.default_timer()

    for i, (inp, target) in enumerate(test_loader):
        bs, n_crops, c, h, w = inp.size()

        images = inp.view(-1, c, h, w).numpy()

        if bs !=  args.batch_size:
            images2 = np.zeros(shape=(args.batch_size * n_crops, c, h, w))
            images2[:bs*n_crops, :c, :h, :w] = images
            images = images2

        infer_request = request_queue.get_idle_request()

        infer_request.start_async({input_blob: images}, bs, target)

        if i == 20:
            break
        
    # wait the latest inference executions
    request_queue.wait_all()
    for i, queue in enumerate(request_queue.requests):
        # print(i, queue)
        gt = torch.cat((gt, queue.get_ground_truth()), 0)
        pred = torch.cat((pred, queue.get_prediction()), 0)
        
    print('Elapsed time: %0.2f sec.' % (timeit.default_timer() - start_time))

    AUCs = [roc_auc_score(gt.cpu()[:, i], pred.cpu()[:, i]) for i in range(N_CLASSES)]
    AUC_avg = np.array(AUCs).mean()
    print('The average AUC is {AUC_avg:.3f}'.format(AUC_avg=AUC_avg))
    for i in range(N_CLASSES):
        print('The AUC of {} is {:.3f}'.format(CLASS_NAMES[i], AUCs[i]))
Example #12
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))

    model.load_state_dict(best_model_wts)
    return model, val_acc_history


crop_size = 44
transform_train = transforms.Compose([
    transforms.RandomCrop(crop_size),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor()
])

transform_test = transforms.Compose([
    transforms.TenCrop(crop_size),
    transforms.Lambda(lambda crops: torch.stack(
        [transforms.ToTensor()(crop) for crop in crops]))
])

if args.checkpoint is None:
    start_epoch = 0
    model = VGG()
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=0.9,
                          weight_decay=5e-4)
else:
    checkpoint = torch.load(args.checkpoint)
    start_epoch = checkpoint['epoch'] + 1
    print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
Example #13

def Mytrainsform():
    transform1 = transforms.Compose([
        transforms.TenCrop(44),
        transforms.Lambda(lambda crops: torch.stack(
            [transforms.ToTensor()(crop) for crop in crops])),
    ])
    return transform1
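A quick usage sketch (the input path is hypothetical; the image must be at least 44x44):

from PIL import Image

img = Image.open('face.png')
crops = Mytrainsform()(img)   # tensor of shape [10, C, 44, 44]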
Example #14
validation_dataset = ChestXrayDataSetMultiLabel(DATA_DIR,
                                                VALIDATION_IMAGE_LIST,
                                                transform=transform_val)
validation_dataloader = DataLoader(dataset=validation_dataset,
                                   batch_size=BATCH_SIZE,
                                   shuffle=False,
                                   num_workers=4,
                                   pin_memory=True,
                                   drop_last=True)

test_dataset = ChestXrayDataSetMultiLabel(
    data_dir=DATA_DIR,
    image_list_file=TEST_IMAGE_LIST,
    transform=transforms.Compose([
        transforms.Resize(256),
        transforms.TenCrop(224),
        transforms.Lambda(lambda crops: torch.stack(
            [transforms.ToTensor()(crop) for crop in crops])),
        transforms.Lambda(lambda crops: torch.stack([
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            (crop) for crop in crops
        ]))
    ]))

test_dataloader = DataLoader(dataset=test_dataset,
                             batch_size=BATCH_SIZE,
                             shuffle=False,
                             num_workers=4,
                             pin_memory=True,
                             drop_last=True)
Example #15
def main():

    # initialize and load the model
    cudnn.benchmark = True

    if USE_DENSENET:
        model = DenseNet121(N_CLASSES).cuda()
    else:
        model = ResNet18(N_CLASSES).cuda()

    print(model)

    model = torch.nn.DataParallel(model).cuda()

    if USE_DENSENET:
        if os.path.isfile(CKPT_PATH):
            print("=> loading checkpoint")
            checkpoint = torch.load(CKPT_PATH)
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint")
        else:
            print("=> no checkpoint found")

    normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])

    #read training data and train
    train_dataset = ChestXrayDataSet(data_dir=DATA_DIR,
                                     image_list_file=TRAIN_IMAGE_LIST,
                                     transform=transforms.Compose([
                                         transforms.Resize((224, 224)),
                                         transforms.ToTensor(), normalize
                                     ]))
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=BATCH_SIZE,
                              shuffle=True,
                              num_workers=8,
                              pin_memory=True)

    criterion = nn.BCELoss().cuda()
    optimizer = optim.Adam(model.parameters())

    for epoch in range(0, RUNS):
        print("Epoch " + str(epoch + 1))
        train_run(model, train_loader, optimizer, criterion, epoch)

    test_dataset = ChestXrayDataSet(
        data_dir=DATA_DIR,
        image_list_file=TEST_IMAGE_LIST,
        transform=transforms.Compose([
            transforms.Resize(256),
            transforms.TenCrop(224),
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop) for crop in crops])),
            transforms.Lambda(
                lambda crops: torch.stack([normalize(crop) for crop in crops]))
        ]))
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=BATCH_SIZE,
                             shuffle=False,
                             num_workers=8,
                             pin_memory=True)

    # initialize the ground truth and output tensor
    gt = torch.FloatTensor()
    gt = gt.cuda()
    pred = torch.FloatTensor()
    pred = pred.cuda()

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():  # replaces the deprecated volatile=True flag
        for i, (inp, target) in enumerate(test_loader):
            target = target.cuda()
            gt = torch.cat((gt, target), 0)
            bs, n_crops, c, h, w = inp.size()
            input_var = inp.view(-1, c, h, w).cuda()
            output = model(input_var)
            output_mean = output.view(bs, n_crops, -1).mean(1)
            pred = torch.cat((pred, output_mean.data), 0)

    AUROCs = compute_AUCs(gt, pred)
    AUROC_avg = np.array(AUROCs).mean()
    print('The average AUROC is {AUROC_avg:.3f}'.format(AUROC_avg=AUROC_avg))
    for i in range(N_CLASSES):
        print('The AUROC of {} is {}'.format(CLASS_NAMES[i], AUROCs[i]))
Example #16
def main():

    cudnn.benchmark = True

    # initialize and load the model
    model = DenseNet121(N_CLASSES).cuda()
    model = torch.nn.DataParallel(model).cuda()

    #if os.path.isfile(CKPT_PATH):
    if 0:
        print("=> loading checkpoint")
        checkpoint = torch.load(CKPT_PATH)
        model.load_state_dict(checkpoint['state_dict'])
        print("=> loaded checkpoint")
    else:
        print("=> no checkpoint found")

    normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])

    test_dataset = ChestXrayDataSet(
        data_dir=DATA_DIR,
        image_list_file=TEST_IMAGE_LIST,
        transform=transforms.Compose([
            transforms.Resize(256),
            transforms.TenCrop(224),
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop) for crop in crops])),
            transforms.Lambda(
                lambda crops: torch.stack([normalize(crop) for crop in crops]))
        ]))
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=BATCH_SIZE,
                             shuffle=False,
                             num_workers=LOADER_WORKERS,
                             pin_memory=True)

    # initialize the ground truth and output tensor
    gt = torch.FloatTensor()
    gt = gt.cuda()
    pred = torch.FloatTensor()
    pred = pred.cuda()

    # switch to evaluate mode
    model.eval()
    print("starting loop")
    try:
        for i, (inp, target) in enumerate(test_loader):
            target = target.cuda()
            gt = torch.cat((gt, target), 0)
            bs, n_crops, c, h, w = inp.size()
            input_var = torch.autograd.Variable(inp.view(-1, c, h, w).cuda(),
                                                volatile=True)
            output = model(input_var)
            output_mean = output.view(bs, n_crops, -1).mean(1)
            pred = torch.cat((pred, output_mean.data), 0)
            if not i % 100:
                print("iteration " + str(i))
    except:
        print('error in iteration: ' + str(i))

    AUROCs = compute_AUCs(gt, pred)
    AUROC_avg = np.array(AUROCs).mean()
    print('The average AUROC is {AUROC_avg:.3f}'.format(AUROC_avg=AUROC_avg))
    for i in range(N_CLASSES):
        print('The AUROC of {} is {}'.format(CLASS_NAMES[i], AUROCs[i]))
Example #17

cut_size = 44
total_epoch = 250

path = os.path.join(opt.dataset + '_2_' + opt.model)

# Data
print('==> Preparing data..')
transform_train = tfs.Compose([
    tfs.RandomCrop(cut_size),
    tfs.RandomHorizontalFlip(),
    tfs.ToTensor(),
])

transform_test = tfs.Compose([
    tfs.TenCrop(cut_size),
    tfs.Lambda(
        lambda crops: torch.stack([tfs.ToTensor()(crop) for crop in crops])),
])

trainset = FER2013(split='Training', transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=opt.bs,
                                          shuffle=True,
                                          num_workers=1)
PublicTestset = FER2013(split='PublicTest', transform=transform_test)
PublicTestloader = torch.utils.data.DataLoader(PublicTestset,
                                               batch_size=opt.bs,
                                               shuffle=False,
                                               num_workers=1)
PrivateTestset = FER2013(split='PrivateTest', transform=transform_test)
Example #18
def get_data(data_path):
    with open(data_path, 'rb') as f:
        train_test_paths_labels = pickle.load(f)
    train_paths = train_test_paths_labels[0]
    val_paths = train_test_paths_labels[1]
    train_labels = train_test_paths_labels[3]
    val_labels = train_test_paths_labels[4]
    train_num_each = train_test_paths_labels[6]
    val_num_each = train_test_paths_labels[7]

    print('train_paths  : {:6d}'.format(len(train_paths)))
    print('train_labels : {:6d}'.format(len(train_labels)))
    print('valid_paths  : {:6d}'.format(len(val_paths)))
    print('valid_labels : {:6d}'.format(len(val_labels)))

    train_labels = np.asarray(train_labels, dtype=np.int64)
    val_labels = np.asarray(val_labels, dtype=np.int64)

    train_transforms = None
    test_transforms = None

    if use_flip == 0:
        train_transforms = transforms.Compose([
            transforms.Resize((250, 250)),
            transforms.RandomCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.44893518,0.3226702,0.34424525],[0.22357443,0.18503027,0.1900281])
        ])
    elif use_flip == 1:
        train_transforms = transforms.Compose([
            transforms.Resize((250, 250)),
            RandomCrop(224),
            ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.05),
            RandomHorizontalFlip(),
            RandomRotation(5),
            transforms.ToTensor(),
            transforms.Normalize([0.44893518,0.3226702,0.34424525],[0.22357443,0.18503027,0.1900281])
        ])

    if crop_type == 0:
        test_transforms = transforms.Compose([
            transforms.Resize((250, 250)),
            transforms.RandomCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.44893518,0.3226702,0.34424525],[0.22357443,0.18503027,0.1900281])
        ])
    elif crop_type == 1:
        test_transforms = transforms.Compose([
            transforms.Resize((250, 250)),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.44893518,0.3226702,0.34424525],[0.22357443,0.18503027,0.1900281])
        ])
    elif crop_type == 2:
        test_transforms = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize([0.44893518,0.3226702,0.34424525],[0.22357443,0.18503027,0.1900281])
        ])
    elif crop_type == 5:
        test_transforms = transforms.Compose([
            transforms.Resize((250, 250)),
            transforms.FiveCrop(224),
            Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),
            Lambda(
                lambda crops: torch.stack(
                    [transforms.Normalize([0.44893518,0.3226702,0.34424525],[0.22357443,0.18503027,0.1900281])(crop) for crop in crops]))
        ])
    elif crop_type == 10:
        test_transforms = transforms.Compose([
            transforms.Resize((250, 250)),
            transforms.TenCrop(224),
            Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),
            Lambda(
                lambda crops: torch.stack(
                    [transforms.Normalize([0.44893518,0.3226702,0.34424525],[0.22357443,0.18503027,0.1900281])(crop) for crop in crops]))
        ])

    train_dataset = CholecDataset(train_paths, train_labels, train_transforms)
    val_dataset = CholecDataset(val_paths, val_labels, test_transforms)
#    test_dataset = CholecDataset(test_paths, test_labels, test_transforms)

    return train_dataset, train_num_each, val_dataset, val_num_each
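A hedged usage sketch of get_data (the pickle path and loader settings are placeholders):

from torch.utils.data import DataLoader

train_dataset, train_num_each, val_dataset, val_num_each = get_data(
    'train_val_paths_labels.pkl')
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True,
                          num_workers=4, pin_memory=True)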
Example #19
    norm_mean = [0.485, 0.456, 0.406]
    norm_std = [0.229, 0.224, 0.225]

    train_transform = transforms.Compose([
        transforms.Resize(256),  # Resize(256) scales the short edge to 256, keeping the aspect ratio; Resize((256, 256)) would resize both sides
        transforms.CenterCrop(256),
        transforms.RandomCrop(224),
        transforms.RandomHorizontalFlip(p=0.5),  # p is the probability of a horizontal flip
        transforms.ToTensor(),
        transforms.Normalize(norm_mean, norm_std),
    ])

    normalizes = transforms.Normalize(norm_mean, norm_std)
    valid_transform = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.TenCrop(224, vertical_flip=False),
        transforms.Lambda(lambda crops: torch.stack(
            [normalizes(transforms.ToTensor()(crop)) for crop in crops])),
    ])  # yields a 5-D tensor: [B, 10, C, H, W]

    # build the dataset instances
    train_data = CatDogDataset(data_dir=data_dir,
                               mode="train",
                               transform=train_transform)
    valid_data = CatDogDataset(data_dir=data_dir,
                               mode="valid",
                               transform=valid_transform)

    # build the DataLoaders
    train_loader = DataLoader(dataset=train_data,
                              batch_size=BATCH_SIZE)
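The snippet breaks off here; for the ten-crop valid_transform, a hedged sketch of how its [B, 10, C, H, W] batches would be evaluated (`model` and a valid_loader built like train_loader are assumptions):

    with torch.no_grad():
        for imgs, labels in valid_loader:
            b, ncrops, c, h, w = imgs.size()
            logits = model(imgs.view(-1, c, h, w))           # fold crops into batch
            logits = logits.view(b, ncrops, -1).mean(dim=1)  # average the ten crops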
Example #20

    def test(pathDirData, pathFileTest, pathModel, nnArchitecture,
             nnClassCount, nnIsTrained, trBatchSize, transResize, transCrop,
             launchTimeStamp):

        CLASS_NAMES = [
            'Atelectasis', 'Cardiomegaly', 'Effusion', 'Infiltration', 'Mass',
            'Nodule', 'Pneumonia', 'Pneumothorax', 'Consolidation', 'Edema',
            'Emphysema', 'Fibrosis', 'Pleural_Thickening', 'Hernia'
        ]

        cudnn.benchmark = True

        #-------------------- SETTINGS: NETWORK ARCHITECTURE, MODEL LOAD
        if nnArchitecture == 'DENSE-NET-121':
            model = DenseNet121(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-169':
            model = DenseNet169(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-201':
            model = DenseNet201(nnClassCount, nnIsTrained).cuda()

        model = torch.nn.DataParallel(model).cuda()

        modelCheckpoint = torch.load(pathModel)
        model.load_state_dict(modelCheckpoint['state_dict'])

        #-------------------- SETTINGS: DATA TRANSFORMS, TEN CROPS
        normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                         [0.229, 0.224, 0.225])

        #-------------------- SETTINGS: DATASET BUILDERS
        transformList = []
        transformList.append(transforms.Resize(transResize))
        transformList.append(transforms.TenCrop(transCrop))
        transformList.append(
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop) for crop in crops])))
        transformList.append(
            transforms.Lambda(lambda crops: torch.stack(
                [normalize(crop) for crop in crops])))
        transformSequence = transforms.Compose(transformList)

        datasetTest = DatasetGenerator(pathImageDirectory=pathDirData,
                                       pathDatasetFile=pathFileTest,
                                       transform=transformSequence)
        dataLoaderTest = DataLoader(dataset=datasetTest,
                                    batch_size=trBatchSize,
                                    num_workers=0,
                                    shuffle=False,
                                    pin_memory=False)

        outGT = torch.FloatTensor().cuda()
        outPRED = torch.FloatTensor().cuda()

        model.eval()

        with torch.no_grad():  # Variable wrapping is unnecessary on modern PyTorch
            for i, (input, target) in enumerate(dataLoaderTest):
                target = target.cuda()
                outGT = torch.cat((outGT, target), 0)
                bs, n_crops, c, h, w = input.size()
                varInput = input.view(-1, c, h, w).cuda()
                out = model(varInput)
                outMean = out.view(bs, n_crops, -1).mean(1)
                outPRED = torch.cat((outPRED, outMean.data), 0)

        aurocIndividual = ChexnetTrainer.computeAUROC(outGT, outPRED,
                                                      nnClassCount)
        aurocMean = np.array(aurocIndividual).mean()

        print('AUROC mean ', aurocMean)

        for i in range(0, len(aurocIndividual)):
            print(CLASS_NAMES[i], ' ', aurocIndividual[i])

        return
Example #21
    def predict(pathDirData, pathFileTest, pathModel, nnArchitecture,
                nnClassCount, nnIsTrained, trBatchSize, transResize, transCrop,
                launchTimeStamp):

        cudnn.benchmark = True

        #-------------------- SETTINGS: NETWORK ARCHITECTURE, MODEL LOAD
        if nnArchitecture == 'DENSE-NET-121':
            model = DenseNet121(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-169':
            model = DenseNet169(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-201':
            model = DenseNet201(nnClassCount, nnIsTrained).cuda()

        model = torch.nn.DataParallel(model).cuda()

        modelCheckpoint = torch.load(pathModel)
        # https://github.com/KaiyangZhou/deep-person-reid/issues/23
        pattern = re.compile(
            r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$'
        )
        state_dict = modelCheckpoint['state_dict']
        for key in list(state_dict.keys()):
            res = pattern.match(key)
            if res:
                new_key = res.group(1) + res.group(2)
                state_dict[new_key] = state_dict[key]
                del state_dict[key]
        model.load_state_dict(state_dict)

        #-------------------- SETTINGS: DATA TRANSFORMS, TEN CROPS
        normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                         [0.229, 0.224, 0.225])

        #-------------------- SETTINGS: DATASET BUILDERS
        transformList = []
        transformList.append(transforms.Resize(transResize))
        transformList.append(transforms.TenCrop(transCrop))
        transformList.append(
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop) for crop in crops])))
        transformList.append(
            transforms.Lambda(lambda crops: torch.stack(
                [normalize(crop) for crop in crops])))
        transformSequence = transforms.Compose(transformList)

        datasetTest = DatasetGenerator(pathImageDirectory=pathDirData,
                                       pathDatasetFile=pathFileTest,
                                       transform=transformSequence)
        dataLoaderTest = DataLoader(dataset=datasetTest,
                                    batch_size=trBatchSize,
                                    shuffle=False,
                                    pin_memory=True)  #num_workers=N

        outGT = torch.FloatTensor().cuda()
        outPRED = torch.FloatTensor().cuda()

        model.eval()

        with torch.no_grad():  # replaces the deprecated volatile=True flag
            for i, (input, target) in enumerate(dataLoaderTest):
                target = target.cuda()
                outGT = torch.cat((outGT, target), 0)
                bs, n_crops, c, h, w = input.size()
                varInput = input.view(-1, c, h, w).cuda()
                out = model(varInput)
                outMean = out.view(bs, n_crops, -1).mean(1)
                outPRED = torch.cat((outPRED, outMean.data), 0)

        pneumonia_probas = list(
            outPRED.cpu().data.numpy()[:, CLASS_NAMES.index('Pneumonia')])

        return pneumonia_probas
Example #22
def main():
    global args
    args = parser.parse_args()

    #fix random seeds
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)

    best_prec1 = 0

    # load model
    model = load_model(args.model)

    cudnn.benchmark = True

    # freeze the features layers
    for block in model.module:
        try:
            for param in block.parameters():
                param.requires_grad = False
        except:
            for layer in block:
                for param in layer.parameters():
                    param.requires_grad = False

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    # data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val_in_folders')
    valdir_double = os.path.join(args.data, 'val_in_double_folders')
    valdir_list = []

    # Load in AoA table if needed
    if args.aoaval:
        aoalist = pd.read_csv('matchingAoA_ImageNet_excel.csv')
        for index, row in aoalist.iterrows():
            node = row['node']
            aoa = float(row['aoa'])
            if not math.isnan(aoa):
                valdir_list.append({
                    'node': node,
                    'pth': os.path.join(valdir_double, node),
                    'aoa': aoa
                })
            else:
                print('Not found %s' % node)

        # valdir_list = valdir_list[:5]  # trim for testing
        print('Using %d validation categories for aoa' % len(valdir_list))

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    # Can't do validation if the tencrops option is chosen
    if args.tencrops:
        transformations_val = [
            transforms.Resize(256),
            transforms.TenCrop(224),
            transforms.Lambda(lambda crops: torch.stack(
                [normalize(transforms.ToTensor()(crop)) for crop in crops])),
        ]
    else:
        transformations_val = [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(), normalize
        ]

    transformations_train = [
        transforms.Resize(256),
        transforms.CenterCrop(256),
        transforms.RandomCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(), normalize
    ]
    train_dataset = datasets.ImageFolder(
        traindir, transform=transforms.Compose(transformations_train))

    val_dataset = datasets.ImageFolder(
        valdir, transform=transforms.Compose(transformations_val))

    # Load up individual categories for AoA validation
    if args.aoaval:
        val_list_dataset = []
        val_list_loader = []
        for entry in valdir_list:
            val_list_dataset.append(
                datasets.ImageFolder(
                    entry['pth'],
                    transform=transforms.Compose(transformations_val)))

            val_list_loader.append(
                torch.utils.data.DataLoader(val_list_dataset[-1],
                                            batch_size=50,
                                            shuffle=False,
                                            num_workers=args.workers))

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=int(args.batch_size /
                                                            2),
                                             shuffle=False,
                                             num_workers=args.workers)

    # logistic regression
    reglog = RegLog(args.conv, len(train_dataset.classes)).cuda()
    optimizer = torch.optim.SGD(filter(lambda x: x.requires_grad,
                                       reglog.parameters()),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=10**args.weight_decay)

    # create logs
    exp_log = os.path.join(args.exp, 'log')
    if not os.path.isdir(exp_log):
        os.makedirs(exp_log)

    loss_log = Logger(os.path.join(exp_log, 'loss_log'))
    prec1_log = Logger(os.path.join(exp_log, 'prec1'))
    prec5_log = Logger(os.path.join(exp_log, 'prec5'))

    for epoch in range(args.epochs):
        end = time.time()

        # If savedmodel already exists, load this
        savedmodelpth = os.path.join(args.exp, 'model_best.pth.tar')
        if os.path.exists(savedmodelpth):
            print('Loading saved decoder %s' % savedmodelpth)
            model_with_decoder = torch.load(savedmodelpth)
            reglog.load_state_dict(model_with_decoder['reglog_state_dict'])
        else:
            # train for one epoch
            train(train_loader, model, reglog, criterion, optimizer, epoch)
            # evaluate on validation set
            prec1, prec5, loss = validate(val_loader, model, reglog, criterion)

            loss_log.log(loss)
            prec1_log.log(prec1)
            prec5_log.log(prec5)

            # remember best prec@1 and save checkpoint
            is_best = prec1 > best_prec1
            best_prec1 = max(prec1, best_prec1)
            if is_best:
                filename = 'model_best.pth.tar'
            else:
                filename = 'checkpoint.pth.tar'
            torch.save(
                {
                    'epoch': epoch + 1,
                    'arch': 'alexnet',
                    'state_dict': model.state_dict(),
                    'reglog_state_dict':
                    reglog.state_dict(),  # Also save decoding layers
                    'prec5': prec5,
                    'best_prec1': best_prec1,
                    'optimizer': optimizer.state_dict(),
                },
                os.path.join(args.exp, filename))  # honor the is_best filename instead of always overwriting model_best

        if args.aoaval:
            # Validate individual categories, so loss can be compared to AoA

            # # To check weights loaded OK
            # # evaluate on validation set
            # prec1, prec5, loss = validate(val_loader, model, reglog, criterion)

            # loss_log.log(loss)
            # prec1_log.log(prec1)
            # prec5_log.log(prec5)

            aoares = {}

            for idx, row in enumerate(zip(valdir_list, val_list_loader)):
                # evaluate on validation set
                print("AOA validation %d/%d" % (idx, len(valdir_list)))
                prec1_tmp, prec5_tmp, loss_tmp = validate(
                    row[1], model, reglog, criterion)
                aoares[row[0]['node']] = {
                    'prec1': float(prec1_tmp),
                    'prec5': float(prec5_tmp),
                    'loss': float(loss_tmp),
                    'aoa': row[0]['aoa']
                }

            # Save to JSON
            aoapth = os.path.join(args.exp, 'aoaresults.json')
            with open(aoapth, 'w') as f:
                json.dump(aoares, f)
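# RegLog is referenced above but not defined in this snippet. A minimal
# sketch of such a linear probe over frozen conv features, assuming
# `torch.nn as nn` is imported (the layer sizes are illustrative, not the
# exact implementation used here):
class RegLogSketch(nn.Module):
    """Logistic regression head over a frozen conv feature map."""

    def __init__(self, num_features, num_classes):
        super(RegLogSketch, self).__init__()
        self.pool = nn.AdaptiveAvgPool2d(1)      # pool spatial dims away
        self.linear = nn.Linear(num_features, num_classes)

    def forward(self, x):
        x = self.pool(x)                         # (N, C, 1, 1)
        x = x.view(x.size(0), -1)                # (N, C)
        return self.linear(x)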
Example #23
# Data
print('==> Preparing data..')
transform_train = transforms.Compose([
    transforms.RandomCrop(44),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    utils.Cutout(n_holes=1, length=13),
    #transforms.Normalize((0.589499, 0.45687565, 0.40699387), 
                            #(0.25357702, 0.23312956, 0.23275192)),
    transforms.Normalize((0.56010324, 0.43693307, 0.39122295), 
                            (0.23726934, 0.21260591, 0.20737909)), #Augmentation
])

transform_test = transforms.Compose([
    transforms.TenCrop(44),
    #transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),
    transforms.Lambda(lambda crops: torch.stack([transforms.Normalize(
            mean=[0.589499, 0.45687565, 0.40699387], std=[0.25357702, 0.23312956, 0.23275192])
            (transforms.ToTensor()(crop)) for crop in crops])),
])

trainset = RAF(split = 'Training', transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=opt.train_bs, shuffle=True, num_workers=1)

PrivateTestset = RAF(split = 'PrivateTest', transform=transform_test)
PrivateTestloader = torch.utils.data.DataLoader(PrivateTestset, batch_size=opt.test_bs, shuffle=False, num_workers=1)

# Model
if opt.model == 'VGG19':
    net = VGG('VGG19')
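# transform_test above emits ten crops per image, so each test batch has
# shape (bs, ncrops, c, h, w). A minimal evaluation sketch, assuming the
# `net` and `PrivateTestloader` defined above, fuses the crop dimension and
# averages the per-crop predictions (the pattern shown in the torchvision
# TenCrop documentation):
net.eval()
with torch.no_grad():
    for inputs, targets in PrivateTestloader:
        bs, ncrops, c, h, w = inputs.size()
        outputs = net(inputs.view(-1, c, h, w))             # fuse batch and crops
        outputs_avg = outputs.view(bs, ncrops, -1).mean(1)  # average over crops
        _, predicted = outputs_avg.max(1)
        # compare `predicted` with `targets` here to accumulate accuracy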
Example #24
def load_imagenet(data_root,
                  batch_size_train,
                  batch_size_val,
                  augement,
                  num_workers=12,
                  ret='loader',
                  **kwargs):
    """
    augment: {'train': ['rresize-1crop', '1resize-1crop', '2resize-1crop', '1resize-0crop'][0],
              'val': ['center-crop'][0]}

    'rresize-1crop': random resize and crop, as in torch.transforms.RandomResizedCrop().
    '2resize-1crop': used in the ResNet paper; resize to (256 or 480), then crop to 224, then flip.

    MinMaxResizedCrop: resize first, then crop; Size'=Size*scale*1
    minmax: (256, 480) (300, 500)
        If the lower bound is too small, the object occupies too few valid
        pixels and its wholeness is broken (crops are too sparse)!
        If the upper bound is too large, the image is chopped into fragments
        and object integrity is broken (crops are too fragmented)!

    RandomResizedCrop: crop first, then resize; Size'=Size*scale*β_auto_crop
    scale: (1/5, 4/5)=(0.2~0.8)
        If the lower bound is too small, the image is chopped into fragments
        and object integrity is broken (crops are too fragmented)!
        If the upper bound is too large, the object occupies too few valid
        pixels and its wholeness is broken (crops are too sparse)!
    """
    # titanxp_root /data/dataset/ImageNetDownSample/   64x64  32x32
    # 1080ti_root  /data0/ImageNet_ILSVRC2012/
    # K40_root     no

    traindir = os.path.join(data_root, 'train')
    valdir = os.path.join(data_root, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    insize = augement.get('insize', None)
    xscale = augement.get('xscale', [])  # for train
    vscale = augement.get('vscale', [])  # for validate
    assert insize is not None, 'input image size <insize @cfg.data_augment> is not set.'
    assert isinstance(
        xscale,
        (list, tuple)), '<xscale @cfg.data_augment> must be list or tuple'
    assert isinstance(
        vscale,
        (list, tuple)), '<vscale @cfg.data_augment> must be list or tuple'

    if augement['train'] == 'rresize-1crop':
        # + random scale + random aspect ratio + random translation
        scale = augement.get('scale', (0.08, 1))
        ratio = augement.get('ratio', (3. / 4, 4. / 3))
        resize_crop = [transforms.RandomResizedCrop(insize, scale, ratio)]
    elif augement['train'] == 'rotate-rresize-1crop':
        degree = augement.get('degree', (-5, 5))
        scale = augement.get('scale', (0.08, 1))
        ratio = augement.get('ratio', (3. / 4, 4. / 3))
        if tuple(degree) == (0, 0):
            resize_crop = [transforms.RandomResizedCrop(insize, scale, ratio)]
        else:
            resize_crop = [
                transforms.RandomRotation(degree),
                transforms.RandomResizedCrop(insize, scale, ratio)
            ]
    elif augement['train'] == 'rotate-rresize-1crop-color':
        degree = augement.get('degree', (-5, 5))
        scale = augement.get('scale', (0.08, 1))
        ratio = augement.get('ratio', (3. / 4, 4. / 3))
        brightness = augement.get('brightness', 0.2)
        contrast = augement.get('contrast', 0.2)
        saturation = augement.get('saturation', 0.2)
        hue = augement.get('hue', 0.2)
        if tuple(degree) == (0, 0):
            resize_crop = [transforms.RandomResizedCrop(insize, scale, ratio)]
        else:
            resize_crop = [
                transforms.RandomRotation(degree),
                transforms.RandomResizedCrop(insize, scale, ratio)
            ]
        color_jitter = transforms.ColorJitter(brightness, contrast, saturation,
                                              hue)
        resize_crop.append(color_jitter)
    elif augement['train'] == 'xresize-1crop':
        # + fixed multi-scale resize, no aspect-ratio change, + random translation
        assert len(xscale) >= 1 and min(xscale) >= insize
        resize_list = [transforms.Resize(x) for x in xscale]
        resize_crop = [
            transforms.RandomChoice(resize_list),
            transforms.RandomCrop(insize)
        ]
    elif augement['train'] == 'minmax-resize-1crop':
        assert len(xscale) == 2 and min(xscale) > insize
        resize_crop = [
            MinMaxResize(min(xscale), max(xscale)),
            transforms.RandomCrop(insize)
        ]
    elif augement['train'] == 'rotate-minmax-resize-1crop':
        assert len(xscale) == 2 and min(xscale) > insize
        degree = augement.get('degree', (-5, 5))
        if tuple(degree) == (0, 0):
            resize_crop = [
                MinMaxResize(min(xscale), max(xscale)),
                transforms.RandomCrop(insize)
            ]
        else:
            resize_crop = [
                transforms.RandomRotation(degree),
                MinMaxResize(min(xscale), max(xscale)),
                transforms.RandomCrop(insize)
            ]
    elif augement['train'] == '1resize-0crop':
        resize_crop = [transforms.Resize(insize)]
    elif augement['train'] == '0resize-1crop':
        resize_crop = [transforms.RandomCrop(insize)]
    elif augement['train'] == 'xresize-1affine-1crop':
        # + fixed multi-scale resize + affine transform, no aspect-ratio change, + random translation
        assert len(xscale) >= 1 and min(xscale) >= insize
        resize_list = [transforms.Resize(x) for x in xscale]
        resize_crop = [
            transforms.RandomChoice(resize_list),
            transforms.RandomAffine(degrees=(-15, 15),
                                    translate=None,
                                    scale=None,
                                    shear=(-5, 5)),
            transforms.RandomCrop(insize)
        ]
    elif augement['train'] == 'xresize-raffine-1crop':
        # + fixed multi-scale resize + random affine transform, no aspect-ratio change, + random translation
        assert len(xscale) >= 1 and min(xscale) >= insize
        resize_list = [transforms.Resize(x) for x in xscale]
        resize_crop = [
            transforms.RandomChoice(resize_list),
            transforms.RandomApply(
                [transforms.RandomAffine(degrees=(-15, 15), shear=(-5, 5))]),
            transforms.RandomCrop(insize)
        ]
    elif augement['train'] == 'xresize-caffine-1crop':
        # + fixed multi-scale resize + a random choice of affine transforms, no aspect-ratio change, + random translation
        assert len(xscale) >= 1 and min(xscale) >= insize
        resize_list = [transforms.Resize(x) for x in xscale]
        resize_crop = [
            transforms.RandomChoice(resize_list),
            transforms.RandomChoice([
                transforms.RandomAffine(degrees=15, shear=None),
                transforms.RandomAffine(degrees=0, shear=10)
            ]),
            transforms.RandomCrop(insize)
        ]
    else:
        raise NotImplementedError('Unknown Resize & Crop Method %s ...' %
                                  (augement['train'], ))

    if augement.get('color', None):
        other_process = [
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            ColorAugmentation(),
            normalize,
        ]
    else:
        other_process = [
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]

    train_transform = transforms.Compose(resize_crop + other_process)

    if augement['val'] == '1resize-1crop':
        assert len(vscale) == 1 and vscale[0] >= insize
        val_transform = transforms.Compose([
            transforms.Resize(vscale[0]),
            transforms.CenterCrop(insize),
            transforms.ToTensor(),
            normalize,
        ])
    elif augement['val'] == '1resize-0crop':
        assert insize > 0
        val_transform = transforms.Compose([
            transforms.Resize(insize),
            transforms.ToTensor(),
            normalize,
        ])
    elif augement['val'] == '1resize-1crop-color':
        assert len(vscale) == 1 and vscale[0] >= insize
        val_transform = transforms.Compose([
            transforms.Resize(vscale[0]),
            transforms.CenterCrop(insize),
            transforms.ToTensor(),
            ColorAugmentation(),
            normalize,
        ])
    elif augement['val'] == '1resize-5crop':
        assert len(vscale) == 1 and vscale[0] >= insize
        val_transform = transforms.Compose([
            transforms.Resize(vscale[0]),
            transforms.FiveCrop(insize),
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop) for crop in crops])),
            transforms.Lambda(lambda crops: torch.stack(
                [normalize(crop) for crop in crops])),
        ])
    elif augement['val'] == '1resize-10crop':
        assert len(vscale) == 1 and vscale[0] >= insize
        val_transform = transforms.Compose([
            transforms.Resize(vscale[0]),
            transforms.TenCrop(insize),
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop) for crop in crops])),
            transforms.Lambda(lambda crops: torch.stack(
                [normalize(crop) for crop in crops])),
        ])
    else:
        raise NotImplementedError('Unknown Resize & Crop Method %s ...' %
                                  (augement['val'], ))

    print('\nWarning: please verify the train transform is -->\n' +
          repr(train_transform))
    print('\nWarning: please verify the val transform is -->\n' +
          repr(val_transform))

    train_dataset = datasets.ImageFolder(traindir, train_transform)
    val_dataset = datasets.ImageFolder(valdir, val_transform)
    test_dataset = val_dataset
    if ret == 'dataset':
        return train_dataset, val_dataset, test_dataset
    elif ret == 'loader':
        train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                                   batch_size=batch_size_train,
                                                   shuffle=True,
                                                   num_workers=num_workers,
                                                   pin_memory=True)
        val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
                                                 batch_size=batch_size_val,
                                                 shuffle=False,
                                                 num_workers=16,
                                                 pin_memory=False)
        test_loader = val_loader
        return train_loader, val_loader, test_loader
    else:
        raise NotImplementedError('<ret> must be loader or dataset!')
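# MinMaxResize used above is not defined in this snippet. Based on the
# docstring (resize so the shorter side lands on a random size in
# [min_size, max_size], as in the ResNet paper), a minimal sketch could be:
import random

from PIL import Image
from torchvision.transforms import functional as TF


class MinMaxResizeSketch(object):
    """Resize the shorter image side to a random size in [min_size, max_size]."""

    def __init__(self, min_size, max_size, interpolation=Image.BILINEAR):
        self.min_size = min_size
        self.max_size = max_size
        self.interpolation = interpolation

    def __call__(self, img):
        size = random.randint(self.min_size, self.max_size)
        return TF.resize(img, size, self.interpolation)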
Example #25
def load_imagenet(data_root,
                  bsize_train,
                  bsize_val,
                  augement,
                  num_workers=12,
                  result='loader',
                  **kwargs):
    """
    augment: {'train': ['rresize-1crop', '1resize-1crop', '2resize-1crop', '1resize-0crop'][0],
              'val': ['center-crop'][0]}
    insize / imsize = 0.875, e.g. 256 * 0.875 = 224
    """

    traindir = os.path.join(data_root, 'train')
    valdir = os.path.join(data_root, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    imsize = augement.get('imsize', None)
    insize = augement.get('insize', None)
    interp = augement.get('interp', 'bilinear')
    assert imsize is not None, 'augmentation image size <imsize @cfg.data_augment> is not set.'
    assert insize is not None, 'network input size <insize @cfg.data_augment> is not set.'
    assert imsize >= insize, 'imsize must be >= insize.'
    assert interp in ['linear', 'bilinear', 'bicubic']
    interp = getattr(Image, interp.upper())

    if augement['train'] == 'rotate-rresize-1crop':
        degree = augement.get('degree', (0, 0))
        scale = augement.get('scale', (0.08, 1))
        ratio = augement.get('ratio', (3. / 4, 4. / 3))
        resize_crop = []
        if tuple(degree) != (0, 0):
            resize_crop.append(transforms.RandomRotation(degree))
        resize_crop.append(
            transforms.RandomResizedCrop(insize, scale, ratio, interp))

    elif augement['train'] == 'rotate-rresize-1crop-colorj':
        degree = augement.get('degree', (0, 0))
        scale = augement.get('scale', (0.08, 1))
        ratio = augement.get('ratio', (3. / 4, 4. / 3))
        brightness = augement.get('brightness', 0.2)
        contrast = augement.get('contrast', 0.2)
        saturation = augement.get('saturation', 0.2)
        hue = augement.get('hue', 0.2)
        resize_crop = []
        if tuple(degree) != (0, 0):
            resize_crop.append(transforms.RandomRotation(degree))
        resize_crop.append(
            transforms.RandomResizedCrop(insize, scale, ratio, interp))
        color_jitter = transforms.ColorJitter(brightness, contrast, saturation,
                                              hue)
        resize_crop.append(color_jitter)

    elif augement['train'] == 'rotate-mmresize-1crop':
        mmsize = augement.get('mmsize', [])
        assert len(mmsize) == 2 and min(mmsize) > insize
        degree = augement.get('degree', (0, 0))
        resize_crop = []
        if tuple(degree) != (0, 0):
            resize_crop.append(transforms.RandomRotation(degree))
        resize_crop.append(MinMaxResize(min(mmsize), max(mmsize), interp))
        resize_crop.append(transforms.RandomCrop(insize))

    elif augement['train'] == 'mmresize-raffine-1crop':
        mmsize = augement.get('mmsize', [])
        assert len(mmsize) == 2 and min(mmsize) > insize
        resize_crop = [
            MinMaxResize(min(mmsize), max(mmsize), interp),
            transforms.RandomApply(
                [transforms.RandomAffine(degrees=(-15, 15), shear=(-5, 5))]),
            transforms.RandomCrop(insize)
        ]
    else:
        raise NotImplementedError('Unknown Resize & Crop Method %s ...' %
                                  (augement['train'], ))

    other_process = [
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ]
    if augement.get('color', None):
        other_process.insert(-1, ColorAugmentation())
    train_transform = transforms.Compose(resize_crop + other_process)

    if augement['val'] == '1resize-0crop':
        assert insize > 0
        val_transform = transforms.Compose([
            transforms.Resize(insize, interp),
            transforms.ToTensor(),
            normalize,
        ])
    elif augement['val'] == '1resize-1crop':
        val_transform = transforms.Compose([
            transforms.Resize(imsize, interp),
            transforms.CenterCrop(insize),
            transforms.ToTensor(),
            normalize,
        ])
    elif augement['val'] == '1resize-10crop':
        val_transform = transforms.Compose([
            transforms.Resize(imsize, interp),
            transforms.TenCrop(insize),
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop) for crop in crops])),
            transforms.Lambda(lambda crops: torch.stack(
                [normalize(crop) for crop in crops])),
        ])
    else:
        raise NotImplementedError('Unknown Resize & Crop Method %s ...' %
                                  (augement['val'], ))

    # print('\nWarning: Please Assure Train-Transform is -->\n' + repr(train_transform))
    # print('\nWarning: Please Assure Val-Transform is -->\n' + repr(val_transform))

    train_dataset = datasets.ImageFolder(traindir, train_transform)
    val_dataset = datasets.ImageFolder(valdir, val_transform)
    test_dataset = val_dataset

    if result == 'dataset':
        return train_dataset, val_dataset, test_dataset

    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=bsize_train,
                                               shuffle=True,
                                               num_workers=num_workers,
                                               pin_memory=True)
    val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
                                             batch_size=bsize_val,
                                             shuffle=False,
                                             num_workers=num_workers,
                                             pin_memory=True)
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                              batch_size=bsize_val,
                                              shuffle=False,
                                              num_workers=num_workers,
                                              pin_memory=True)

    return train_loader, val_loader, test_loader
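# A possible call sketch for load_imagenet above; the dict keys follow the
# docstring and the augement.get() defaults, while the concrete values and
# data_root (taken from a comment in the previous example) are illustrative
# assumptions:
augement = {
    'train': 'rotate-rresize-1crop',
    'val': '1resize-1crop',
    'imsize': 256,
    'insize': 224,
    'interp': 'bilinear',
    'degree': (0, 0),
}
train_loader, val_loader, test_loader = load_imagenet(
    data_root='/data0/ImageNet_ILSVRC2012/',
    bsize_train=256,
    bsize_val=128,
    augement=augement,
    num_workers=12)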
def main():
    args = parser.parse_args()
    print(args)

    # fix random seeds
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)

    # create model and move it to gpu
    model = load_model(args.model)
    model.top_layer = nn.Linear(model.top_layer.weight.size(1), 20)
    model.cuda()
    cudnn.benchmark = True

    # what partition of the data to use
    if args.split == 'train':
        args.test = 'val'
    elif args.split == 'trainval':
        args.test = 'test'
    # data loader
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    dataset = VOC2007_dataset(args.vocdir,
                              split=args.split,
                              transform=transforms.Compose([
                                  transforms.RandomHorizontalFlip(),
                                  transforms.RandomResizedCrop(
                                      224,
                                      scale=(args.min_scale, args.max_scale),
                                      ratio=(1, 1)),
                                  transforms.ToTensor(),
                                  normalize,
                              ]))

    loader = torch.utils.data.DataLoader(dataset,
                                         batch_size=16,
                                         shuffle=False,
                                         num_workers=24,
                                         pin_memory=True)
    print('PASCAL VOC 2007 ' + args.split + ' dataset loaded')

    # re initialize classifier
    for m in model.classifier.modules():
        if isinstance(m, nn.Linear):
            m.weight.data.normal_(0, 0.01)
            m.bias.data.fill_(0.1)
    model.top_layer.bias.data.fill_(0.1)

    if args.fc6_8:
        # freeze some layers
        for param in model.features.parameters():
            param.requires_grad = False
        # unfreeze batchnorm scaling
        if args.train_batchnorm:
            for layer in model.modules():
                if isinstance(layer, torch.nn.BatchNorm2d):
                    for param in layer.parameters():
                        param.requires_grad = True

    # set optimizer
    optimizer = torch.optim.SGD(
        filter(lambda x: x.requires_grad, model.parameters()),
        lr=args.lr,
        momentum=0.9,
        weight_decay=args.wd,
    )

    criterion = nn.BCEWithLogitsLoss(reduction='none')

    print('Start training')
    it = 0
    losses = AverageMeter()
    while it < args.nit:
        it = train(
            loader,
            model,
            optimizer,
            criterion,
            args.fc6_8,
            losses,
            it=it,
            total_iterations=args.nit,
            stepsize=args.stepsize,
        )

    print('Evaluation')
    if args.eval_random_crops:
        transform_eval = [
            transforms.RandomHorizontalFlip(),
            transforms.RandomResizedCrop(224,
                                         scale=(args.min_scale,
                                                args.max_scale),
                                         ratio=(1, 1)),
            transforms.ToTensor(),
            normalize,
        ]
    else:
        transform_eval = [
            transforms.Resize(256),
            transforms.TenCrop(224),
            transforms.Lambda(lambda crops: torch.stack(
                [normalize(transforms.ToTensor()(crop)) for crop in crops]))
        ]

    print('Train set')
    train_dataset = VOC2007_dataset(
        args.vocdir,
        split=args.split,
        transform=transforms.Compose(transform_eval))
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=1,
        shuffle=False,
        num_workers=24,
        pin_memory=True,
    )
    evaluate(train_loader, model, args.eval_random_crops)

    print('Test set')
    test_dataset = VOC2007_dataset(
        args.vocdir,
        split=args.test,
        transform=transforms.Compose(transform_eval))
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=1,
        shuffle=False,
        num_workers=24,
        pin_memory=True,
    )
    evaluate(test_loader, model, args.eval_random_crops)
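# evaluate() used above is defined elsewhere in this repository. With the
# TenCrop eval transform and batch_size=1, each batch has shape
# (1, 10, c, h, w); a minimal sketch of the crop handling inside such an
# evaluation loop (an assumption, not the exact implementation) is:
def evaluate_sketch(loader, model):
    model.eval()
    with torch.no_grad():
        for images, targets in loader:
            images = images.cuda()
            if images.dim() == 5:      # ten-crop batch: (bs, ncrops, c, h, w)
                bs, ncrops, c, h, w = images.size()
                scores = model(images.view(-1, c, h, w))
                scores = scores.view(bs, ncrops, -1).mean(1)  # average crops
            else:                      # single-crop batch: (bs, c, h, w)
                scores = model(images)
            # ... accumulate scores and targets for per-class AP / mAP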
def test_cnn(MODEL_NAME, MODEL_NAME_TARGET, BATCH_SIZE, N_LABELS,
             PATH_TO_IMAGES, DEBUG_MODE, CHECKPOINT_PATH,
             CHECKPOINT_PATH_TARGET):
    """
    Test torchvision models on NIH data given high-level hyperparameters.

    Args:
        MODEL_NAME: model name
        MODEL_NAME_TARGET: the other model name
        BATCH_SIZE: number of batch data per training
        N_LABELS: number of class labels
        PATH_TO_IMAGES: path to NIH images
        DEBUG_MODE: if true then no log will be created
        CHECKPOINT_PATH: load checkpoint path
        CHECKPOINT_PATH_TARGET: load the other checkpoint path
    Returns:
        # preds: torchvision model predictions on test fold with ground truth for comparison
        # aucs: AUCs for each train,test tuple
    """

    # use imagenet mean,std for normalization
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    normalize = transforms.Normalize(mean=mean, std=std)

    # define torchvision transforms
    data_transforms = {
        'train':
        transforms.Compose([
            transforms.Resize(256),
            transforms.TenCrop(224),
            transforms.Lambda(lambda crops: torch.stack(
                [normalize(transforms.ToTensor()(crop)) for crop in crops]))
        ]),
        'val':
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean, std)
        ]),
        'test':
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean, std)
        ])
    }

    # create train/val dataloaders
    transformed_datasets = {
        x: datasets.ImageFolder(os.path.join(PATH_TO_IMAGES, x),
                                data_transforms[x])
        for x in ['test']
    }

    dataloaders = {
        x: torch.utils.data.DataLoader(transformed_datasets[x],
                                       batch_size=BATCH_SIZE,
                                       shuffle=True,
                                       num_workers=0)
        for x in ['test']
    }

    # please do not attempt to train without GPU as will take excessively long
    if not use_gpu:
        raise ValueError("Error, requires GPU")

    model = init_model(MODEL_NAME, N_LABELS)
    model = load_checkpoint(model, CHECKPOINT_PATH)
    model = model.cuda()

    if CHECKPOINT_PATH_TARGET:
        model_target = init_model(MODEL_NAME_TARGET, N_LABELS)
        model_target = load_checkpoint(model_target, CHECKPOINT_PATH_TARGET)
        model_target = model_target.cuda()

    dataloaders_length = len(dataloaders['test'])
    loading_bar = '-' * dataloaders_length

    model_labels = []
    model_pred = []
    model_target_pred = []

    model_pred_bin = []
    model_target_pred_bin = []

    for phase in ['test']:
        for data in dataloaders[phase]:
            loading_bar = f'={loading_bar}'
            loading_bar = loading_bar[:dataloaders_length]
            print(f'Testing: {loading_bar}', end='\r')

            inputs, labels = data
            if phase == 'test':

                inputs = inputs.cuda()
                labels = labels.cpu().data.numpy()
                model_labels.extend(labels)

                outputs = model(inputs)
                outputs_pred = torch.max(outputs, dim=1)[1].cpu().data.numpy()
                model_pred.extend(outputs_pred)
                if CHECKPOINT_PATH_TARGET:
                    outputs_target = model_target(inputs)
                    outputs_target_pred = torch.max(
                        outputs_target, dim=1)[1].cpu().data.numpy()
                    model_target_pred.extend(outputs_target_pred)

    print('')

    for i, _ in enumerate(model_labels):
        model_pred_bin.append(1 if model_labels[i] == model_pred[i] else 0)
        if CHECKPOINT_PATH_TARGET:
            model_target_pred_bin.append(
                1 if model_labels[i] == model_target_pred[i] else 0)

    print(accuracy_score(model_labels, model_pred))
    print(f1_score(model_labels, model_pred, average='micro'))

    if CHECKPOINT_PATH_TARGET:
        print(accuracy_score(model_labels, model_target_pred))
        print(f1_score(model_labels, model_target_pred, average='micro'))
        tp = 0
        fp = 0
        tn = 0
        fn = 0
        for i, _ in enumerate(model_pred_bin):
            if model_pred_bin[i] == model_target_pred_bin[i]:
                if model_pred_bin[i] == 1:
                    tp += 1
                else:
                    tn += 1
            else:
                if model_pred_bin[i] == 0:
                    fp += 1
                else:
                    fn += 1

        print(f"True positive = {tp}")
        print(f"False positive = {fp}")
        print(f"False negative = {fn}")
        print(f"True negative = {tn}")

        print("Finish testing")
Example #28
def get_data(data_path):
    with open(data_path, 'rb') as f:
        train_test_paths_labels = pickle.load(f)
    train_paths = train_test_paths_labels[0]
    val_paths = train_test_paths_labels[1]
    test_paths = train_test_paths_labels[2]
    train_labels = train_test_paths_labels[3]
    val_labels = train_test_paths_labels[4]
    test_labels = train_test_paths_labels[5]
    train_num_each = train_test_paths_labels[6]
    val_num_each = train_test_paths_labels[7]
    test_num_each = train_test_paths_labels[8]

    print('train_paths  : {:6d}'.format(len(train_paths)))
    print('train_labels : {:6d}'.format(len(train_labels)))
    print('valid_paths  : {:6d}'.format(len(val_paths)))
    print('valid_labels : {:6d}'.format(len(val_labels)))
    print('test_paths   : {:6d}'.format(len(test_paths)))
    print('test_labels  : {:6d}'.format(len(test_labels)))

    train_labels = np.asarray(train_labels, dtype=np.int64)
    val_labels = np.asarray(val_labels, dtype=np.int64)
    test_labels = np.asarray(test_labels, dtype=np.int64)

    if use_flip == 0:
        train_transforms = transforms.Compose([
            RandomCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.3456, 0.2281, 0.2233],
                                 [0.2528, 0.2135, 0.2104])
        ])
    elif use_flip == 1:
        train_transforms = transforms.Compose([
            RandomCrop(224),
            RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.3456, 0.2281, 0.2233],
                                 [0.2528, 0.2135, 0.2104])
        ])

    if crop_type == 0:
        test_transforms = transforms.Compose([
            RandomCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.3456, 0.2281, 0.2233],
                                 [0.2528, 0.2135, 0.2104])
        ])
    elif crop_type == 1:
        test_transforms = transforms.Compose([
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.3456, 0.2281, 0.2233],
                                 [0.2528, 0.2135, 0.2104])
        ])
    elif crop_type == 5:
        test_transforms = transforms.Compose([
            transforms.FiveCrop(224),
            Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop) for crop in crops])),
            Lambda(lambda crops: torch.stack([
                transforms.Normalize([0.3456, 0.2281, 0.2233],
                                     [0.2528, 0.2135, 0.2104])(crop)
                for crop in crops
            ]))
        ])
    elif crop_type == 10:
        test_transforms = transforms.Compose([
            transforms.TenCrop(224),
            Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop) for crop in crops])),
            Lambda(lambda crops: torch.stack([
                transforms.Normalize([0.3456, 0.2281, 0.2233],
                                     [0.2528, 0.2135, 0.2104])(crop)
                for crop in crops
            ]))
        ])

    train_dataset = CholecDataset(train_paths, train_labels, train_transforms)
    val_dataset = CholecDataset(val_paths, val_labels, test_transforms)
    test_dataset = CholecDataset(test_paths, test_labels, test_transforms)

    return train_dataset, train_num_each, val_dataset, val_num_each, test_dataset, test_num_each
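# A possible usage sketch for get_data above (the pickle filename is an
# illustrative assumption; the *_num_each lists hold per-video frame counts
# that downstream sequence-aware sampling presumably relies on):
(train_dataset, train_num_each, val_dataset, val_num_each,
 test_dataset, test_num_each) = get_data('train_val_test_paths_labels.pkl')
test_loader = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=8,
                                          shuffle=False,
                                          num_workers=4)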
Example #29
def main(args):
    #
    # Image preprocessing
    if args.feature_type == 'plain':
        if args.extractor == 'resnet152caffe-original':
            # Use custom transform:
            transform = transforms.Compose([
                transforms.Resize((args.crop_size, args.crop_size)),
                # Swap color space from RGB to BGR and subtract caffe-specific
                # channel values from each pixel
                transforms.Lambda(lambda img: np.array(img, dtype=np.float32)[
                    ..., [2, 1, 0]] - [103.939, 116.779, 123.68]),
                # Create a torch tensor and put channels first:
                transforms.Lambda(
                    lambda img: torch.from_numpy(img).permute(2, 0, 1)),
                # Cast tensor to correct type:
                transforms.Lambda(lambda img: img.type('torch.FloatTensor'))
            ])

        else:
            # Default transform
            transform = transforms.Compose([
                transforms.Resize((args.crop_size, args.crop_size)),
                transforms.ToTensor(),
                transforms.Normalize((0.485, 0.456, 0.406),
                                     (0.229, 0.224, 0.225))
            ])
    elif args.feature_type == 'avg' or args.feature_type == 'max':
        # Try with no normalization
        # Try with subtracting 0.5 from all values
        # See example here: https://pytorch.org/docs/stable/torchvision/transforms.html

        if args.normalize == 'default':
            transform = transforms.Compose([
                transforms.Resize((args.image_size, args.image_size)),
                # 10-crop implementation as described in PyTorch documentation:
                transforms.TenCrop((args.crop_size, args.crop_size)),
                # Apply next two transforms to each crop in turn and then stack them
                # to a single tensor:
                transforms.Lambda(lambda crops: torch.stack([
                    transforms.Normalize((0.485, 0.456, 0.406),
                                         (0.229, 0.224, 0.225))
                    (transforms.ToTensor()(crop)) for crop in crops
                ]))
            ])
        elif args.normalize == 'skip':
            transform = transforms.Compose([
                transforms.Resize((args.image_size, args.image_size)),
                transforms.TenCrop((args.crop_size, args.crop_size)),
                transforms.Lambda(lambda crops: torch.stack(
                    [transforms.ToTensor()(crop) for crop in crops]))
            ])
        elif args.normalize == 'subtract_half':
            transform = transforms.Compose([
                transforms.Resize((args.image_size, args.image_size)),
                transforms.TenCrop((args.crop_size, args.crop_size)),
                transforms.Lambda(lambda crops: torch.stack(
                    [transforms.ToTensor()(crop) for crop in crops]) - 0.5)
            ])
        else:
            print("Invalid normalization parameter")
            sys.exit(1)

    else:
        print("Invalid feature type specified {}".args.feature_type)
        sys.exit(1)

    print("Creating features of type: {}".format(args.feature_type))

    # Get dataset parameters and vocabulary wrapper:
    dataset_configs = DatasetParams(args.dataset_config_file)
    dataset_params = dataset_configs.get_params(args.dataset)

    # We want to only get the image file name, not the full path:
    for i in dataset_params:
        i.config_dict['return_image_file_name'] = True

    # We ask it to iterate over images instead of all (image, caption) pairs
    data_loader, _ = get_loader(dataset_params,
                                vocab=None,
                                transform=transform,
                                batch_size=args.batch_size,
                                shuffle=False,
                                num_workers=args.num_workers,
                                ext_feature_sets=None,
                                skip_images=False,
                                iter_over_images=True)

    extractor = FeatureExtractor(args.extractor, True).to(device).eval()

    # To open an lmdb handle and prepare it for the right size
    # it needs to fit the total number of elements in the dataset
    # so we set a map_size to a largish value here:
    map_size = int(1e12)  # lmdb expects an integer map size

    lmdb_path = None
    file_name = None

    if args.output_file:
        file_name = args.output_file
    else:
        file_name = '{}-{}-{}-normalize-{}.lmdb'.format(
            args.dataset, args.extractor, args.feature_type, args.normalize)

    os.makedirs(args.output_dir, exist_ok=True)

    lmdb_path = os.path.join(args.output_dir, file_name)

    # Check that we are not overwriting anything
    if os.path.exists(lmdb_path):
        print(
            'ERROR: {} exists, please remove it first if you really want to replace it.'
            .format(lmdb_path))
        sys.exit(1)

    print("Preparing to store extracted features to {}...".format(lmdb_path))

    print("Starting to extract features from dataset {} using {}...".format(
        args.dataset, args.extractor))
    show_progress = sys.stderr.isatty()

    # If feature shape is not 1-dimensional, store feature shape metadata:
    if isinstance(extractor.output_dim, np.ndarray):
        with lmdb.open(lmdb_path, map_size=map_size) as env:
            with env.begin(write=True) as txn:
                txn.put('@vdim'.encode('ascii'), extractor.output_dim)

    for i, (images, _, _, image_ids,
            _) in enumerate(tqdm(data_loader, disable=not show_progress)):

        images = images.to(device)

        # If we are dealing with cropped images, image dimensions are: bs, ncrops, c, h, w
        if images.dim() == 5:
            bs, ncrops, c, h, w = images.size()
            # fuse batch size and ncrops:
            raw_features = extractor(images.view(-1, c, h, w))

            if args.feature_type == 'avg':
                # Average over crops:
                features = raw_features.view(bs, ncrops,
                                             -1).mean(1).data.cpu().numpy()
            elif args.feature_type == 'max':
                # Max over crops:
                features = raw_features.view(bs, ncrops,
                                             -1).max(1)[0].data.cpu().numpy()
        # Otherwise our image dimensions are bs, c, h, w
        else:
            features = extractor(images).data.cpu().numpy()

        # Write to LMDB object:
        with lmdb.open(lmdb_path, map_size=map_size) as env:
            with env.begin(write=True) as txn:
                for j, image_id in enumerate(image_ids):

                    # If output dimension is not a scalar, flatten the array.
                    # When retrieving this feature from the LMDB, developer must take
                    # care to reshape the feature back to the correct dimensions!
                    if isinstance(extractor.output_dim, np.ndarray):
                        _feature = features[j].flatten()
                    # Otherwise treat it as is:
                    else:
                        _feature = features[j]

                    txn.put(str(image_id).encode('ascii'), _feature)

        # Print log info
        if not show_progress and ((i + 1) % args.log_step == 0):
            print('Batch [{}/{}]'.format(i + 1, len(data_loader)))
            sys.stdout.flush()
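# Read-back sketch for the features stored above, assuming the same
# lmdb_path, float32 features, and int64 '@vdim' metadata (all assumptions
# about how the data was serialized):
def load_feature_sketch(lmdb_path, image_id):
    with lmdb.open(lmdb_path, readonly=True) as env:
        with env.begin() as txn:
            vdim = txn.get('@vdim'.encode('ascii'))
            raw = txn.get(str(image_id).encode('ascii'))
    feature = np.frombuffer(raw, dtype=np.float32)
    if vdim is not None:
        # Restore the original shape that was flattened away on write:
        feature = feature.reshape(np.frombuffer(vdim, dtype=np.int64))
    return feature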
def data_loader(args):

    mean_vals = [0.485, 0.456, 0.406]
    std_vals = [0.229, 0.224, 0.225]

    tsfm_train = transforms.Compose([
        transforms.Resize((args.resize_size, args.resize_size)),
        transforms.RandomCrop(args.crop_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean_vals, std_vals)
    ])

    if args.tencrop == 'True':  # the flag arrives from argparse as a string
        func_transforms = [
            transforms.Resize(args.resize_size),
            transforms.TenCrop(args.crop_size),
            transforms.Lambda(lambda crops: torch.stack([
                transforms.Normalize(mean_vals, std_vals)
                (transforms.ToTensor()(crop)) for crop in crops
            ])),
        ]
    else:
        func_transforms = []

        # print input_size, crop_size
        if args.resize_size == 0 or args.crop_size == 0:
            pass
        else:
            func_transforms.append(
                transforms.Resize((args.resize_size, args.resize_size)))
            func_transforms.append(transforms.CenterCrop(args.crop_size))

        func_transforms.append(transforms.ToTensor())
        func_transforms.append(transforms.Normalize(mean_vals, std_vals))

    tsfm_test = transforms.Compose(func_transforms)

    if args.dataset == 'ILSVRC':
        with_image = False
    else:
        with_image = True
    img_train = CUBClsDataset(root=args.data,
                              datalist=args.train_list,
                              transform=tsfm_train,
                              with_image=with_image)
    img_test = CUBCamDataset(root=args.data,
                             datalist=args.test_list,
                             transform=tsfm_test,
                             with_image=with_image)

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            img_train)
    else:
        train_sampler = None

    train_loader = DataLoader(img_train,
                              batch_size=args.batch_size,
                              shuffle=(train_sampler is None),
                              sampler=train_sampler,
                              num_workers=args.workers)

    val_loader = DataLoader(img_test,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=args.workers)

    return train_loader, val_loader, train_sampler