Example #1
    def __init__(self, img_dir, mask_dir):
        self.size = (128,128)
#        self.root = root
        if not os.path.exists(img_dir):
            raise Exception("[!] {} does not exist.".format(img_dir))
        if not os.path.exists(mask_dir):
            raise Exception("[!] {} does not exist.".format(mask_dir))
        self.img_resize = Compose([
            Scale(self.size, Image.BILINEAR),
            # We can do some colorjitter augmentation here
            # ColorJitter(brightness=0, contrast=0, saturation=0, hue=0),
        ])
        self.label_resize = Compose([
            Scale(self.size, Image.NEAREST),
        ])
        self.img_transform = Compose([
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225]),
        ])
        self.hsv_transform = Compose([
            ToTensor(),
        ])
        self.label_transform = Compose([
            ToLabel(),
            ReLabelBinary(127),
        ])
        # Sort file names so images and masks line up by index
        self.input_paths = sorted(glob('{}/*.jpg'.format(img_dir)))
        self.label_paths = sorted(glob('{}/*.jpg'.format(mask_dir)))
        #self.name = os.path.basename(root)
        if len(self.input_paths) == 0 or len(self.label_paths) == 0:
            raise Exception("No images/labels are found in {} or {}".format(img_dir, mask_dir))
Example #2
    def __call__(self, input, target):
        # Apply the same resizing to both the image and the label map
        input = Resize(self.height, Image.BILINEAR)(input)
        target = Resize(self.height, Image.NEAREST)(target)

        if self.augment:
            # Random horizontal flip with probability 0.5
            hflip = random.random()
            if hflip < 0.5:
                input = input.transpose(Image.FLIP_LEFT_RIGHT)
                target = target.transpose(Image.FLIP_LEFT_RIGHT)

            # Random translation of 0-2 pixels (fill the rest with padding)
            transX = random.randint(-2, 2)
            transY = random.randint(-2, 2)

            # Pad on the left/top by (transX, transY), then crop the same
            # amount from the right/bottom: a translation that preserves the
            # size. Negative values shift in the opposite direction.
            input = ImageOps.expand(input,
                                    border=(transX, transY, 0, 0),
                                    fill=0)
            target = ImageOps.expand(target,
                                     border=(transX, transY, 0, 0),
                                     fill=255)  # pad the label with the ignore value 255
            input = input.crop(
                (0, 0, input.size[0] - transX, input.size[1] - transY))
            target = target.crop(
                (0, 0, target.size[0] - transX, target.size[1] - transY))

        input = ToTensor()(input)
        if self.enc:
            # The encoder-only training stage predicts at 1/8 resolution
            target = Resize(int(self.height / 8), Image.NEAREST)(target)
        target = ToLabel()(target)
        target = Relabel(255, 19)(target)  # map the 255 ignore/padding value to label 19

        return input, target
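
As a usage sketch (the wrapper class name MyCoTransform and its constructor arguments are assumptions; only __call__ appears above), the co-transform is applied to an image/label pair in one call so both receive the same random flip and translation:

# Hypothetical usage; only __call__ is shown in the original snippet.
co_transform = MyCoTransform(enc=False, augment=True, height=512)
input_tensor, target_tensor = co_transform(input_image, target_image)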
Example #3
    def __init__(self, root):
        size = (128,128)
        self.root = root
        if not os.path.exists(self.root):
            raise Exception("[!] {} not exists.".format(root))
        self.img_transform = Compose([
            Scale(size, Image.BILINEAR),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
        self.hsv_transform = Compose([
            Scale(size, Image.BILINEAR),
            ToTensor(),
        ])
        self.label_transform = Compose([
            Scale(size, Image.NEAREST),
            ToLabel(),
            ReLabel(255, 1),
        ])
        # Sort file names so images and labels line up by index
        self.input_paths = sorted(glob(os.path.join(self.root, 'ISIC-2017_Test_v2_Data', '*.jpg')))
        self.label_paths = sorted(glob(os.path.join(self.root, 'ISIC-2017_Test_v2_Part1_GroundTruth', '*.png')))
        #self.name = os.path.basename(root)
        if len(self.input_paths) == 0 or len(self.label_paths) == 0:
            raise Exception("No images/labels are found in {}".format(self.root))
Example #4
    def __getitem__(self, index):
        filename = self.filenames[index]

        with open(image_path(self.images_root, filename, '.jpg'), 'rb') as f:
            image = load_image(f).convert('RGB')
        with open(image_path(self.labels_root, filename, '.png'), 'rb') as f:
            label = load_image(f).convert('P')

        if self.input_transform is None and self.target_transform is None:
            # Transforms are built lazily on the first call and then cached
            # on self, so the random crop offsets sampled below are frozen
            # and reused for every subsequent sample.
            tw, th = 256, 256
            # tw = random.randint(image.size[0]//2, image.size[0])
            # th = random.randint(image.size[1]//2, image.size[1])

            # Pad the image up to at least the target crop size
            padding = (max(0, tw - image.size[0]), max(0, th - image.size[1]))
            image = F.pad(image, padding)

            iw, ih = image.size[0], image.size[1]

            if iw == tw and ih == th:
                bi, bj = 0, 0
            else:
                bi = random.randint(0, ih - th)
                bj = random.randint(0, iw - tw)

            self.input_transform = Compose([
                Crop(bi, bj, th, tw),
                ToTensor(),
                Normalize([.485, .456, .406], [.229, .224, .225]),
            ])
            self.target_transform = Compose([
                Crop(bi, bj, th, tw),
                ToLabel(),
                Relabel(255, 0),
            ])

        if self.input_transform is not None:
            image = self.input_transform(image)
        if self.target_transform is not None:
            label = self.target_transform(label)

        return image, label
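
Note that because the transforms above are cached on self, the random crop window is sampled once and then reused. If a fresh crop per sample is the intent (an assumption, not the original behavior), the offsets would instead be drawn on every call, e.g.:

# Sketch: draw new crop offsets on every __getitem__ call instead of caching.
def random_crop_params(image, tw=256, th=256):
    iw, ih = image.size
    bi = random.randint(0, max(0, ih - th))
    bj = random.randint(0, max(0, iw - tw))
    return bi, bj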
Example #5
def validate(args):
    # Setup Dataloader
    if args.arch == 'nasnetalarge':
        normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],
                                         std=[0.5, 0.5, 0.5])
    else:
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    # Build the transform outside the branch so it is defined for both arches
    img_transform = transforms.Compose([
        # transforms.RandomHorizontalFlip(),
        # transforms.RandomRotation((-30,30)),
        # transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.1, hue=0.1),
        transforms.Scale((256, 256)),  # Scale is the legacy torchvision name for Resize
        # transforms.RandomResizedCrop((256)),
        # transforms.RandomResizedCrop((382), scale=(0.5, 2.0)),
        transforms.ToTensor(),
        normalize,
    ])

    label_transform = transforms.Compose([
        ToLabel(),
        # normalize,
    ])
    if args.dataset == 'pascal':
        num_labels = 20
        loader = pascalVOCLoader('/share/data/vision-greg/mlfeatsdata/CV_Course/', split=args.split, img_transform = img_transform , label_transform = label_transform)
    elif args.dataset == 'coco':
        num_labels = 80
        loader = cocoloader('/share/data/vision-greg/mlfeatsdata/Pytorch/sharedprojects/NIPS-2019/data-convertor/', split=args.split,img_transform = img_transform , label_transform = label_transform)
    else:
        raise AssertionError
    n_classes = loader.n_classes
    valloader = data.DataLoader(loader, batch_size=args.batch_size, num_workers=4, shuffle=True)

    print(len(loader))
    print(normalize)
    print(args.arch)

    orig_resnet = torchvision.models.resnet101(pretrained=True)
    features = list(orig_resnet.children())
    model= nn.Sequential(*features[0:8])
    clsfier = clssimp(2048,n_classes)

    model.load_state_dict(torch.load('savedmodels/' + args.arch + str(args.disc) +  ".pth"))
    clsfier.load_state_dict(torch.load('savedmodels/' + args.arch +'clssegsimp' + str(args.disc) +  ".pth"))

    model.eval()
    clsfier.eval()

    if torch.cuda.is_available():
        model.cuda(0)
        clsfier.cuda(0)

    gts = {i:[] for i in range(0,num_labels)}
    preds = {i:[] for i in range(0,num_labels)}
    # gts, preds = [], []
    softmax = nn.Softmax2d()
    for i, (images, labels) in tqdm(enumerate(valloader)):
        if torch.cuda.is_available():
            images = Variable(images[0].cuda(0))
            labels = Variable(labels[0].cuda(0).float())
        else:
            images = Variable(images[0])
            labels = Variable(labels[0])

        # outputs = softmax(segcls(model(images)))

        outputs = model(images)
        outputs = clsfier(outputs)
        outputs = torch.sigmoid(outputs)  # F.sigmoid is deprecated
        pred = outputs.squeeze().data.cpu().numpy()
        gt = labels.squeeze().data.cpu().numpy()
        #print(gt.shape)
        
        for label in range(0,num_labels):
            gts[label].append(gt[label])
            preds[label].append(pred[label])
        # for gt_, pred_ in zip(gt, pred):
        #     gts.append(gt_)
        #     preds.append(pred_)

    FinalMAPs = []
    for i in range(0, num_labels):
        precision, recall, thresholds = metrics.precision_recall_curve(gts[i], preds[i])
        FinalMAPs.append(metrics.auc(recall, precision))
    print(FinalMAPs)
    print(np.mean(FinalMAPs))
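
For reference, each per-class score computed above is the area under the precision-recall curve (average precision). A tiny self-contained check with made-up data exercises the same two sklearn calls:

# Self-contained check of the PR-AUC computation; the data is made up.
import numpy as np
from sklearn import metrics

gt = np.array([0, 0, 1, 1])               # binary ground truth for one class
scores = np.array([0.1, 0.4, 0.35, 0.8])  # predicted probabilities
precision, recall, _ = metrics.precision_recall_curve(gt, scores)
print(metrics.auc(recall, precision))     # area under the PR curve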
Example #6
BATCH_SIZE   = 1                        # Num of training images per step
NUM_WORKERS  = 1                        # Num of worker processes
NUM_CLASSES  = 22                       # Assumed: 21 VOC classes + void (255 is relabeled to 21 below)
MODEL_NAME   = 'trained-models/state'   # Name of the model to save
DATASET_PATH = 'datasets/VOC2012'       # Path of the dataset

cuda_enabled = torch.cuda.is_available()
print("CUDA_ENABLED: ", cuda_enabled)

input_transform = Compose([
    CenterCrop(256),
    ToTensor(),
    Normalize([.485, .456, .406], [.229, .224, .225]),
])
target_transform = Compose([
    CenterCrop(256),
    ToLabel(),
    Relabel(255, 21),
])

# SegNet, FCN8, FCN16, FCN32, PSPNet, UNet
from networks.SegNet import *
Net = SegNet

model = Net(NUM_CLASSES)
if cuda_enabled:
    model = model.cuda()

model.train()

loader = DataLoader(
    VOCTrain(DATASET_PATH, input_transform, target_transform),
    # The original snippet is truncated here; a plausible completion using
    # the constants defined above:
    num_workers=NUM_WORKERS,
    batch_size=BATCH_SIZE,
    shuffle=True)
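
The snippet ends at the loader; a minimal training-loop sketch under the same setup follows. The loss, optimizer, epoch count, and the assumption that ToLabel yields (N, 1, H, W) LongTensors are all mine, not from the original:

# Minimal training-loop sketch; loss and optimizer choices are assumptions.
import torch
import torch.nn.functional as F
from torch import nn

criterion = nn.NLLLoss(ignore_index=21)   # 255 was relabeled to 21 above
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)

for epoch in range(30):
    for images, labels in loader:
        if cuda_enabled:
            images, labels = images.cuda(), labels.cuda()
        optimizer.zero_grad()
        outputs = model(images)            # per-pixel class scores
        loss = criterion(F.log_softmax(outputs, dim=1), labels.squeeze(1))
        loss.backward()
        optimizer.step()
    torch.save(model.state_dict(), MODEL_NAME + '.pth')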
Example #7
def train(args):
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    if args.augmix:
        train_transform = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomResizedCrop((args.img_size), scale=(0.5, 2.0)),
        ])
    elif args.speckle:
        train_transform = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomResizedCrop((args.img_size), scale=(0.5, 2.0)),
            transforms.ToTensor(),
            transforms.RandomApply(
                [transforms.Lambda(lambda x: speckle_noise_torch(x))], p=0.5),
            normalize,
        ])
    else:
        train_transform = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomResizedCrop((args.img_size), scale=(0.5, 2.0)),
            transforms.ToTensor(),
            normalize,
        ])
    if args.cutout:
        train_transform.transforms.append(transforms.RandomErasing())

    val_transform = transforms.Compose([
        transforms.Scale((args.img_size, args.img_size)),
        transforms.ToTensor(),
        normalize,
    ])

    label_transform = transforms.Compose([
        ToLabel(),
    ])
    print("Loading Data")
    if args.dataset == "deepfashion2":
        loader = fashion2loader(
            "../",
            transform=train_transform,
            label_transform=label_transform,
            #scales=(-1), occlusion=(-1), zoom=(-1), viewpoint=(-1), negate=(True,True,True,True),
            scales=args.scales,
            occlusion=args.occlusion,
            zoom=args.zoom,
            viewpoint=args.viewpoint,
            negate=args.negate,
            #load=True,
        )
        if args.augmix:
            loader = AugMix(loader, args.augmix)
        if args.stylize:
            style_loader = fashion2loader(
                root="../../stylize-datasets/output/",
                transform=train_transform,
                label_transform=label_transform,
                #scales=(-1), occlusion=(-1), zoom=(-1), viewpoint=(-1), negate=(True,True,True,True),
                scales=args.scales,
                occlusion=args.occlusion,
                zoom=args.zoom,
                viewpoint=args.viewpoint,
                negate=args.negate,
                #load=True,
            )
            loader = torch.utils.data.ConcatDataset([loader, style_loader])
        valloader = fashion2loader(
            "../",
            split="validation",
            transform=val_transform,
            label_transform=label_transform,
            #scales=(-1), occlusion=(-1), zoom=(-1), viewpoint=(-1), negate=(True,True,True,True),
            scales=args.scales,
            occlusion=args.occlusion,
            zoom=args.zoom,
            viewpoint=args.viewpoint,
            negate=args.negate,
        )
    elif args.dataset == "deepaugment":
        loader = fashion2loader(
            "../",
            transform=train_transform,
            label_transform=label_transform,
            #scales=(-1), occlusion=(-1), zoom=(-1), viewpoint=(-1), negate=(True,True,True,True),
            scales=args.scales,
            occlusion=args.occlusion,
            zoom=args.zoom,
            viewpoint=args.viewpoint,
            negate=args.negate,
            #load=True,
        )
        loader1 = fashion2loader(
            root="../../deepaugment/EDSR/",
            transform=train_transform,
            label_transform=label_transform,
            #scales=(-1), occlusion=(-1), zoom=(-1), viewpoint=(-1), negate=(True,True,True,True),
            scales=args.scales,
            occlusion=args.occlusion,
            zoom=args.zoom,
            viewpoint=args.viewpoint,
            negate=args.negate,
            #load=True,
        )
        loader2 = fashion2loader(
            root="../../deepaugment/CAE/",
            transform=train_transform,
            label_transform=label_transform,
            #scales=(-1), occlusion=(-1), zoom=(-1), viewpoint=(-1), negate=(True,True,True,True),
            scales=args.scales,
            occlusion=args.occlusion,
            zoom=args.zoom,
            viewpoint=args.viewpoint,
            negate=args.negate,
            #load=True,
        )
        loader = torch.utils.data.ConcatDataset([loader, loader1, loader2])
        if args.augmix:
            loader = AugMix(loader, args.augmix)
        if args.stylize:
            style_loader = fashion2loader(
                root="../../stylize-datasets/output/",
                transform=train_transform,
                label_transform=label_transform,
                #scales=(-1), occlusion=(-1), zoom=(-1), viewpoint=(-1), negate=(True,True,True,True),
                scales=args.scales,
                occlusion=args.occlusion,
                zoom=args.zoom,
                viewpoint=args.viewpoint,
                negate=args.negate,
                #load=True,
            )
            loader = torch.utils.data.ConcatDataset([loader, style_loader])
        valloader = fashion2loader(
            "../",
            split="validation",
            transform=val_transform,
            label_transform=label_transform,
            #scales=(-1), occlusion=(-1), zoom=(-1), viewpoint=(-1), negate=(True,True,True,True),
            scales=args.scales,
            occlusion=args.occlusion,
            zoom=args.zoom,
            viewpoint=args.viewpoint,
            negate=args.negate,
        )

    else:
        raise AssertionError
    print("Loading Done")

    n_classes = args.num_classes
    train_loader = data.DataLoader(loader,
                                   batch_size=args.batch_size,
                                   num_workers=args.num_workers,
                                   drop_last=True,
                                   shuffle=True)

    print("number of images = ", len(train_loader))
    print("number of classes = ", n_classes)

    print("Loading arch = ", args.arch)
    if args.arch == "resnet101":
        orig_resnet = torchvision.models.resnet101(pretrained=True)
        features = list(orig_resnet.children())
        model = nn.Sequential(*features[0:8])
        clsfier = clssimp(2048, n_classes)
    elif args.arch == "resnet50":
        orig_resnet = torchvision.models.resnet50(pretrained=True)
        features = list(orig_resnet.children())
        model = nn.Sequential(*features[0:8])
        clsfier = clssimp(2048, n_classes)
    elif args.arch == "resnet152":
        orig_resnet = torchvision.models.resnet152(pretrained=True)
        features = list(orig_resnet.children())
        model = nn.Sequential(*features[0:8])
        clsfier = clssimp(2048, n_classes)
    elif args.arch == "se":
        model = se_resnet50(pretrained=True)
        features = list(model.children())
        model = nn.Sequential(*features[0:8])
        clsfier = clssimp(2048, n_classes)
    elif args.arch == "BiT-M-R50x1":
        model = bit_models.KNOWN_MODELS[args.arch](head_size=2048,
                                                   zero_head=True)
        model.load_from(np.load(f"{args.arch}.npz"))
        features = list(model.children())
        model = nn.Sequential(*features[0:8])
        clsfier = clssimp(2048, n_classes)
    elif args.arch == "BiT-M-R101x1":
        model = bit_models.KNOWN_MODELS[args.arch](head_size=2048,
                                                   zero_head=True)
        model.load_from(np.load(f"{args.arch}.npz"))
        features = list(model.children())
        model = nn.Sequential(*features[0:8])
        clsfier = clssimp(2048, n_classes)

    if args.load == 1:
        model.load_state_dict(
            torch.load(args.save_dir + args.arch + str(args.disc) + ".pth"))
        clsfier.load_state_dict(
            torch.load(args.save_dir + args.arch + "clssegsimp" +
                       str(args.disc) + ".pth"))

    gpu_ids = os.environ["CUDA_VISIBLE_DEVICES"].split(",")
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    use_dataparallel = len(gpu_ids) > 1
    print("using data parallel = ", use_dataparallel, device, gpu_ids)
    if use_dataparallel:
        gpu_ids = [int(x) for x in range(len(gpu_ids))]
        model = nn.DataParallel(model, device_ids=gpu_ids)
        clsfier = nn.DataParallel(clsfier, device_ids=gpu_ids)
    model.to(device)
    clsfier.to(device)

    if args.finetune:
        if args.opt == "adam":
            optimizer = torch.optim.Adam([{
                'params': clsfier.parameters()
            }],
                                         lr=args.lr)
        else:
            optimizer = torch.optim.SGD(clsfier.parameters(),
                                        args.lr,
                                        momentum=args.momentum,
                                        weight_decay=args.weight_decay,
                                        nesterov=True)
    else:
        if args.opt == "adam":
            optimizer = torch.optim.Adam([{
                'params': model.parameters(),
                'lr': args.lr / 10
            }, {
                'params': clsfier.parameters()
            }],
                                         lr=args.lr)
        else:
            optimizer = torch.optim.SGD(itertools.chain(
                model.parameters(), clsfier.parameters()),
                                        args.lr,
                                        momentum=args.momentum,
                                        weight_decay=args.weight_decay,
                                        nesterov=True)

    def cosine_annealing(step, total_steps, lr_max, lr_min):
        return lr_min + (lr_max - lr_min) * 0.5 * (
            1 + np.cos(step / total_steps * np.pi))

    if args.use_scheduler:
        scheduler = torch.optim.lr_scheduler.LambdaLR(
            optimizer,
            lr_lambda=lambda step: cosine_annealing(
                step,
                args.n_epochs * len(train_loader),
                1,  # since lr_lambda computes multiplicative factor
                1e-6 / (args.lr * args.batch_size / 256.)))

    bceloss = nn.BCEWithLogitsLoss()
    for epoch in range(args.n_epochs):
        for i, (images, labels) in enumerate(tqdm(train_loader)):
            if args.augmix:
                x_mix1, x_orig = images
                images = torch.cat((x_mix1, x_orig), 0).to(device)
            else:
                images = images[0].to(device)
            labels = labels.to(device).float()

            optimizer.zero_grad()

            outputs = model(images)
            outputs = clsfier(outputs)
            if args.augmix:
                l_mix1, outputs = torch.split(outputs, x_orig.size(0))

            if args.loss == "bce":
                if args.augmix:
                    if random.random() > 0.5:
                        loss = bceloss(outputs, labels)
                    else:
                        loss = bceloss(l_mix1, labels)
                else:
                    loss = bceloss(outputs, labels)
            else:
                raise ValueError("Invalid loss, please use --loss bce")

            loss.backward()
            optimizer.step()
            if args.use_scheduler:
                scheduler.step()

        print(len(train_loader))
        print("Epoch [%d/%d] Loss: %.4f" %
              (epoch + 1, args.n_epochs, loss.data))

        save_root = os.path.join(args.save_dir, args.arch)
        if not os.path.exists(save_root):
            os.makedirs(save_root)
        if use_dataparallel:
            torch.save(model.module.state_dict(),
                       os.path.join(save_root,
                                    str(args.disc) + ".pth"))
            torch.save(
                clsfier.module.state_dict(),
                os.path.join(save_root,
                             "clssegsimp" + str(args.disc) + ".pth"))
        else:
            torch.save(model.state_dict(),
                       os.path.join(save_root,
                                    str(args.disc) + ".pth"))
            torch.save(
                clsfier.state_dict(),
                os.path.join(save_root,
                             'clssegsimp' + str(args.disc) + ".pth"))
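
The cosine_annealing helper above returns a multiplicative factor for LambdaLR (which is why lr_max is passed as 1: LambdaLR scales the optimizer's base LR). A quick self-contained check of the schedule, with an assumed base LR of 0.1 over 100 steps and the batch-size scaling omitted:

# Standalone check of the cosine factor; the numbers are assumptions.
import numpy as np

def cosine_annealing(step, total_steps, lr_max, lr_min):
    return lr_min + (lr_max - lr_min) * 0.5 * (
        1 + np.cos(step / total_steps * np.pi))

total_steps, base_lr = 100, 0.1
for step in (0, 50, 100):
    factor = cosine_annealing(step, total_steps, 1, 1e-6 / base_lr)
    print(step, base_lr * factor)  # decays from 0.1 toward 1e-6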
Example #8
def train(args):
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    img_transform = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        # transforms.RandomRotation((-30,30)),
        # transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.1, hue=0.1),
        # transforms.Scale((256,256)),
        # transforms.RandomResizedCrop((256)),
        transforms.RandomResizedCrop((256), scale=(0.5, 2.0)),
        transforms.ToTensor(),
        normalize,
    ])

    label_transform = transforms.Compose([
        ToLabel(),
    ])

    if args.dataset == "pascal":
        loader = pascalVOCLoader("./datasets/pascal/",
                                 img_transform=img_transform,
                                 label_transform=label_transform)
    elif args.dataset == "coco":
        loader = cocoloader("./datasets/coco/",
                            img_transform=img_transform,
                            label_transform=label_transform)
    else:
        raise AssertionError
    n_classes = loader.n_classes
    trainloader = data.DataLoader(loader,
                                  batch_size=args.batch_size,
                                  num_workers=4,
                                  shuffle=True)

    print("number of images = ", len(loader))
    print("number of classes = ", n_classes, " architecture used = ",
          args.arch)

    orig_resnet = torchvision.models.resnet101(pretrained=True)
    features = list(orig_resnet.children())
    model = nn.Sequential(*features[0:8])
    clsfier = clssimp(2048, n_classes)

    if args.load == 1:
        model.load_state_dict(
            torch.load('savedmodels/' + args.arch + str(args.disc) + ".pth"))
        clsfier.load_state_dict(
            torch.load('savedmodels/' + args.arch + 'clssegsimp' +
                       str(args.disc) + ".pth"))

    if torch.cuda.is_available():
        model.cuda(0)
        clsfier.cuda(0)

    # Freeze BatchNorm running statistics (and optionally the affine params)
    freeze_bn_affine = True
    for m in model.modules():
        if isinstance(m, nn.BatchNorm2d):
            m.eval()
            if freeze_bn_affine:
                m.weight.requires_grad = False
                m.bias.requires_grad = False

    optimizer = torch.optim.Adam([{
        'params': model.parameters(),
        'lr': args.l_rate / 10
    }, {
        'params': clsfier.parameters()
    }],
                                 lr=args.l_rate)
    # optimizer = torch.optim.Adam([{'params': clsfier.parameters()}], lr=args.l_rate)

    bceloss = nn.BCEWithLogitsLoss()
    for epoch in range(args.n_epoch):
        for i, (images, labels) in enumerate(trainloader):
            if torch.cuda.is_available():
                images = Variable(images[0].cuda(0))
                labels = Variable(labels[0].cuda(0).float())
            else:
                images = Variable(images[0])
                labels = Variable(labels[0]) - 1  # note: the CUDA branch above does not subtract 1; likely a leftover

            # iteration = len(trainloader)*epoch + i
            # poly_lr_scheduler(optimizer, args.l_rate, iteration)
            optimizer.zero_grad()

            outputs = model(images)
            outputs = clsfier(outputs)
            loss = bceloss(outputs, labels)  # multi-label BCE on pascal labels

            loss.backward()
            optimizer.step()

            if (i + 1) % 20 == 0:
                print("Epoch [%d/%d] Loss: %.4f" %
                      (epoch + 1, args.n_epoch, loss.data))

        torch.save(model.state_dict(),
                   args.save_dir + args.arch + str(args.disc) + ".pth")
        torch.save(
            clsfier.state_dict(),
            args.save_dir + args.arch + 'clssegsimp' + str(args.disc) + ".pth")
Example #9
def validate(args):
    # Setup Dataloader
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    val_transform = transforms.Compose([
        transforms.Scale((args.img_size, args.img_size)),
        transforms.ToTensor(),
        normalize,
    ])

    label_transform = transforms.Compose([
        ToLabel(),
        # normalize,
    ])
    if args.dataset == "deepfashion2":
        if not args.concat_data:
            valloader = fashion2loader(
                "../",
                split="validation",
                transform=val_transform,
                label_transform=label_transform,
                #scales=(-1), occlusion=(-1), zoom=(-1), viewpoint=(-1),
                scales=args.scales,
                occlusion=args.occlusion,
                zoom=args.zoom,
                viewpoint=args.viewpoint,
                negate=args.negate,
            )
        else:  # let's concat train and val for appropriate labels
            loader1 = fashion2loader(
                "../",
                transform=val_transform,
                label_transform=label_transform,
                #scales=(-1), occlusion=(-1), zoom=(-1), viewpoint=(-1), negate=True,
                scales=args.scales,
                occlusion=args.occlusion,
                zoom=args.zoom,
                viewpoint=args.viewpoint,
                negate=args.negate,
                #load=True,
            )
            loader2 = fashion2loader(
                "../",
                split="validation",
                transform=val_transform,
                label_transform=label_transform,
                #scales=(-1), occlusion=(-1), zoom=(-1), viewpoint=(-1), negate=True,
                scales=args.scales,
                occlusion=args.occlusion,
                zoom=args.zoom,
                viewpoint=args.viewpoint,
                negate=args.negate,
            )
            valloader = torch.utils.data.ConcatDataset([loader1, loader2])
    else:
        raise AssertionError

    n_classes = args.num_classes
    valloader = data.DataLoader(valloader,
                                batch_size=args.batch_size,
                                num_workers=4,
                                shuffle=False)

    print("Number of samples = ", len(valloader))
    print("Loading arch = ", args.arch)

    if args.arch == 'resnet101':
        orig_resnet = torchvision.models.resnet101(pretrained=True)
        features = list(orig_resnet.children())
        model = nn.Sequential(*features[0:8])
        clsfier = clssimp(2048, n_classes)
    elif args.arch == 'resnet50':
        orig_resnet = torchvision.models.resnet50(pretrained=True)
        features = list(orig_resnet.children())
        model = nn.Sequential(*features[0:8])
        clsfier = clssimp(2048, n_classes)
    elif args.arch == 'resnet152':
        orig_resnet = torchvision.models.resnet152(pretrained=True)
        features = list(orig_resnet.children())
        model = nn.Sequential(*features[0:8])
        clsfier = clssimp(2048, n_classes)
    elif args.arch == 'se':
        model = se_resnet50(pretrained=True)
        features = list(model.children())
        model = nn.Sequential(*features[0:8])
        clsfier = clssimp(2048, n_classes)
    elif args.arch == "BiT-M-R50x1":
        model = bit_models.KNOWN_MODELS[args.arch](head_size=2048,
                                                   zero_head=True)
        model.load_from(np.load(f"{args.arch}.npz"))
        features = list(model.children())
        model = nn.Sequential(*features[0:8])
        clsfier = clssimp(2048, n_classes)
    elif args.arch == "BiT-M-R101x1":
        model = bit_models.KNOWN_MODELS[args.arch](head_size=2048,
                                                   zero_head=True)
        model.load_from(np.load(f"{args.arch}.npz"))
        features = list(model.children())
        model = nn.Sequential(*features[0:8])
        clsfier = clssimp(2048, n_classes)

    model.load_state_dict(
        torch.load(args.save_dir + args.arch + "/" + str(args.disc) + ".pth"))
    clsfier.load_state_dict(
        torch.load(args.save_dir + args.arch + "/" + 'clssegsimp' +
                   str(args.disc) + ".pth"))

    model.eval()
    clsfier.eval()

    if torch.cuda.is_available():
        model.cuda(0)
        clsfier.cuda(0)

    gts = {i: [] for i in range(0, n_classes)}
    preds = {i: [] for i in range(0, n_classes)}
    # gts, preds = [], []
    for i, (images, labels) in tqdm(enumerate(valloader)):
        images = images[0].cuda()
        labels = labels.cuda().float()

        outputs = model(images)
        outputs = clsfier(outputs)
        outputs = torch.sigmoid(outputs)  # F.sigmoid is deprecated
        pred = outputs.data.cpu().numpy()
        gt = labels.data.cpu().numpy()

        for label in range(0, n_classes):
            gts[label].extend(gt[:, label])
            preds[label].extend(pred[:, label])

    FinalMAPs = []
    for i in range(0, n_classes):
        precision, recall, thresholds = metrics.precision_recall_curve(
            gts[i], preds[i])
        FinalMAPs.append(metrics.auc(recall, precision))
    print(FinalMAPs)

    # Weight each per-class AP by the class frequency in the ground truth
    gts = np.array([gts[i] for i in range(len(gts))])
    FinalMAPs = np.array(FinalMAPs)
    denom = gts.sum()
    gts = gts.sum(axis=-1)
    gts = gts / denom
    res = np.nan_to_num(FinalMAPs * gts)
    print(res.sum())
Example #10
def validate(args):
    # Setup Dataloader
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    img_transform = transforms.Compose([
        transforms.Scale((256, 256)),
        transforms.ToTensor(),
        normalize,
    ])

    label_transform = transforms.Compose([
        ToLabel(),
        # normalize,
    ])

    if args.dataset == 'pascal':
        loader = pascalVOCLoader('./datasets/pascal/',
                                 split=args.split,
                                 img_transform=img_transform,
                                 label_transform=label_transform)
        test_loader = pascalVOCLoader('./datasets/pascal/',
                                      split="voc12-test",
                                      img_transform=img_transform,
                                      label_transform=None)
    elif args.dataset == 'coco':
        loader = cocoloader('./datasets/coco/',
                            split=args.split,
                            img_transform=img_transform,
                            label_transform=label_transform)
        test_loader = cocoloader('./datasets/coco/',
                                 split="test",
                                 img_transform=img_transform,
                                 label_transform=None)
    else:
        raise AssertionError

    n_classes = loader.n_classes
    valloader = data.DataLoader(loader,
                                batch_size=args.batch_size,
                                num_workers=4,
                                shuffle=False)
    testloader = data.DataLoader(test_loader,
                                 batch_size=args.batch_size,
                                 num_workers=4,
                                 shuffle=False)

    print(len(loader))
    print(normalize)
    print(args.arch)

    orig_resnet = torchvision.models.resnet101(pretrained=True)
    features = list(orig_resnet.children())
    model = nn.Sequential(*features[0:8])
    clsfier = clssimp(2048, n_classes)

    model.load_state_dict(
        torch.load('savedmodels/' + args.arch + str(args.disc) + ".pth"))
    clsfier.load_state_dict(
        torch.load('savedmodels/' + args.arch + 'clssegsimp' + str(args.disc) +
                   ".pth"))

    model.eval()
    clsfier.eval()

    if torch.cuda.is_available():
        model.cuda(0)
        clsfier.cuda(0)

    save_name = "test" + args.dataset
    in_scores = get_predictions(testloader,
                                model,
                                clsfier,
                                args.ood,
                                name=save_name)

    ood_root = "./datasets/ImageNet22k/images/"
    ood_subfolders = [
        "n02069412", "n02431122", "n02392434", "n02508213", "n01970164",
        "n01937909", "n12641413", "n12649317", "n12285512", "n11978713",
        "n07691650", "n07814390", "n12176953", "n12126084", "n12132956",
        "n12147226", "n12356395", "n12782915", "n02139199", "n01959492"
    ]

    #aurocs = [];
    out_scores = []
    for folder in ood_subfolders:
        root = os.path.join(ood_root, folder)
        imgloader = PlainDatasetFolder(root, transform=img_transform)
        loader = data.DataLoader(imgloader,
                                 batch_size=args.batch_size,
                                 num_workers=4,
                                 shuffle=False)
        save_name = args.dataset + folder
        out_scores.extend(
            get_predictions(loader, model, clsfier, args.ood, name=save_name))

    if args.ood == "lof":
        save_name = "val" + args.dataset
        val_scores = get_predictions(valloader,
                                     model,
                                     clsfier,
                                     args.ood,
                                     name=save_name)

        scores = anom_utils.get_localoutlierfactor_scores(
            val_scores, in_scores, out_scores)
        in_scores = scores[:len(in_scores)]
        out_scores = scores[-len(out_scores):]

    if args.ood == "isol":
        save_name = "val" + args.dataset
        val_scores = get_predictions(valloader,
                                     model,
                                     clsfier,
                                     args.ood,
                                     name=save_name)

        scores = anom_utils.get_isolationforest_scores(val_scores, in_scores,
                                                       out_scores)
        in_scores = scores[:len(in_scores)]
        out_scores = scores[-len(out_scores):]

    #if args.flippin:
    #in_scores = - np.asarray(out_scores)
    #out_scores = - np.asarray(in_scores)

    auroc, aupr, fpr = anom_utils.get_and_print_results(in_scores, out_scores)
    #np.save('./in_scores', in_scores)
    #np.save('./out_scores', out_scores)
    print("auroc = ", auroc, " aupr = ", aupr, " fpr = ", fpr)