Example no. 1
0
def predictAll(model_name, model_class, weight_pth, image_size, normalize):
    """Run TTA prediction over the whole test set and cache the result.

    Loads ``model_class`` weights from a checkpoint dict (``'state_dict'``
    key), builds the test-time-augmentation pipelines (plain, hflip,
    five-crop, flipped five-crop), predicts with each, and saves the stacked
    labels/probabilities to ``../feature/<model_name>/all_prediction.pth``.

    Args:
        model_name: name used for logging and the output directory.
        model_class: passed to ``get_model`` to build the network.
        weight_pth: path to a checkpoint containing a ``'state_dict'`` entry.
        image_size: side length the TTA transforms resize/crop to.
        normalize: a ``transforms.Normalize`` instance applied by every pipeline.
    """
    print(f'[+] {model_name} predictAll.')
    model = get_model(model_class)
    # This checkpoint format nests the weights under 'state_dict'
    # (unlike `predict` below, whose checkpoints are bare state dicts).
    model.load_state_dict(torch.load(weight_pth)['state_dict'])
    model.eval()
    print(f'{model_name} load state dict done')

    tta_preprocess = [preprocess(normalize, image_size),
                      preprocess_hflip(normalize, image_size)]
    tta_preprocess += make_transforms([transforms.Resize((image_size + 20, image_size + 20))],
                                      [transforms.ToTensor(), normalize],
                                      five_crops(image_size))
    tta_preprocess += make_transforms([transforms.Resize((image_size + 20, image_size + 20))],
                                      [HorizontalFlip(), transforms.ToTensor(), normalize],
                                      five_crops(image_size))
    print(f'[+] tta size: {len(tta_preprocess)}')

    data_loaders = []
    for transform in tta_preprocess:
        # NOTE(review): test_inputs/test_labels/BATCH_SIZE are module-level
        # globals — confirm they are defined before this is called.
        test_dataset = MyDataset(test_inputs, test_labels, transform=transform)
        data_loader = DataLoader(dataset=test_dataset, num_workers=16,
                                 batch_size=BATCH_SIZE,
                                 shuffle=False)
        data_loaders.append(data_loader)
        print('add transforms')

    lx, px = utils.predict_tta(model, data_loaders)
    data = {
        'lx': lx.cpu(),
        'px': px.cpu(),
    }
    # exist_ok=True avoids the exists()/makedirs() race of the original check.
    out_dir = os.path.join('../feature', model_name)
    os.makedirs(out_dir, exist_ok=True)
    torch.save(data, os.path.join(out_dir, 'all_prediction.pth'))
    print(f'{model_name} Predict Done')
def preprocess_hflip(normalize, image_size):
    """Build the horizontally-flipped eval pipeline.

    Resizes to ``image_size`` x ``image_size``, applies a forced horizontal
    flip, converts to tensor, then normalizes.
    """
    steps = [
        transforms.Resize((image_size, image_size)),
        HorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ]
    return transforms.Compose(steps)
def predict(model_name, model_class, weight_pth, image_size, normalize):
    """Run TTA prediction for the 'test' and 'val' splits of FurnitureDataset.

    Loads a bare state-dict checkpoint, builds the TTA pipelines (plain,
    hflip, five-crop, flipped five-crop), then predicts and saves one
    ``<model_name>_<split>_prediction.pth`` file per split.

    Args:
        model_name: prefix for the saved prediction files.
        model_class: passed to ``get_model`` to build the network.
        weight_pth: path to a plain state-dict checkpoint.
        image_size: side length the TTA transforms resize/crop to.
        normalize: a ``transforms.Normalize`` applied by every pipeline.
    """
    print(f'[+] predict {model_name}')
    model = get_model(model_class)
    model.load_state_dict(torch.load(weight_pth))
    model.eval()

    tta_preprocess = [
        preprocess(normalize, image_size),
        preprocess_hflip(normalize, image_size)
    ]
    tta_preprocess += make_transforms(
        [transforms.Resize((image_size + 20, image_size + 20))],
        [transforms.ToTensor(), normalize], five_crops(image_size))
    tta_preprocess += make_transforms(
        [transforms.Resize((image_size + 20, image_size + 20))],
        [HorizontalFlip(), transforms.ToTensor(), normalize],
        five_crops(image_size))
    print(f'[+] tta size: {len(tta_preprocess)}')

    # The original duplicated the loader-build/predict/save sequence for the
    # two splits verbatim; it is factored into one helper.
    for split in ('test', 'val'):
        _predict_split(model, model_name, split, tta_preprocess)


def _predict_split(model, model_name, split, tta_preprocess):
    """Predict one dataset split with every TTA pipeline and save the result.

    Saves a dict with 'lx' (labels) and 'px' (probabilities), both moved to
    CPU, as ``<model_name>_<split>_prediction.pth``.
    """
    data_loaders = [
        DataLoader(dataset=FurnitureDataset(split, transform=transform),
                   num_workers=1,
                   batch_size=BATCH_SIZE,
                   shuffle=False)
        for transform in tta_preprocess
    ]
    lx, px = utils.predict_tta(model, data_loaders)
    data = {
        'lx': lx.cpu(),
        'px': px.cpu(),
    }
    torch.save(data, f'{model_name}_{split}_prediction.pth')
Example no. 4
0
    def __len__(self):
        """Return the number of samples in the dataset."""
        # NOTE(review): assumes self.size is set by __init__, which is not
        # visible in this chunk — confirm.
        return self.size


from split import StratifiedSplit

# Stratified train/validation split of the training frame; test frame is
# read as-is. `stats` holds per-image pixel statistics used for normalization.
splitter = StratifiedSplit()
trn_df, val_df = splitter(pd.read_csv(ROOT / 'train.csv'))
tst_df = pd.read_csv(ROOT / 'test.csv')
stats = build_stats_index(ROOT / 'pixel_stats.csv')

sz = 512


def _site_images(df, site):
    """Build one site's augmented dataset: rare flips + pixel-stats norm.

    The original built this identical pipeline twice inline (once per site);
    only the ``site`` argument differed.
    """
    return AugmentedImages(
        ds=RxRxImages(df, ROOT, site=site),
        tr=composer(
            [
                HorizontalFlip(p=0.1),
                VerticalFlip(p=0.1),
                PixelStatsNorm(stats, channels_first=False),
            ],
            resize=sz,
            rescale=False,
        ),
    )


# Training dataset pairs the site-1 and site-2 views of each sample.
trn_ds = TwoSiteImages(ds1=_site_images(trn_df, site=1),
                       ds2=_site_images(trn_df, site=2))
val_ds = TwoSiteImages(ds1=AugmentedImages(
    ds=RxRxImages(val_df, ROOT, site=1),
        if self.preffix != "test":
            target = row['label_id'] - 1 if 'label_id' in row else -1
            return img, target
        else:
            return img


# Channel-wise normalization; these mean/std values are the widely used
# ImageNet statistics (pretrained-backbone convention).
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
# Deterministic eval pipeline: fixed resize, no augmentation.
preprocess = transforms.Compose([
    transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)),
    transforms.ToTensor(), normalize
])
# Same as `preprocess` but with a forced horizontal flip (TTA variant).
preprocess_hflip = transforms.Compose([
    transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)),
    HorizontalFlip(),
    transforms.ToTensor(), normalize
])
# Light random augmentation: oversize by 20px, then random crop and hflip.
preprocess_for_test = transforms.Compose([
    transforms.Resize((IMAGE_SIZE + 20, IMAGE_SIZE + 20)),
    transforms.RandomCrop((IMAGE_SIZE, IMAGE_SIZE)),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(), normalize
])
# Training augmentation: oversize by 30px, random crop/flip + color jitter.
preprocess_with_augmentation = transforms.Compose([
    transforms.Resize((IMAGE_SIZE + 30, IMAGE_SIZE + 30)),
    transforms.RandomCrop((IMAGE_SIZE, IMAGE_SIZE)),
    transforms.RandomHorizontalFlip(),
    transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3),
    transforms.ToTensor(), normalize
])
Example no. 6
0
# Channel-wise normalization; these mean/std values are the widely used
# ImageNet statistics (pretrained-backbone convention).
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])

# Alternative symmetric normalization, kept for reference but unused:
# normalize = transforms.Normalize(
#   mean=[0.5, 0.5, 0.5],
#   std=[0.5, 0.5, 0.5]
# )

# Deterministic eval pipeline: fixed resize, no augmentation.
preprocess = transforms.Compose([
    transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)),
    transforms.ToTensor(), normalize
])
# Same as `preprocess` but with a forced horizontal flip (TTA variant).
preprocess_hflip = transforms.Compose([
    transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)),
    HorizontalFlip(),
    transforms.ToTensor(), normalize
])
# Training augmentation: oversize by 20px, random crop/flip + color jitter.
preprocess_with_augmentation = transforms.Compose([
    transforms.Resize((IMAGE_SIZE + 20, IMAGE_SIZE + 20)),
    transforms.RandomCrop((IMAGE_SIZE, IMAGE_SIZE)),
    transforms.RandomHorizontalFlip(),
    transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3),
    transforms.ToTensor(), normalize
])

preprocess_five_crop = transforms.Compose([
    transforms.Resize((IMAGE_SIZE + 20, IMAGE_SIZE + 20)),
    transforms.FiveCrop(IMAGE_SIZE),  # this is a list of PIL Images
    transforms.Lambda(lambda crops: torch.stack(
        [normalize(transforms.ToTensor()(crop)) for crop in crops]))