VAL_TRANSFORMS = [
    RandomResizedCrop(350, scale=(0.7, 1.0), interpolation=3),
    RandomHorizontalFlip(p=0.5),
    ToTensor(),
    Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
]

BATCH_SIZE = 32
NUM_WORKERS = 15

dataset = FilesFromCsvDataset("output/filtered_train_dataset.csv")
targets = [y - 1 for _, y in dataset]  # shift labels to 0-based class indices
train_sampler = SmartWeightedRandomSampler(targets)
TRAIN_LOADER = get_data_loader(dataset,
                               data_transform=TRAIN_TRANSFORMS,
                               batch_size=BATCH_SIZE,
                               sampler=train_sampler,
                               num_workers=NUM_WORKERS,
                               cuda=True)
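SmartWeightedRandomSampler is a project-specific sampler that is not shown in this snippet. As a rough sketch of the class-balancing behaviour such a sampler usually provides, it can be approximated with torch.utils.data.WeightedRandomSampler; the helper name and the inverse-frequency weighting below are assumptions, not the project's implementation.

import torch
from torch.utils.data import WeightedRandomSampler

def make_class_balanced_sampler(targets):
    # Weight each sample by the inverse frequency of its class so that rare
    # classes are drawn roughly as often as frequent ones.
    targets = torch.as_tensor(targets)
    class_counts = torch.bincount(targets)
    sample_weights = 1.0 / class_counts[targets].float()
    return WeightedRandomSampler(sample_weights, num_samples=len(targets), replacement=True)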

val_dataset = FilesFromCsvDataset("output/filtered_val_dataset.csv")
VAL_LOADER = get_data_loader(val_dataset,
                             data_transform=VAL_TRANSFORMS,
                             batch_size=BATCH_SIZE,
                             num_workers=NUM_WORKERS,
                             cuda=True)

model_checkpoint = (
    Path(OUTPUT_PATH) / "train_inceptionv4_350_smart_sampler_resized_crop" /
    "20180502_0902" /
    "model_FurnitureInceptionV4_350_1_val_loss=0.7726298.pth").as_posix()
MODEL = torch.load(model_checkpoint)
Example #2
VAL_TRANSFORMS = [
    RandomResizedCrop(350, scale=(0.8, 1.0)),
    RandomHorizontalFlip(p=0.5),
    ToTensor(),
    Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
]


BATCH_SIZE = 180
NUM_WORKERS = 15


dataset = FilesFromCsvDataset("output/filtered_train_dataset.csv")
TRAIN_LOADER = get_data_loader(dataset,
                               data_transform=TRAIN_TRANSFORMS,
                               batch_size=BATCH_SIZE,
                               num_workers=NUM_WORKERS,
                               pin_memory=True)

val_dataset = FilesFromCsvDataset("output/filtered_val_dataset.csv")
VAL_LOADER = get_data_loader(val_dataset,
                             data_transform=VAL_TRANSFORMS,
                             batch_size=BATCH_SIZE,
                             num_workers=NUM_WORKERS,
                             pin_memory=True)

MODEL = FurnitureSqueezeNet350(pretrained=True)

N_EPOCHS = 100

OPTIM = Adam(
Example #3
he_map = {v: i for i, v in enumerate(hard_examples)}


def hard_examples_only(target):
    return he_map.get(target, len(hard_examples))
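For illustration only (hypothetical values, not from the project): with hard_examples = [7, 42, 99] the remapping behaves as follows.

# hard_examples_only(7)  -> 0
# hard_examples_only(42) -> 1
# hard_examples_only(5)  -> 3 == len(hard_examples), i.e. every class that is not
#                              a hard example is folded into one extra "other" target.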


dataset = FilesFromCsvDataset("output/unique_filtered_train_dataset.csv")
train_sampler = get_weighted_train_sampler(dataset,
                                           hard_examples_classes_weight,
                                           n_samples=len(dataset))
TRAIN_LOADER = get_data_loader(dataset,
                               data_transform=TRAIN_TRANSFORMS,
                               target_transform=hard_examples_only,
                               sampler=train_sampler,
                               batch_size=BATCH_SIZE,
                               num_workers=NUM_WORKERS,
                               pin_memory=True)

val_dataset = FilesFromCsvDataset("output/unique_filtered_val_dataset.csv")
val_sampler = get_weighted_train_sampler(val_dataset,
                                         hard_examples_classes_weight,
                                         n_samples=len(val_dataset))
VAL_LOADER = get_data_loader(val_dataset,
                             data_transform=VAL_TRANSFORMS,
                             target_transform=hard_examples_only,
                             sampler=val_sampler,
                             batch_size=BATCH_SIZE,
                             num_workers=NUM_WORKERS,
                             pin_memory=True)
Example #4
n_classes = 128
dataset = FilesFromCsvDataset("output/filtered_train_dataset.csv")
dataset = TransformedDataset(dataset,
                             transforms=read_image,
                             target_transforms=lambda y: y - 1)
dataset = TransformedDataset(dataset, transforms=Compose(TRAIN_TRANSFORMS))
dataset = RandomMultiImageAugDataset(dataset,
                                     n_classes,
                                     aug_fn=partial(basic_random_half_blend,
                                                    alpha=0.3))
dataset = TransformedDataset(dataset, transforms=Compose(common_transform))
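basic_random_half_blend is not defined in this snippet; judging by its name, the alpha argument, and the per-class dataset wrapper above, it blends pairs of images (likely of the same class). A minimal sketch over PIL images, with the exact behaviour treated as an assumption:

from PIL import Image

def half_blend(img_a, img_b, alpha=0.3):
    # Blend two equal-size PIL images: result = (1 - alpha) * img_a + alpha * img_b.
    return Image.blend(img_a, img_b, alpha)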

TRAIN_LOADER = get_data_loader(dataset,
                               data_transform=None,
                               batch_size=BATCH_SIZE,
                               num_workers=NUM_WORKERS,
                               cuda=True)

val_dataset = FilesFromCsvDataset("output/filtered_val_dataset.csv")
VAL_LOADER = get_data_loader(val_dataset,
                             data_transform=VAL_TRANSFORMS + common_transform,
                             batch_size=BATCH_SIZE,
                             num_workers=NUM_WORKERS,
                             cuda=True)

MODEL = FurnitureSqueezeNet350(pretrained=True)

N_EPOCHS = 100

OPTIM = Adam(params=[
Example #5
BATCH_SIZE = 32
NUM_WORKERS = 8

dataset = TrainvalFilesDataset(DATASET_PATH / "train_400x400")
dataset = TransformedDataset(dataset,
                             transforms=lambda x: x,
                             target_transforms=lambda y: y - 1)
reduced_train_indices = get_reduced_train_indices(dataset,
                                                  max_n_samples_per_class=1250,
                                                  seed=SEED)
del dataset
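get_reduced_train_indices is a project helper that is not shown here. A minimal sketch of the per-class capping it appears to perform, written against a plain list of targets rather than the dataset object (the helper name, signature, and random tie-breaking are assumptions):

import random
from collections import defaultdict

def cap_indices_per_class(targets, max_n_samples_per_class, seed):
    # Group sample indices by class label, then keep at most
    # `max_n_samples_per_class` randomly chosen indices per class.
    rng = random.Random(seed)
    by_class = defaultdict(list)
    for idx, y in enumerate(targets):
        by_class[y].append(idx)
    kept = []
    for indices in by_class.values():
        rng.shuffle(indices)
        kept.extend(indices[:max_n_samples_per_class])
    return sorted(kept)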

TRAIN_LOADER = get_data_loader(dataset_path=DATASET_PATH / "train_400x400",
                               data_transform=TRAIN_TRANSFORMS,
                               sample_indices=reduced_train_indices,
                               batch_size=BATCH_SIZE,
                               num_workers=NUM_WORKERS,
                               cuda=True)

VAL_LOADER = get_data_loader(dataset_path=DATASET_PATH / "val_400x400",
                             data_transform=VAL_TRANSFORMS,
                             batch_size=BATCH_SIZE,
                             num_workers=NUM_WORKERS,
                             cuda=True)

MODEL = FurnitureSqueezeNet350(pretrained=True)

N_EPOCHS = 100

OPTIM = SGD(params=[
    {
Example #6
# Check training dataflow
from pathlib import Path
from torchvision.transforms import ToTensor
from common.data_loaders import get_data_loader


SEED = 12345
DEBUG = True

OUTPUT_PATH = "output/train_dataflow"
DATASET_PATH = Path("/home/fast_storage/imaterialist-challenge-furniture-2018/")


N_CLASSES = 128
N_EPOCHS = 1


BATCH_SIZE = 1
NUM_WORKERS = 8

DATA_LOADER = get_data_loader(
    dataset_path=DATASET_PATH / "train",
    data_transform=[ToTensor(), ],
    batch_size=BATCH_SIZE,
    num_workers=NUM_WORKERS,
    cuda=False)
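To exercise the dataflow this config sets up, a short loop like the one below can be added; it is not part of the original file and assumes the loader yields (image, target) batches.

if __name__ == "__main__":
    for i, (x, y) in enumerate(DATA_LOADER):
        print(i, tuple(x.shape), y)
        if i >= 2:
            break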
Example #7
batch_size = 64
num_workers = 15

train_dataset = FilesFromCsvDataset("output/unique_filtered_train_dataset.csv")
val_dataset = FilesFromCsvDataset("output/unique_filtered_val_dataset.csv")
trainval_dataset = ConcatDataset([train_dataset, val_dataset])


# #### Stratified split:
fold_index = 2
n_splits = 4
train_index, val_index = get_trainval_indices(trainval_dataset,
                                              fold_index=fold_index, n_splits=n_splits,
                                              xy_transforms=None,
                                              batch_size=batch_size, n_workers=8,
                                              seed=SEED)
# ####
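get_trainval_indices is project-specific; ignoring its batching arguments, a stratified fold split can be sketched with scikit-learn as below (the function name and the way targets are obtained are assumptions):

import numpy as np
from sklearn.model_selection import StratifiedKFold

def stratified_fold(targets, fold_index, n_splits, seed):
    # Return (train_indices, val_indices) for the requested fold of a
    # stratified K-fold split over the sample targets.
    skf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
    folds = list(skf.split(np.zeros(len(targets)), np.asarray(targets)))
    return folds[fold_index]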

TEST_LOADER = get_data_loader(trainval_dataset,
                              data_transform=TEST_TRANSFORMS,
                              sample_indices=val_index,
                              batch_size=batch_size,
                              num_workers=num_workers,
                              pin_memory="cuda" in DEVICE)


MODEL = (Path("output") / "cv" / "nasnetlarge_350_resized_crop" / "fold_2" / "20180525_1653" /
         "model_FurnitureNASNetALarge350_9_val_loss=0.4992649.pth").as_posix()

N_TTA = 7
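N_TTA is a test-time-augmentation count: with a stochastic test transform, each image is predicted several times and the class probabilities are averaged. A minimal sketch of that averaging loop (the function name, device handling, and the assumption that the loader yields batches in the same order on every pass are not from the project):

import torch

def predict_with_tta(model, make_loader, n_tta, device="cuda"):
    # Run n_tta stochastic passes over the data and average the softmax outputs.
    model.eval()
    summed = None
    with torch.no_grad():
        for _ in range(n_tta):
            probs = []
            for x, _ in make_loader():
                probs.append(torch.softmax(model(x.to(device)), dim=1).cpu())
            probs = torch.cat(probs)
            summed = probs if summed is None else summed + probs
    return summed / n_tta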