Code example #1
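Counts the network's trainable parameters, then builds one non-shuffled inference DataLoader per dataset listed in the config, skipping entries that are unset or marked 'notest'.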
n_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
print(f'num. of params: {n_params}')

dataset_dict = config['data']
for dataset_name in dataset_dict:
    data_path = dataset_dict[dataset_name]
    if data_path is None or data_path.split(' ')[-1] == 'notest':
        continue
    result_dir = os.path.join(output_dir, dataset_name)
    os.makedirs(result_dir, exist_ok=True)

    dataset = jsonDataset(
        path=data_path,
        classes=target_classes,
        transform=valid_transforms,
        input_image_size=img_size,
        num_crops=-1,
        is_norm_reg_target=config['params']['norm_reg_target'],
        fpn_level=5,
        radius=float(config['params']['radius']))
    assert dataset
    num_data = len(dataset)
    batch_size = int(config['inference']['batch_size'])
    data_loader = torch.utils.data.DataLoader(dataset,
                                              batch_size=batch_size,
                                              shuffle=False,
                                              num_workers=0,
                                              collate_fn=dataset.collate_fn,
                                              pin_memory=True)

    timer_infer = Timer()
Code example #2
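A variant of example #1: the two skip conditions are checked separately, each dataset name is printed as it is processed, and the jsonDataset is built without the FCOS-style target options.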
print(f'num. of params: {n_params}')

dataset_dict = config['data']
for dataset_name in dataset_dict:
    data_path = dataset_dict[dataset_name]
    if data_path is None:
        continue
    if data_path.split(' ')[-1] == 'notest':
        continue
    print(dataset_name)
    result_dir = os.path.join(output_dir, dataset_name)
    os.makedirs(result_dir, exist_ok=True)

    dataset = jsonDataset(path=data_path,
                          classes=target_classes,
                          transform=valid_transforms,
                          input_image_size=img_size,
                          num_crops=-1)
    assert dataset
    num_data = len(dataset)
    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=config['inference']['batch_size'],
        shuffle=False,
        num_workers=0,
        collate_fn=dataset.collate_fn,
        pin_memory=True)

    timer_infer = Timer()
    timer_post = Timer()
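Both loaders above pass jsonDataset.collate_fn, which the snippets never show. A minimal sketch of the usual detection-style collate function (hypothetical; the real implementation may return additional target tensors):

import torch

def collate_fn(batch):
    # Images are resized to a fixed size, so they stack into one tensor;
    # targets stay in a list because box counts vary per image.
    images, targets = zip(*batch)
    return torch.stack(images, dim=0), list(targets)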
Code example #3
File: train_horovod.py  Project: HotaekHan/RetinaNet
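Training-side setup: Albumentations pipelines with ImageNet normalization feed the train/valid jsonDatasets, and a Horovod DistributedSampler partitions the training data across ranks.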
            # A.ShiftScaleRotate(shift_limit=0.1, scale_limit=(-0.15, 0.15), rotate_limit=30, p=0.5,
            #                    border_mode=cv2.BORDER_CONSTANT, value=0),
        ], p=1.0)
    ], p=1.0),

    A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0, p=1.0),
    ToTensorV2()
], bbox_params=bbox_params, p=1.0)
valid_transforms = A.Compose([
    A.Resize(height=img_size[0], width=img_size[1], p=1.0),
    A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0, p=1.0),
    ToTensorV2()
], bbox_params=bbox_params, p=1.0)

train_dataset = jsonDataset(path=config['data']['train'].split(' ')[0], classes=target_classes,
                            transform=train_transforms,
                            input_image_size=img_size,
                            num_crops=config['params']['num_crops'])

valid_dataset = jsonDataset(path=config['data']['valid'].split(' ')[0], classes=target_classes,
                            transform=valid_transforms,
                            input_image_size=img_size,
                            num_crops=config['params']['num_crops'])

assert train_dataset
assert valid_dataset

# Horovod: use DistributedSampler to partition the training data.
train_sampler = torch.utils.data.distributed.DistributedSampler(
    train_dataset, num_replicas=hvd.size(), rank=hvd.rank())

train_loader = torch.utils.data.DataLoader(
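The DataLoader call is cut off at the snippet boundary. Following the standard Horovod recipe, the sampler replaces shuffle=True; a hypothetical completion (config keys assumed from example #6):

train_loader = torch.utils.data.DataLoader(
    train_dataset,
    batch_size=config['params']['batch_size'],    # assumed config key
    sampler=train_sampler,                        # shuffling is handled by the sampler
    num_workers=config['params']['data_worker'],  # assumed config key
    collate_fn=train_dataset.collate_fn,
    pin_memory=True)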
Code example #4
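Dataset selection by config['data']['name']: CIFAR-10 and the landmark data get a seeded 80/20 or 70/30 random_split into train/valid, while the 'its' branch loads pre-split jsonDatasets directly.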
        raise ValueError('cifar10 has 10 classes; the number of excluded classes exceeds the number of classes')

    train_data = CIFAR_split(dir_path='cifar-10-batches-py', num_include=num_classes,
                             train=True)
    num_train = len(train_data)
    num_valid = int(num_train * 0.2)
    num_train = num_train - num_valid

    train_dataset, valid_dataset = torch.utils.data.random_split(dataset=train_data,
                                                                 lengths=[num_train, num_valid],
                                                                 generator=torch.Generator().manual_seed(
                                                                     config['params']['seed']))
elif config['data']['name'] == 'its':
    target_classes = config['params']['classes'].split('|')
    num_classes = len(target_classes)
    train_dataset = jsonDataset(path=config['data']['train'].split(' ')[0], classes=target_classes)

    valid_dataset = jsonDataset(path=config['data']['valid'].split(' ')[0], classes=target_classes)
elif config['data']['name'] == 'landmark':
    train_data = Landmark_dataset(root='/data/kaggle/dacon_landmark_korea/public',
                                  is_train=True)
    num_classes = train_data.num_classes
    num_data = len(train_data)
    num_train = int(num_data * 0.7)
    num_valid = num_data - num_train
    train_dataset, valid_dataset = torch.utils.data.random_split(dataset=train_data,
                                                                 lengths=[num_train, num_valid],
                                                                 generator=torch.Generator().manual_seed(
                                                                     config['params']['seed']))
else:
    raise NotImplementedError('Unsupported Dataset: ' + str(config['data']['name']))
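Passing a torch.Generator seeded from config['params']['seed'] makes the split deterministic: the same seed always produces the same train/valid membership. A minimal self-contained check:

import torch
from torch.utils.data import TensorDataset, random_split

data = TensorDataset(torch.arange(10))
split_a = random_split(data, [7, 3], generator=torch.Generator().manual_seed(42))
split_b = random_split(data, [7, 3], generator=torch.Generator().manual_seed(42))
assert split_a[0].indices == split_b[0].indices  # same seed, same train indices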
Code example #5
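A simpler variant: torchvision's CIFAR10 with an unseeded 80/20 random_split, or train/valid jsonDatasets for the 'its' branch, followed by the training DataLoader.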
    num_classes = 10
    train_data = datasets.CIFAR10(os.getcwd(),
                                  train=True,
                                  download=True,
                                  transform=None)
    num_train = len(train_data)
    num_valid = int(num_train * 0.2)
    num_train = num_train - num_valid

    train_dataset, valid_dataset = torch.utils.data.random_split(
        train_data, [num_train, num_valid])
elif config['data']['name'] == 'its':
    target_classes = config['params']['classes'].split('|')
    num_classes = len(target_classes)
    train_dataset = jsonDataset(path=config['data']['train'].split(' ')[0],
                                classes=target_classes,
                                transform=None,
                                input_image_size=img_size)

    valid_dataset = jsonDataset(path=config['data']['valid'].split(' ')[0],
                                classes=target_classes,
                                transform=None,
                                input_image_size=img_size)
else:
    raise NotImplementedError('Unsupported Dataset: ' +
                              str(config['data']['name']))

assert train_dataset
assert valid_dataset

train_loader = torch.utils.data.DataLoader(
    train_dataset,
Code example #6
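Like example #3 but without the Horovod sampler: the same Albumentations pipelines, with jsonDatasets that also take FCOS-style target options (is_norm_reg_target, fpn_level, radius), and a shuffling train DataLoader with a configurable worker count.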
        ], p=1.0)
    ], p=1.0),

    A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0, p=1.0),
    ToTensorV2()
], bbox_params=bbox_params, p=1.0)
valid_transforms = A.Compose([
    A.Resize(height=img_size[0], width=img_size[1], p=1.0),
    A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0, p=1.0),
    ToTensorV2()
], bbox_params=bbox_params, p=1.0)

train_dataset = jsonDataset(path=config['data']['train'].split(' ')[0], classes=target_classes,
                            transform=train_transforms,
                            input_image_size=img_size,
                            num_crops=config['params']['num_crops'],
                            is_norm_reg_target=config['params']['norm_reg_target'],
                            fpn_level=5,
                            radius=float(config['params']['radius']))

valid_dataset = jsonDataset(path=config['data']['valid'].split(' ')[0], classes=target_classes,
                            transform=valid_transforms,
                            input_image_size=img_size,
                            num_crops=config['params']['num_crops'],
                            is_norm_reg_target=config['params']['norm_reg_target'],
                            fpn_level=5,
                            radius=float(config['params']['radius']))

train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=config['params']['batch_size'],
    shuffle=True, num_workers=config['params']['data_worker'],
Code example #7
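An earlier variant with hard-coded paths to the 'its' train/valid split JSONs; class names are read from config['data'] rather than config['params'].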
elif config['data']['name'] == 'cifar10':
    num_classes = 10
    train_data = datasets.CIFAR10(os.getcwd(),
                                  train=True,
                                  download=True,
                                  transform=None)
    num_train = len(train_data)
    num_valid = int(num_train * 0.2)
    num_train = num_train - num_valid

    train_dataset, valid_dataset = torch.utils.data.random_split(
        train_data, [num_train, num_valid])
elif config['data']['name'] == 'its':
    target_classes = config['data']['classes'].split('|')
    num_classes = len(target_classes)
    train_dataset = jsonDataset(path='data/its_train_split.json',
                                classes=target_classes)

    valid_dataset = jsonDataset(path='data/its_valid_split.json',
                                classes=target_classes)
else:
    raise NotImplementedError('Unsupported Dataset: ' +
                              str(config['data']['name']))

assert train_dataset
assert valid_dataset

train_loader = torch.utils.data.DataLoader(
    train_dataset,
    batch_size=config['params']['batch_size'],
    shuffle=True,
    num_workers=config['params']['workers'],