def main(args,
         seed=1,
         dataset_size=0,
         vae_batch_size=64,
         use_pca=False,
         cache_features_on_disk=True,
         sequential_dataset=False,
         transform_class=feature_transforms.MaximumTransform):
    args.cuda = not args.no_cuda and torch.cuda.is_available()

    write_config(args.experiment_name)
    stats_logger = StatsLogger(args.experiment_name)

    np.random.seed(seed)
    torch.manual_seed(seed)

    device = torch.device('cuda' if args.cuda else 'cpu')
    print(f'Running on {device}')

    if args.cuda:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    shuffle = not sequential_dataset
    kwargs = {'num_workers': 2, 'pin_memory': True} if args.cuda else {}
    loader = pyu.get_loader(batch_size=args.batch_size,
                            sequential=sequential_dataset,
                            shuffle=shuffle,
                            **kwargs)

    # iterator_len defines the length of an epoch for the dataset wrapper
    if sequential_dataset:
        if dataset_size <= 0:
            dataset_size = len(loader.dataset)
        else:
            loader.dataset.iterator_len = dataset_size
    else:
        if dataset_size <= 0:
            loader.dataset.iterator_len = len(loader.dataset.dataset.images)
            dataset_size = loader.dataset.iterator_len
        else:
            loader.dataset.iterator_len = dataset_size

    print(f'Training with {loader.dataset.iterator_len} images')

    # Report training progress to the AIcrowd evaluation harness
    aicrowd_helpers.execution_start()
    aicrowd_helpers.register_progress(0.)

    feature_extractor = feature_extractors.get_feature_extractor()
    feature_extractor.to(device)
    feature_transform = transform_class()
    feature_transform.to(device)

    # Extract and transform features from images
    dataset = get_transformed_feature_dataset(feature_extractor,
                                              feature_transform, loader,
                                              dataset_size,
                                              cache_features_on_disk,
                                              args.features_dir, use_pca,
                                              device, args.log_interval)
    # Train VAE on the aggregated features
    loader = DataLoader(dataset,
                        batch_size=vae_batch_size,
                        shuffle=True,
                        num_workers=2,
                        pin_memory=True)
    aicrowd_helpers.register_progress(0.40)

    model = train_vae(loader, device, stats_logger)

    aicrowd_helpers.register_progress(0.90)

    # Export the representation extractor
    vae_extractor = vae.get_representation_extractor(model)
    pyu.export_model(RepresentationExtractor(feature_extractor,
                                             feature_transform, vae_extractor),
                     input_shape=(1, 3, 64, 64),
                     cuda=args.cuda)

    # Done!
    aicrowd_helpers.register_progress(1.0)
    aicrowd_helpers.submit()
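
# Hypothetical usage sketch (not part of the original excerpt): main() only
# reads args.no_cuda, args.batch_size, args.experiment_name, args.features_dir
# and args.log_interval, so a minimal driver could look like this (defaults
# here are illustrative, not the project's actual values).
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--no-cuda', action='store_true', default=False)
    parser.add_argument('--batch-size', type=int, default=64)
    parser.add_argument('--experiment-name', type=str, default='experiment')
    parser.add_argument('--features-dir', type=str, default='features')
    parser.add_argument('--log-interval', type=int, default=10)
    main(parser.parse_args())
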
Example No. 2
parser.add_argument(
    '--seed',
    type=int,
    default=1,
    help='random seed (default: 1)')
parser.add_argument(
    '--log-interval',
    type=int,
    default=10,
    metavar='N',
    help='how many batches to wait before logging training status')

args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

torch.manual_seed(args.seed)
device = torch.device("cuda" if args.cuda else "cpu")

kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
train_loader = pyu.get_loader(batch_size=args.batch_size, **kwargs)


# Simple MLP encoder: flattens 3x64x64 inputs (4096 * 3 values) and produces
# a 20-dimensional mean and log-variance for the VAE latent.
class Encoder(nn.Module):
    def __init__(self):
        super(Encoder, self).__init__()
        self.tail = nn.Sequential(nn.Linear(4096 * 3, 400), nn.ReLU())
        self.head_mu = nn.Linear(400, 20)
        self.head_logvar = nn.Linear(400, 20)

    def forward(self, x):
        h = self.tail(x.contiguous().view(-1, 4096 * 3))
        return self.head_mu(h), self.head_logvar(h)
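

# Hedged sketch (not part of the original excerpt): the (mu, logvar) pair
# returned by Encoder.forward would normally feed the standard VAE
# reparameterization trick to produce a differentiable latent sample.
def reparameterize_sketch(mu, logvar):
    std = torch.exp(0.5 * logvar)  # log-variance -> standard deviation
    eps = torch.randn_like(std)    # unit Gaussian noise of the same shape
    return mu + eps * std          # z = mu + sigma * eps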


class Decoder(nn.Sequential):
Example No. 3
parser.add_argument(
    '--log-interval',
    type=int,
    default=10,
    metavar='N',
    help='how many batches to wait before logging training status')

args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

torch.manual_seed(args.seed)
device = torch.device("cuda" if args.cuda else "cpu")

kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
# iterator_len defines length of an epoch
train_loader = pyu.get_loader(batch_size=args.batch_size,
                              iterator_len=args.batch_size * 300,
                              **kwargs)


# copied from
# https://stackoverflow.com/questions/61039700/using-flatten-in-pytorch-v1-0-sequential-module
class Flatten(nn.Module):
    def forward(self, input):
        return input.view(input.size(0), -1)
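

# Hedged usage sketch (not in the original excerpt, shapes are illustrative):
# Flatten lets a convolutional stage feed a linear layer inside a single
# nn.Sequential, which is why the snippet defines it as a module.
encoder_sketch = nn.Sequential(
    nn.Conv2d(3, 32, kernel_size=4, stride=2, padding=1),  # 64x64 -> 32x32
    nn.ReLU(),
    Flatten(),                             # (N, 32, 32, 32) -> (N, 32768)
    nn.Linear(32 * 32 * 32, 20))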


# copied from
# https://discuss.pytorch.org/t/how-to-build-a-view-layer-in-pytorch-for-sequential-models/53958/11
class View(nn.Module):
    def __init__(self, shape):
        super().__init__()