Example #1
def train_tf_main():
    aicrowd_helpers.register_progress(0.0)
    start_time = time.time()
    train.train_with_gin(os.path.join(experiment_output_path, "model"),
                         overwrite, [get_full_path("model.gin")], gin_bindings)
    # path=os.path.join(experiment_output_path, str(time.time()))
    # train.train_with_gin(
    #     path, overwrite,
    #     [get_full_path("model.gin")], gin_bindings)
    elapsed_time = time.time() - start_time
    print("Elapsed training time: {:.2f} seconds".format(elapsed_time))
    ########################################################################
    # Register Progress (end of training, start of representation extraction)
    ########################################################################
    aicrowd_helpers.register_progress(0.90)
    # Extract the mean representation for both of these models.
    representation_path = os.path.join(experiment_output_path,
                                       "representation")
    model_path = os.path.join(experiment_output_path, "model")
    # model_path =path
    # representation_path=path
    # This contains the settings:
    postprocess_gin = [get_full_path("postprocess.gin")]
    postprocess.postprocess_with_gin(model_path, representation_path,
                                     overwrite, postprocess_gin)
    print("Written output to : ", experiment_output_path)
    ########################################################################
    # Register Progress (of representation extraction)
    ########################################################################
    aicrowd_helpers.register_progress(1.0)
    ########################################################################
    # Submit Results for evaluation
    ########################################################################
    cuda.close()
    aicrowd_helpers.submit()

    return elapsed_time, gin_bindings
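The snippet above relies on several module-level names that the excerpt does not show (`experiment_output_path`, `overwrite`, `get_full_path`, `gin_bindings`). Below is a minimal sketch of how such globals might be set up for the `train_with_gin` / `postprocess_with_gin` calls from `disentanglement_lib`; the environment-variable names, default paths, and binding values here are assumptions for illustration, not part of the original file.

import os

# Hypothetical module-level setup assumed by train_tf_main() above.
base_path = os.path.dirname(os.path.abspath(__file__))
experiment_output_path = os.getenv("AICROWD_OUTPUT_PATH", "./scratch/shared")
overwrite = True  # allow re-running into the same output directory

def get_full_path(filename):
    # Resolve a gin config file shipped next to this script.
    return os.path.join(base_path, filename)

# Extra gin bindings override values from model.gin at run time
# (the binding below is illustrative).
gin_bindings = [
    "dataset.name = '{}'".format(os.getenv("AICROWD_DATASET_NAME", "cars3d")),
]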
Example #2
def train_pytorch_main():
    # Go!
    # start_time = time.time()
    aicrowd_helpers.execution_start()
    aicrowd_helpers.register_progress(0.)
    # Training loop
    start_time = time.time()
    for epoch in range(1, args.epochs + 1):
        train(epoch)
    # Almost done...
    elapsed_time = time.time() - start_time
    file_name = "model.pth"
    torch.save(model.state_dict(), file_name)
    aicrowd_helpers.register_progress(0.90)
    # Export the representation extractor
    pyu.export_model(RepresentationExtractor(model.encoder, 'mean'),
                     input_shape=(1, 3, 64, 64))
    # Done!
    aicrowd_helpers.register_progress(1.0)
    aicrowd_helpers.submit()

    return elapsed_time, args
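`RepresentationExtractor` is referenced here but not defined in the excerpt. The following is a minimal sketch of a wrapper with the same apparent interface (an encoder plus a 'mean' mode), offered as an assumption rather than the starter kit's actual class.

import torch
import torch.nn as nn

class RepresentationExtractor(nn.Module):
    """Wrap a VAE encoder so the exported module returns one representation tensor."""

    def __init__(self, encoder, mode='mean'):
        super().__init__()
        assert mode in ('mean', 'sample')
        self.encoder = encoder
        self.mode = mode

    def forward(self, x):
        # The encoder is assumed to return the posterior parameters (mu, logvar).
        mu, logvar = self.encoder(x)
        if self.mode == 'mean':
            return mu
        # 'sample' mode: reparameterised draw from N(mu, exp(logvar)).
        return mu + torch.randn_like(mu) * torch.exp(0.5 * logvar)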
Example #3
        optimizer.zero_grad()
        recon_batch, mu, logvar = model(data)
        loss = loss_function(recon_batch, data, mu, logvar)
        loss.backward()
        train_loss += loss.item()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader),
                loss.item() / len(data)))

    print('====> Epoch: {} Average loss: {:.4f}'.format(
        epoch, train_loss / len(train_loader.dataset)))
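
# loss_function() is used in train() above but not shown in this excerpt.
# A minimal sketch of the standard VAE objective it presumably implements
# (an assumption, not the file's actual definition); it requires
# `import torch` and `import torch.nn.functional as F` at the top of the file.
def loss_function(recon_x, x, mu, logvar):
    # Reconstruction term: pixel-wise binary cross-entropy, summed over the batch.
    bce = F.binary_cross_entropy(recon_x, x.view_as(recon_x), reduction='sum')
    # KL divergence between the approximate posterior N(mu, exp(logvar)) and N(0, I).
    kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return bce + kld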


if __name__ == '__main__':
    # Go!
    aicrowd_helpers.execution_start()
    aicrowd_helpers.register_progress(0.)
    # Training loop
    for epoch in range(1, args.epochs + 1):
        train(epoch)
    # Almost done...
    aicrowd_helpers.register_progress(0.90)
    # Export the representation extractor
    pyu.export_model(RepresentationExtractor(model.encoder, 'mean'),
                     input_shape=(1, 3, 64, 64))
    # Done!
    aicrowd_helpers.register_progress(1.0)
def main(args,
         seed=1,
         dataset_size=0,
         vae_batch_size=64,
         use_pca=False,
         cache_features_on_disk=True,
         sequential_dataset=False,
         transform_class=feature_transforms.MaximumTransform):
    args.cuda = not args.no_cuda and torch.cuda.is_available()

    write_config(args.experiment_name)
    stats_logger = StatsLogger(args.experiment_name)

    np.random.seed(seed)
    torch.manual_seed(seed)

    device = torch.device('cuda' if args.cuda else 'cpu')
    print(f'Running on {device}')

    if args.cuda:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    shuffle = not sequential_dataset
    kwargs = {'num_workers': 2, 'pin_memory': True} if args.cuda else {}
    loader = pyu.get_loader(batch_size=args.batch_size,
                            sequential=sequential_dataset,
                            shuffle=shuffle,
                            **kwargs)

    if sequential_dataset:
        if dataset_size <= 0:
            dataset_size = len(loader.dataset)
        else:
            loader.dataset.iterator_len = dataset_size
    else:
        if dataset_size <= 0:
            loader.dataset.iterator_len = len(loader.dataset.dataset.images)
            dataset_size = loader.dataset.iterator_len
        else:
            loader.dataset.iterator_len = dataset_size

    print(f'Training with {loader.dataset.iterator_len} images')

    aicrowd_helpers.execution_start()
    aicrowd_helpers.register_progress(0.)

    feature_extractor = feature_extractors.get_feature_extractor()
    feature_extractor.to(device)
    feature_transform = transform_class()
    feature_transform.to(device)

    # Extract and transform features from images
    dataset = get_transformed_feature_dataset(feature_extractor,
                                              feature_transform, loader,
                                              dataset_size,
                                              cache_features_on_disk,
                                              args.features_dir, use_pca,
                                              device, args.log_interval)
    # Train VAE on the aggregated features
    loader = DataLoader(dataset,
                        batch_size=vae_batch_size,
                        shuffle=True,
                        num_workers=2,
                        pin_memory=True)
    aicrowd_helpers.register_progress(0.40)

    model = train_vae(loader, device, stats_logger)

    aicrowd_helpers.register_progress(0.90)

    # Export the representation extractor
    vae_extractor = vae.get_representation_extractor(model)
    pyu.export_model(RepresentationExtractor(feature_extractor,
                                             feature_transform, vae_extractor),
                     input_shape=(1, 3, 64, 64),
                     cuda=args.cuda)

    # Done!
    aicrowd_helpers.register_progress(1.0)
    aicrowd_helpers.submit()
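All three examples export their representation model through `pyu.export_model(...)` from the starter-kit utilities. Below is a rough sketch of what such an export helper could look like using `torch.jit.trace`; the body, signature defaults, and output path are assumptions, not the actual `utils_pytorch` implementation.

import torch

def export_model(model, path='exported_model.pt', input_shape=(1, 3, 64, 64), cuda=False):
    # Trace the representation extractor with a dummy input and save the
    # TorchScript module so an evaluator can load it without the source code.
    model.eval()
    example = torch.randn(*input_shape)
    if cuda:
        model, example = model.cuda(), example.cuda()
    traced = torch.jit.trace(model, example)
    traced.save(path)
    return path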