Example 1
def get_network(opts):
    """Build a CapsNet configured for the selected dataset and move it to the GPU(s) if requested."""
    if opts.dataset == "mnist":
        capsnet = CapsNet(reconstruction_type=opts.decoder,
                          routing_iterations=opts.routing_iterations,
                          batchnorm=opts.batch_norm,
                          loss=opts.loss_type,
                          leaky_routing=opts.leaky_routing)
    elif opts.dataset == "small_norb":
        # 32x32 inputs use the 32x32 variant of the convolutional decoder.
        if opts.decoder == "Conv":
            opts.decoder = "Conv32"
        capsnet = CapsNet(reconstruction_type=opts.decoder,
                          imsize=32,
                          num_classes=5,
                          routing_iterations=opts.routing_iterations,
                          primary_caps_gridsize=8,
                          num_primary_capsules=32,
                          batchnorm=opts.batch_norm,
                          loss=opts.loss_type,
                          leaky_routing=opts.leaky_routing)
    elif opts.dataset == "cifar10":
        # CIFAR-10 images are 32x32 RGB, hence imsize=32 and img_channels=3.
        if opts.decoder == "Conv":
            opts.decoder = "Conv32"
        capsnet = CapsNet(reconstruction_type=opts.decoder,
                          imsize=32,
                          routing_iterations=opts.routing_iterations,
                          primary_caps_gridsize=8,
                          img_channels=3,
                          batchnorm=opts.batch_norm,
                          num_primary_capsules=32,
                          loss=opts.loss_type,
                          leaky_routing=opts.leaky_routing)
    else:
        raise ValueError("Unknown dataset: {}".format(opts.dataset))

    if opts.use_gpu:
        capsnet.cuda()
    if opts.gpu_ids:
        # Project-specific wrapper that parallelises the model across the given GPU IDs.
        capsnet = GPUParallell(capsnet, opts.gpu_ids)
        print("Training on GPU IDS:", opts.gpu_ids)
    return capsnet
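
For reference, a minimal usage sketch of get_network follows. The SimpleNamespace stand-in and the specific option values are assumptions for illustration; the original project presumably builds opts with an argparse parser, and CapsNet / GPUParallell must already be importable in the calling module.

from types import SimpleNamespace

# Hypothetical options object exposing the attributes that get_network reads.
opts = SimpleNamespace(
    dataset="cifar10",          # "mnist", "small_norb", or "cifar10"
    decoder="Conv",             # rewritten to "Conv32" for the 32x32 datasets
    routing_iterations=3,       # assumed value for illustration
    batch_norm=True,
    loss_type="margin",         # assumed loss name; use whatever the project defines
    leaky_routing=False,
    use_gpu=False,              # set True (with CUDA available) to move the model to GPU
    gpu_ids=[],                 # an empty list skips the multi-GPU wrapper
)

capsnet = get_network(opts)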
Example 2
    # Data Loader (Input Pipeline)
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=args.batch_size,
                                               num_workers=args.num_workers,
                                               shuffle=True)

    test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                              batch_size=args.batch_size,
                                              num_workers=args.num_workers,
                                              shuffle=True)  # shuffling the test set only changes batch order

    # Batches per epoch, plus two loss-schedule values; lambda_ is ramped up towards 1 during training.
    steps, lambda_, m = len(train_dataset) // args.batch_size, 1e-3, 0.2

    if use_cuda:
        print("activating cuda")
        model.cuda()

    for epoch in range(args.num_epochs):
        reset_meters()

        # Train
        print("Epoch {}".format(epoch))
        step = 0
        correct = 0
        loss = 0

        with tqdm(total=steps) as pbar:
            for data in train_loader:
                step += 1
                if lambda_ < 1:
                    # Linear warm-up: lambda_ grows by 0.2 per epoch, reaching 1 after about five epochs.
                    lambda_ += 2e-1 / steps