Example #1
import torch
import torch.nn as nn
import matplotlib.pyplot as plt

# Assumes a DenseNet module (as in the later examples) is in scope.
def densenet_regression(x,
                        y,
                        hidden_dims,
                        loss_fn=nn.MSELoss(),
                        lr=1e-2,
                        weight_decay=1e-4,
                        num_iters=1000,
                        print_every=100,
                        device=torch.device('cuda'),
                        verbose=True,
                        plot=True):
    """Use DenseNet with linear layers for regression

    Returns:
        model: nn.Module, learned regression model
    """
    in_dim = x.size(-1)
    out_dim = y.size(-1)
    # Append the output layer size without mutating the caller's list.
    hidden_dims = hidden_dims + [out_dim]
    model = DenseNet(in_dim, hidden_dims, dense=True,
                     residual=False).to(device)
    # Optimize only the parameters that require gradients (supports frozen layers).
    optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                        model.parameters()),
                                 lr=lr,
                                 weight_decay=weight_decay,
                                 amsgrad=True)
    for i in range(num_iters):
        y_pred = model(x)
        loss = loss_fn(y_pred, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if verbose and (i % print_every == 0):
            print(i, loss.item())
    if plot:
        # Red dots: training data; green dashes: model predictions.
        plt.plot(x.detach().cpu().numpy().reshape(-1),
                 y.detach().cpu().numpy().reshape(-1), 'ro',
                 x.detach().cpu().numpy().reshape(-1),
                 y_pred.detach().cpu().numpy().reshape(-1), 'g--')
        plt.show()
    return model
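
A minimal usage sketch (not part of the original example): it assumes the DenseNet class above is importable and fits a noisy 1-D toy function; shapes and hyperparameters are illustrative.

# Hypothetical call on toy data; all values below are illustrative.
x = torch.linspace(-1, 1, 200, device='cuda').unsqueeze(-1)  # (200, 1) inputs
y = torch.sin(3 * x) + 0.1 * torch.randn_like(x)             # noisy targets
model = densenet_regression(x, y, hidden_dims=[32, 32], num_iters=500)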
Example #2
        # (Excerpt opens mid-function, inside an epoch-end hook that logs test metrics.)
        test_err_logger.log(state['epoch'] - 1, classerr.value()[0])
        confusion_logger.log(confusion_meter.value())

    print('[Epoch {:03d}] Test loss: {:.4f}\tTop 1: {:.2f}\tTop 5: {:.2f}'.
          format(state['epoch'] - 1,
                 meter_loss.value()[0], classerr.value(k=1),
                 classerr.value(k=5)))


if args.test_only:
    engine.hooks['on_sample'] = on_sample
    engine.hooks['on_forward'] = on_forward
    engine.test(network=network_forward, iterator=test_loader)
    print('Test loss: {:.4f}\tTop 1: {:.2f}\tTop 5: {:.2f}'.format(
        meter_loss.value()[0], classerr.value(k=1), classerr.value(k=5)))
else:
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)
    # Register hooks so meters and loggers are updated throughout training.
    engine.hooks['on_start'] = on_start
    engine.hooks['on_sample'] = on_sample
    engine.hooks['on_forward'] = on_forward
    engine.hooks['on_start_epoch'] = on_start_epoch
    engine.hooks['on_end_epoch'] = on_end_epoch
    engine.train(network=network_forward,
                 iterator=train_loader,
                 maxepoch=args.epochs,
                 optimizer=optimizer)
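
For reference, torchnet's Engine drives training through a closure that returns a (loss, output) pair plus a set of state hooks. The sketch below is an illustrative reconstruction of the pieces this excerpt assumes (network_forward, on_forward), not the original script's definitions.

import torch.nn.functional as F

def network_forward(sample):
    inputs, targets = sample[0], sample[1]
    outputs = model(inputs)            # 'model' is defined elsewhere in the script
    loss = F.cross_entropy(outputs, targets)
    return loss, outputs               # the Engine expects a (loss, output) pair

def on_forward(state):
    # Accumulate running metrics after every forward pass.
    classerr.add(state['output'].detach(), state['sample'][1])
    meter_loss.add(state['loss'].item())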
Example #3
import os

import torch
from torchvision import datasets, transforms

# DenseNet and train() come from the surrounding project (see the other examples).
def demo(data, save, depth=100, growth_rate=12, efficient=True, valid_size=5000,
         n_epochs=300, batch_size=64, seed=None):
    """
    A demo to show off training of efficient DenseNets.
    Trains and evaluates a DenseNet-BC on CIFAR-10.

    Args:
        data (str) - path to directory where data should be loaded from/downloaded
            (default $DATA_DIR)
        save (str) - path to save the model to (default /tmp)

        depth (int) - depth of the network (number of convolution layers) (default 100)
        growth_rate (int) - number of features added per DenseNet layer (default 12)
        efficient (bool) - use the memory efficient implementation? (default True)

        valid_size (int) - size of the validation set held out of train (default 5000)
        n_epochs (int) - number of epochs for training (default 300)
        batch_size (int) - size of minibatch (default 64)
        seed (int) - manually set the random seed (default None)
    """

    # Get densenet configuration
    if (depth - 4) % 3:
        raise Exception('Invalid depth')
    block_config = [(depth - 4) // 6 for _ in range(3)]
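    # e.g. depth=100 -> (100 - 4) // 6 = 16 layers per dense block; each
    # bottleneck layer in DenseNet-BC holds two convolutions, hence the 6.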

    # Data transforms
    mean = [0.49139968, 0.48215841, 0.44653091]
    stdv = [0.24703223, 0.24348513, 0.26158784]
    train_transforms = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=stdv),
    ])
    test_transforms = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=stdv),
    ])

    # Datasets
    train_set = datasets.CIFAR10(data, train=True, transform=train_transforms, download=True)
    test_set = datasets.CIFAR10(data, train=False, transform=test_transforms, download=False)

    if valid_size:
        valid_set = datasets.CIFAR10(data, train=True, transform=test_transforms)
        # Shuffle once, then hold out the last valid_size examples for validation.
        indices = torch.randperm(len(train_set))
        train_indices = indices[:len(indices) - valid_size]
        valid_indices = indices[len(indices) - valid_size:]
        train_set = torch.utils.data.Subset(train_set, train_indices)
        valid_set = torch.utils.data.Subset(valid_set, valid_indices)
    else:
        valid_set = None

    # Models
    model = DenseNet(
        growth_rate=growth_rate,
        block_config=block_config,
        num_init_features=growth_rate*2,
        num_classes=10,
        small_inputs=True,
        efficient=efficient,
    )
    print(model)
    
    # Print number of parameters
    num_params = sum(p.numel() for p in model.parameters())
    print("Total parameters: ", num_params)

    # Make save directory
    if not os.path.exists(save):
        os.makedirs(save)
    if not os.path.isdir(save):
        raise Exception('%s is not a dir' % save)

    # Train the model
    train(model=model, train_set=train_set, valid_set=valid_set, test_set=test_set, save=save,
          n_epochs=n_epochs, batch_size=batch_size, seed=seed)
    print('Done!')
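
A minimal entry-point sketch for running the demo from the command line, assuming the python-fire package (the original script may expose it differently):

if __name__ == '__main__':
    import fire
    fire.Fire(demo)  # e.g. python demo.py --data ./data --save ./ckpt --depth 100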
Example #4
import torch

from models import DenseNet
from config import config
from preprocess import train_data_iterator, test_data_helper

net = DenseNet(
    growth_rate=32,
    block_config=[3, 3, 3],
    num_classes=config.charlen * config.captlen,
    small_inputs=False,
    efficient=True,
)

net = net.cuda()

# Optimizer
optimizer = torch.optim.SGD(net.parameters(),
                            lr=0.1,
                            momentum=0.9,
                            nesterov=True,
                            weight_decay=0.0001)

best_acc = config.baseline

# reduce=False is long deprecated; reduction='none' keeps per-element losses.
loss_fn = torch.nn.BCEWithLogitsLoss(reduction='none')

for epoch in range(config.epochs):
    for i, (input, target) in enumerate(train_data_iterator()):
        input = torch.FloatTensor(input)
        # BCEWithLogitsLoss expects float targets of the same shape as the logits.
        target = torch.FloatTensor(target)
        # 'async' is a reserved word in Python 3.7+, and Variable is deprecated;
        # plain tensors with non_blocking transfers do the same job.
        input_var = input.cuda(non_blocking=True)
        target_var = target.cuda(non_blocking=True)
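
The excerpt ends before the loss is computed. Because BCEWithLogitsLoss scores every logit independently, the target must be a float multi-hot matrix with the same shape as the logits; the standalone sketch below illustrates that encoding with hypothetical shapes (it is not the original training step).

# Illustrative multi-hot target encoding for per-character captcha outputs.
import torch

batch, charlen, captlen = 8, 36, 4                     # hypothetical sizes
logits = torch.randn(batch, charlen * captlen)
labels = torch.randint(0, charlen, (batch, captlen))   # character indices
multi_hot = torch.zeros(batch, captlen, charlen)
multi_hot.scatter_(2, labels.unsqueeze(-1), 1.0)       # one-hot per position
target = multi_hot.view(batch, -1)                     # same shape as logits
loss = torch.nn.BCEWithLogitsLoss(reduction='none')(logits, target)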