Example #1
import os

import torch
import torch.nn as nn

# PixelMapDataset and the model constructors used below (alexnet, DenseNet,
# inception_v3, resnet18, squeezenet1_1, vgg19_bn) are assumed to be
# project-local variants that accept an `in_ch` argument; the stock
# torchvision constructors do not take that parameter.


def train_model(modname='alexnet', pm_ch='both', bs=16):
    """
    Args:
        modname (string): Name of the model. Has to be one of the values:
            'alexnet', batch 64
            'densenet'
            'inception'
            'resnet', batch 16
            'squeezenet', batch 16
            'vgg'
        pm_ch (string): pixelmap channel -- 'time', 'charge', 'both', default to both
    """
    # device configuration
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    # hyperparameters
    max_epochs = 10
    learning_rate = 0.001

    # determine number of input channels
    nch = 2
    if pm_ch != 'both':
        nch = 1

    ds = PixelMapDataset('training_file_list.txt', pm_ch)
    # try out the data loader utility
    dl = torch.utils.data.DataLoader(dataset=ds, batch_size=bs, shuffle=True)

    # define model
    model = None
    if modname == 'alexnet':
        model = alexnet(num_classes=3, in_ch=nch).to(device)
    elif modname == 'densenet':
        model = DenseNet(num_classes=3, in_ch=nch).to(device)
    elif modname == 'inception':
        model = inception_v3(num_classes=3, in_ch=nch).to(device)
    elif modname == 'resnet':
        model = resnet18(num_classes=3, in_ch=nch).to(device)
    elif modname == 'squeezenet':
        model = squeezenet1_1(num_classes=3, in_ch=nch).to(device)
    elif modname == 'vgg':
        model = vgg19_bn(in_ch=nch, num_classes=3).to(device)
    else:
        raise ValueError('Model {} not defined.'.format(modname))

    # loss and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    # training process
    total_step = len(dl)
    for epoch in range(max_epochs):
        # the dataset yields two views per event, but only view1 is used here
        for i, (view1, view2, local_labels) in enumerate(dl):
            view1 = view1.float().to(device)
            # zero-pad the 96x107 pixel maps up to the input resolution each
            # architecture expects: 299x299 for Inception v3, 224x224 otherwise
            if modname == 'inception':
                view1 = nn.ZeroPad2d((0, 192, 102, 101))(view1)
            else:
                view1 = nn.ZeroPad2d((0, 117, 64, 64))(view1)
            local_labels = local_labels.to(device)

            # forward pass
            outputs = model(view1)
            loss = criterion(outputs, local_labels)

            # backward and optimize
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # note: bs doubles as the logging interval here
            if (i + 1) % bs == 0:
                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(
                    epoch + 1, max_epochs, i + 1, total_step, loss.item()))

    # save the model checkpoint
    save_path = '../../../data/two_views/saved_models/{}/{}'.format(
        modname, pm_ch)
    os.makedirs(save_path, exist_ok=True)
    torch.save(model.state_dict(), os.path.join(save_path, 'model.ckpt'))
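
A minimal invocation sketch for the function above; it assumes PixelMapDataset, the project-local model constructors, and training_file_list.txt are available, and the 'resnet'/batch-16 pairing follows the docstring's suggestion:

# hypothetical call, not part of the original example
train_model(modname='resnet', pm_ch='both', bs=16)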
Example #2
    # flatten the test loader into single tensors: unzip the list of
    # (inputs, labels) batches, then concatenate along the batch dimension
    test = list(test_loader)
    test = list(zip(*test))
    X_test = torch.cat(test[0], 0)
    y_test = torch.cat(test[1], 0)

    # X_train/y_train and X_val/y_val are assumed to be assembled earlier in
    # the same way; bundle every split for the Solver
    data = {
        'X_train': X_train,
        'y_train': y_train,
        'X_val': X_val,
        'y_val': y_val,
        'X_test': X_test,
        'y_test': y_test
    }

    # project-local DenseNet variant (signature differs from torchvision's);
    # input_param=(1, 64) presumably maps 1-channel MNIST images to 64
    # initial feature maps
    model = DenseNet(input_param=(1, 64),
                     block_layers=(6, 4),
                     num_classes=10,
                     growth_rate=32,
                     bn_size=2,
                     dropout_rate=0,
                     transition_pool_param=(3, 1, 1))

    loss_fn = nn.CrossEntropyLoss()

    optimizer = torch.optim.Adam(model.parameters(), weight_decay=1e-4)

    solver = Solver(model, data, optimizer, loss_fn)
    solver.train(num_epoch=2, file_prefix='mnist-')
    solver.predict(file_prefix='mnist-')
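
The train and validation tensors packed into the data dict above are not built in this excerpt. A sketch of how they could be assembled, assuming train_loader and val_loader DataLoaders exist and mirroring the test-split pattern:

# hypothetical: flatten the train/val loaders exactly like the test loader
train = list(zip(*list(train_loader)))
X_train, y_train = torch.cat(train[0], 0), torch.cat(train[1], 0)
val = list(zip(*list(val_loader)))
X_val, y_val = torch.cat(val[0], 0), torch.cat(val[1], 0)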
            f"Loaded parameters from saved model: LR is"
            f" {helper.params['lr']} and current epoch is {helper.start_epoch}"
        )
    else:
        helper.start_epoch = 1

    logger.info(
        f'Total number of params for model {helper.params["model"]}: '
        f'{sum(p.numel() for p in net.parameters() if p.requires_grad)}'
    )
    # under differential privacy, keep per-example losses (reduction='none')
    # so gradients can be clipped per example before aggregation
    if dp:
        criterion = nn.CrossEntropyLoss(reduction='none')
    else:
        criterion = nn.CrossEntropyLoss()

    if helper.params['optimizer'] == 'SGD':
        optimizer = optim.SGD(net.parameters(),
                              lr=lr,
                              momentum=momentum,
                              weight_decay=decay)
    elif helper.params['optimizer'] == 'Adam':
        optimizer = optim.Adam(net.parameters(), lr=lr, weight_decay=decay)
    else:
        raise ValueError("Specify `optimizer` in params.yaml as 'SGD' or 'Adam'.")

    # cast to int: MultiStepLR matches milestones against the integer epoch
    # counter, so fractional values (e.g. with an odd `epochs`) never fire
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer,
        milestones=[int(0.5 * epochs), int(0.75 * epochs)],
        gamma=0.1)

    table = create_table(helper.params)
    writer.add_text('Model Params', table)
    logger.info(table)
    logger.info(helper.labels)
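
To make the dp branch concrete: with reduction='none' the criterion returns one loss per example instead of a single scalar, which per-example gradient clipping in differentially private training relies on. A self-contained sketch with dummy logits and labels:

import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss(reduction='none')
logits = torch.randn(4, 10)              # dummy batch: 4 examples, 10 classes
labels = torch.tensor([1, 0, 3, 9])
per_example = criterion(logits, labels)  # shape [4]: one loss per example
loss = per_example.mean()                # manual reduction when not clipping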
Example #4
# the ten CIFAR-10 class names, indexed by label id
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
           'ship', 'truck')

print('==> Making model..')

net = DenseNet(growth_rate=args.growth_rate,
               theta=args.theta,
               num_layers=[12, 12, 12],
               num_classes=10)
net = net.to(device)
if device == 'cuda':
    # replicate the model across all visible GPUs and let cuDNN autotune
    # convolution kernels for the fixed input size
    net = torch.nn.DataParallel(net)
    cudnn.benchmark = True

num_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
print('Number of trainable parameters:', num_params)

if args.resume is not None:
    # restore weights from an earlier run; checkpoints store the state dict
    # under the 'net' key
    checkpoint = torch.load('./save_model/' + args.resume)
    net.load_state_dict(checkpoint['net'])
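
The resume branch implies checkpoints are dicts holding the state dict under a 'net' key. A hedged sketch of a save call consistent with that format (the filename is an assumption):

import os
state = {'net': net.state_dict()}
os.makedirs('./save_model', exist_ok=True)
torch.save(state, './save_model/ckpt.pth')  # hypothetical filename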

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(),
                      lr=args.lr,
                      momentum=args.momentum,
                      weight_decay=args.weight_decay)

# standard CIFAR schedule: decay the learning rate at epochs 150 and 225
decay_epoch = [150, 225]
scheduler = lr_scheduler.MultiStepLR(optimizer,
                                     milestones=decay_epoch,
                                     gamma=0.1)  # gamma is an assumption; the excerpt ends mid-call