Example n. 1
0
        # NOTE(review): this fragment starts mid-function — the enclosing
        # `def` (and whatever seeding precedes these two lines) is outside
        # this excerpt.
        torch.cuda.manual_seed_all(args.rand_seed)
        cudnn.benchmark = True

    lr = args.lr  # current learning rate
    start_epoch = 1  # start from epoch 1 or last checkpoint epoch

    # Ensure the generic save directory exists.
    if not os.path.isdir(args.save):
        os.mkdir(args.save)

    # Per-run output folder under trained_nets/, named from the CLI args.
    save_folder = name_save_folder(args)
    if not os.path.exists('trained_nets/' + save_folder):
        os.makedirs('trained_nets/' + save_folder)

    # Append-mode log file for this run.
    f = open('trained_nets/' + save_folder + '/log.out', 'a')

    trainloader, testloader = dataloader.get_data_loaders(args)

    # When label corruption is enabled and we are not resuming, persist the
    # exact loaders so the corruption pattern can be reproduced later.
    if args.label_corrupt_prob and not args.resume_model:
        torch.save(trainloader,
                   'trained_nets/' + save_folder + '/trainloader.dat')
        torch.save(testloader,
                   'trained_nets/' + save_folder + '/testloader.dat')

    # Model
    if args.resume_model:
        # Load checkpoint and continue from the epoch after the saved one.
        print('==> Resuming from checkpoint..')
        checkpoint = torch.load(args.resume_model)
        net = model_loader.load(args.model)
        net.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch'] + 1
from dataloader import get_data_loaders
from network import neural_network_model
from utils import get_information, plot_info_plane
# from calc_info import get_information
from deepclustering.utils import class2one_hot
from deepclustering.loss.loss import KL_div

# Training script: binary classification trained with a KL-divergence loss
# against one-hot targets, computing an information estimate every batch.
device = torch.device('cuda')
net = neural_network_model()
net.cuda()
optimizer = optim.Adam(net.parameters(), lr=0.01)
# Halve the learning rate at epochs 40, 80 and 120.
scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                           milestones=[40, 80, 120],
                                           gamma=0.5)
criterion = KL_div()
train_loader, test_loader = get_data_loaders()

for i in range(1000):  # epochs
    for _, (data, labels) in enumerate(train_loader):
        data, labels = data.to(device), labels.to(device)
        # Net presumably returns (predictions, per-layer features) —
        # TODO confirm against network.neural_network_model.
        preds, features = net(data)
        loss = criterion(preds, class2one_hot(labels, 2).float())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Information estimate over the detached layer activations.
        info = get_information([[w.detach().cpu().numpy() for w in features]],
                               data.cpu().numpy(),
                               class2one_hot(labels, 2).cpu().numpy(),
                               epoch_num=0)
        # NOTE(review): the excerpt is truncated mid-statement below
        # (batch accuracy = correct predictions / batch size).
        acc = ((preds.max(1)[1] == labels.long()).sum().float() /
Example n. 3
0
        f'{total_time:.2f} total seconds elapsed. {total_time / (epoch):.2f} '
        'seconds per epoch.')
    # Collect the per-epoch metrics into a DataFrame for the caller.
    history = pd.DataFrame(
        history,
        columns=['train_loss', 'valid_loss', 'train_acc', 'valid_acc'])

    return model, history


if __name__ == '__main__':

    # Destination paths for the trained weights / checkpoint.
    # NOTE(review): paths say "vgg16" but the model requested below is
    # resnet50 — confirm which is intended.
    save_file_name = './models/vgg16-transfer-4pt'
    checkpoint_path = './models/vgg16-transfer-4.pth'  # unused in this excerpt

    dataloaders = get_data_loaders()
    model = get_pretrained_model(model_name='resnet50')
    criterion = nn.NLLLoss()  # expects log-probabilities from the model
    optimizer = optim.Adam(model.parameters())

    # Train for up to 30 epochs, stopping early after 5 epochs without
    # validation improvement.
    model, history = train(model,
                           criterion,
                           optimizer,
                           dataloaders['train'],
                           dataloaders['val'],
                           save_file_name=save_file_name,
                           max_epochs_stop=5,
                           n_epochs=30,
                           print_every=1)

    # Persist the training curves.
    history.to_csv('./artifacts/first_train.csv', index=False)
Example n. 4
0
    # NOTE(review): tail of a function whose definition lies outside this
    # excerpt; it returns the PIL image built above.
    return pil_img

if __name__ == '__main__':

    # Debug switch: restrict the loaders to a single sample.
    single_sample = True

    # Run from the directory containing this script so relative paths resolve.
    outputdir = os.path.dirname(os.path.abspath(__file__))

    os.chdir(outputdir)

    use_gpu = torch.cuda.is_available()

    # SegNet with 3 input channels and 12 output classes.
    own_net = SegNet(3, 12)  #OwnSegNet(3)

    # Loaders plus class weights/encoding/splits for the CamVid dataset.
    # NOTE(review): the meaning of the three trailing 1s (batch sizes /
    # workers?) must be confirmed against dataloader.get_data_loaders.
    loaders, w_class, class_encoding, sets = dataloader.get_data_loaders(
        camvid_dataset, 1, 1, 1, single_sample=single_sample)
    trainloader, valloader, testloader = loaders
    test_set, val_set, train_set = sets

    # Print the class index -> name mapping.
    for i, key in enumerate(class_encoding.keys()):
        print("{} \t {}".format(i, key))

    optimizer = optim.SGD(own_net.parameters(),
                          lr=1e-3,
                          weight_decay=5e-4,
                          momentum=0.9)

    # Evaluation metric

    # Exclude 'unlabeled' pixels from the metric.
    ignore_index = list(class_encoding).index('unlabeled')
Example n. 5
0
def main(config):
    """Train (or load) the configured model, then plot its 2-D loss surface.

    ``config`` is expected to expose: rand_seed, out_dir, model,
    model_config, train, optimizer, lr, weight_decay, lr_decay, epoch_num.

    Side effects: creates the params/loss/fig output directories, saves the
    trained state dict to ``config.out_model_dir``, prints progress, and
    finally calls ``loss_surface_2d``.
    """
    # Seed every RNG source for reproducibility. cudnn.benchmark enables
    # autotuned conv kernels (original behavior kept).
    random.seed(config.rand_seed)
    np.random.seed(config.rand_seed)
    torch.manual_seed(config.rand_seed)
    torch.cuda.manual_seed_all(config.rand_seed)
    cudnn.benchmark = True

    # Output directories: params / loss / fig, one subdir per model config.
    config.out_model_dir = os.path.join(config.out_dir, 'params', config.model_config)
    config.out_loss_dir = os.path.join(config.out_dir, 'loss', config.model_config)
    config.out_fig_dir = os.path.join(config.out_dir, 'fig', config.model_config)
    for d in (config.out_model_dir, config.out_loss_dir, config.out_fig_dir):
        # exist_ok avoids the check-then-create race of the original
        # `if not os.path.exists(d): os.makedirs(d)` pattern.
        os.makedirs(d, exist_ok=True)

    # Build the model on GPU 0 and fetch the data loaders.
    model = models[config.model](config.model_config).to(0)
    init_params(model)
    train_loader, test_loader = get_data_loaders(config)

    if config.train:
        # ---- train from scratch ----
        if config.optimizer == 'SGD':
            optimizer = optim.SGD(model.parameters(), lr=config.lr, momentum=0.9,
                                  weight_decay=config.weight_decay, nesterov=True)
        else:
            optimizer = optim.Adam(model.parameters(), lr=config.lr,
                                   weight_decay=config.weight_decay)
        # step_size=1 because decay is triggered manually at the milestone
        # epochs below, not once per epoch.
        scheduler = StepLR(optimizer, step_size=1, gamma=config.lr_decay)
        for epoch in tqdm(range(config.epoch_num), ncols=50):
            running_loss = .0
            for inputs, labels in train_loader:
                optimizer.zero_grad()
                # The model returns the loss directly when given
                # (inputs, labels) -- TODO confirm against the model class.
                loss = model(inputs.to(0), labels.to(0))
                loss.backward()
                optimizer.step()
                # Weight the batch loss by batch size for a dataset average.
                running_loss += loss.item() * len(inputs)
            # Decay the LR at fixed milestone epochs (150/225/275, 1-indexed).
            if epoch in (149, 224, 274):
                scheduler.step()
            print("Epoch {} Average Loss: {}".format(epoch + 1, running_loss / len(train_loader.dataset)))

        # ---- evaluate on the test set ----
        correct = 0
        total = 0
        model.eval()
        with torch.no_grad():
            for images, labels in test_loader:
                outputs = model(images.to(0))
                # argmax over the class dimension (`.data` was redundant
                # under torch.no_grad()).
                _, predicted = torch.max(outputs, 1)
                total += labels.size(0)
                correct += (predicted == labels.to(0)).sum().item()

        # Record the model parameters and test accuracy.
        print('Accuracy of the network on the 10000 test images: {}'.format(correct / total * 100))
        state = {'state_dict': model.state_dict(), 'acc': correct / total}
        torch.save(state, os.path.join(config.out_model_dir, 'model_state'))

    # If trained already, load parameters from the output directory instead.
    else:
        model.load_state_dict(torch.load(os.path.join(config.out_model_dir, 'model_state'))['state_dict'])
        model.eval()

    # Calculate and plot loss values around the trained parameters.
    loss_surface_2d(config, model, test_loader)