コード例 #1
0
def get_model(dataset_type):
    """Instantiate a model for the given dataset and move it to ``device``.

    Args:
        dataset_type: one of ``'mnist'``, ``'cifar10'`` or ``'cifar100'``.

    Returns:
        The model placed on the module-level ``device``.

    Raises:
        ValueError: if ``dataset_type`` is not one of the supported names.
            (The original code fell through with ``model`` unbound and
            raised ``UnboundLocalError`` at the return instead.)
    """
    if dataset_type == 'mnist':
        model = MNISTNet().to(device)
    elif dataset_type == 'cifar10':
        # Alternative architectures kept for experimentation:
        # model = CIFAR10Net().to(device)
        model = CNN9Layer(num_classes=10, input_shape=3).to(device)
        # model = ResNet18(num_classes=10).to(device)
    elif dataset_type == 'cifar100':
        # model = CNN9Layer(num_classes=100, input_shape=3).to(device)
        model = ResNet18(num_classes=100).to(device)
    else:
        raise ValueError('unsupported dataset_type: {!r}'.format(dataset_type))
    return model
コード例 #2
0
def main():
    """Train MNISTNet on MNIST using settings loaded from a JSON config.

    Command-line flags:
        --config: path to the JSON configuration file (optimizer,
            scheduler, dataset and output-directory settings).
        --no-cuda: force CPU even when CUDA is available.
        --parallel: wrap the model in ``nn.DataParallel``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, default='configs/config.json')
    parser.add_argument('--no-cuda', action='store_true')
    parser.add_argument('--parallel', action='store_true')
    args = parser.parse_args()
    args.cuda = torch.cuda.is_available() and not args.no_cuda
    print(args)

    device = torch.device('cuda' if args.cuda else 'cpu')

    config = load_json(args.config)

    model = MNISTNet()
    if args.parallel:
        model = nn.DataParallel(model)
    model.to(device)

    optimizer = optim.Adam(model.parameters(), **config['adam'])
    scheduler = optim.lr_scheduler.StepLR(optimizer, **config['steplr'])

    train_loader, valid_loader = mnist_loader(**config['dataset'])

    trainer = Trainer(model, optimizer, train_loader, valid_loader, device)

    # Timestamped run directory so repeated runs never overwrite each other.
    output_dir = os.path.join(config['output_dir'],
                              datetime.now().strftime('%Y%m%d_%H%M%S'))
    os.makedirs(output_dir, exist_ok=True)

    # save config to output dir so the run is reproducible
    save_json(config, os.path.join(output_dir, 'config.json'))

    for epoch in range(config['epochs']):
        train_loss, train_acc = trainer.train()
        valid_loss, valid_acc = trainer.validate()

        # PyTorch >= 1.1 requires the LR scheduler to step AFTER the
        # optimizer has stepped for the epoch; stepping first (as the
        # original code did) skips the first value of the LR schedule.
        scheduler.step()

        print(
            'epoch: {}/{},'.format(epoch + 1, config['epochs']),
            'train loss: {:.4f}, train acc: {:.2f}%,'.format(
                train_loss, train_acc * 100),
            'valid loss: {:.4f}, valid acc: {:.2f}%'.format(
                valid_loss, valid_acc * 100))

        # Checkpoint the (possibly DataParallel-wrapped) model every epoch.
        torch.save(
            model.state_dict(),
            os.path.join(output_dir, 'model_{:04d}.pt'.format(epoch + 1)))
コード例 #3
0
# Experiment setup fragment: creates the log directory, records the run
# configuration, seeds RNGs, and loads the MNIST dataset/model.
# NOTE(review): variables defined here (model, dataset, dev, train, the
# callback lists) appear to be consumed by code past this excerpt.
log_path = common.create_log_dir(args.exp_id, task, args.loss)
print(f"Logging to {log_path}")

# Dumping all script arguments so the run can be reproduced later
common.dump_params(join(log_path, 'config.cfg'), args)

# Set custom seed before doing anything
common.set_custom_seed(args.seed)

# Load dataset and create model
print(f"[Task: {task.upper()}]")
print(f"[Loss: {args.loss.upper()}]")
print('[Loading Dataset...]')
# 2-dimensional embedding features, 10 classes (MNIST digits).
nfeat, nclass = 2, 10
config = common.get_config(args.loss, nfeat, nclass, task, args.margin)
model = MNISTNet(nfeat, loss_module=config.loss_module)
dataset = MNIST(args.path, args.batch_size)

dev = dataset.dev_partition()
train = dataset.training_partition()

print('[Dataset Loaded]')

# Train and evaluation plugins
test_callbacks = []
train_callbacks = []

# Logging configuration
# log_interval is interpreted as a percentage of an epoch; only values
# in 1..100 enable progress logging.
if args.log_interval in range(1, 101):
    print(
        f"[Logging: {common.enabled_str(True)} (every {args.log_interval}%)]")
コード例 #4
0
if __name__ == "__main__":
    # Reproducibility: seed all RNGs from the CLI-provided seed.
    set_seed(args.seed)

    # create output folder, named "<project> <timestamp>" under log_dir.
    # os.makedirs without exist_ok intentionally fails if the folder
    # already exists (same-second rerun), preventing overwrites.
    now = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
    output_path = os.path.join(args.log_dir, args.project_name + ' ' + now)
    os.makedirs(output_path)

    # get data loader (with synthetic label noise of the requested type/rate)
    train_loader, val_loader, test_loader, clean_sample_idx, noisy_sample_idx, dataset_len = create_dataloader(
        args.dataset, args.dataset_path, args.noise_type, args.noise_rate)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # NOTE(review): no else branch — an unrecognised args.dataset leaves
    # `model` undefined and the optimizer construction below raises
    # NameError; consider an explicit ValueError.
    if args.dataset == 'mnist':
        model = MNISTNet().to(device)
    elif args.dataset == 'cifar10':
        # model = CIFAR10Net().to(device)
        model = CNN9Layer(num_classes=10, input_shape=3).to(device)
        # model = ResNet18(num_classes=10).to(device)
    elif args.dataset == 'cifar100':
        # model = CNN9Layer(num_classes=100, input_shape=3).to(device)
        model = ResNet18(num_classes=100).to(device)

    # SGD with fixed momentum; weight decay comes from the CLI (--l2_reg).
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=0.9,
                          weight_decay=args.l2_reg)

    # Per-epoch history, presumably filled by a training loop past this
    # excerpt — TODO confirm.
    train_loss_lst, val_loss_lst = [], []
    train_acc_lst, val_acc_lst = [], []
コード例 #5
0
def main():
    """Train an L-Softmax model on MNIST.

    Parses hyperparameters from the command line, builds train/test
    loaders, trains for ``--epochs`` epochs with SGD and a per-epoch
    LR adjustment, and optionally plots 2-D features at the end.
    """
    parser = argparse.ArgumentParser(
        description='PyTorch L-Softmax MNIST Example')
    parser.add_argument(
        '--margin',
        type=int,
        default=4,
        metavar='M',
        help='the margin for the l-softmax formula (m=1, 2, 3, 4)')
    parser.add_argument('--batch-size',
                        type=int,
                        default=256,
                        metavar='N',
                        help='input batch size for training (default: 256)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=1000,
                        metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs',
                        type=int,
                        default=100,
                        metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.1,
                        metavar='LR',
                        help='initial learning rate (default: 0.1)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.9,
                        metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--weight-decay',
                        type=float,
                        default=5e-4,
                        metavar='W',
                        help='SGD weight decay (default: 0.0005)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    # BUGFIX: the original declared --vis with default=False but no
    # action/type, so it required a string argument and ANY value
    # (even "--vis False") evaluated truthy. store_true makes it a
    # proper boolean flag.
    parser.add_argument(
        '--vis',
        action='store_true',
        default=False,
        help='enables visualizing 2d features (default: False).')
    args = parser.parse_args()
    print(args)
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    # Extra DataLoader workers / pinned memory only pay off with CUDA.
    kwargs = {'num_workers': 4, 'pin_memory': True} if use_cuda else {}
    train_loader = torch.utils.data.DataLoader(datasets.MNIST(
        '../data',
        train=True,
        download=True,
        transform=transforms.Compose([
            transforms.ToTensor(),
            # Standard MNIST mean/std normalization.
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               **kwargs)
    test_loader = torch.utils.data.DataLoader(datasets.MNIST(
        '../data',
        train=False,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])),
                                              batch_size=args.test_batch_size,
                                              shuffle=True,
                                              **kwargs)

    criterion = nn.CrossEntropyLoss().to(device)
    # FIG2 variant exposes 2-D features for visualization.
    if args.vis:
        model = MNISTFIG2Net(margin=args.margin, device=device).to(device)
    else:
        model = MNISTNet(margin=args.margin, device=device).to(device)
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum)

    for epoch in range(1, args.epochs + 1):
        # Manual LR schedule, applied before each epoch.
        adjust_learning_rate(args, optimizer, epoch)
        train(args, model, criterion, device, train_loader, optimizer, epoch)
        test(args, model, criterion, device, test_loader)

    if args.vis:
        plot_2d_features(args, model, device, test_loader)
コード例 #6
0
    # create output folder, named "<project> <timestamp>" under log_dir.
    # os.makedirs without exist_ok intentionally fails on a same-second
    # rerun, preventing overwrites.
    now = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
    output_path = os.path.join(args.log_dir, args.project_name + ' ' + now)
    os.makedirs(output_path)

    # get data loader (with synthetic label noise of the requested type/rate)
    train_loader, val_loader, test_loader, clean_sample_idx, noisy_sample_idx, dataset_len = create_dataloader(
        args.dataset,
        args.dataset_path,
        args.noise_type,
        args.noise_rate)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # NOTE(review): no else branch — an unrecognised args.dataset leaves
    # `model` undefined and the optimizer line below raises NameError.
    if args.dataset == 'mnist':
        model = MNISTNet().to(device)
    elif args.dataset == 'cifar10':
        # NOTE(review): input_shape=1 looks suspicious for 3-channel
        # CIFAR-10 — confirm against CNN9Layer's definition.
        model = CNN9Layer(num_classes=args.num_classes, input_shape=1).to(device)

    # SGD with fixed momentum; weight decay comes from the CLI (--l2_reg).
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=args.l2_reg)

    # Per-epoch history lists for the loop below.
    train_loss_lst, val_loss_lst = [], []
    train_acc_lst, val_acc_lst = [], []

    # Per-epoch loss statistics split by clean vs. noisy-label samples.
    clean_mean_loss_lst, noisy_mean_loss_lst = [], []
    clean_min_loss_lst, noisy_min_loss_lst = [], []
    clean_max_loss_lst, noisy_max_loss_lst = [], []

    # Running per-sample loss over the whole dataset.
    sample_loss = np.zeros(dataset_len)

    # main loop(train,val,test)