Example #1
0
def train(args):
    """Train and evaluate a binarized network on MNIST.

    Builds the train/test data loaders, constructs the model selected by
    ``args.binary``, then runs ``args.epochs`` rounds of training followed
    by evaluation.
    """
    loader_kwargs = {'num_workers': 4, 'pin_memory': True} if args.cuda else {}
    train_loader = data_.DataLoader(
        datasets.MNIST('./data', train=True, download=True,
                       transform=transforms.ToTensor()),
        batch_size=args.batch_size, shuffle=True, **loader_kwargs)
    test_loader = data_.DataLoader(
        datasets.MNIST('./data', train=False,
                       transform=transforms.ToTensor()),
        batch_size=args.test_batch_size, shuffle=True, **loader_kwargs)

    # 'connect' selects BinaryConnect; anything else falls back to BinaryNet.
    if args.binary == 'connect':
        net = model.BinaryConnect(args.in_features, args.out_features)
    else:
        net = model.BinaryNet(args.in_features, args.out_features)
    # net = nn.DataParallel(net)
    print(net)

    if args.cuda:
        net.cuda()

    optimizer = optim.Adam(net.parameters(), lr=args.lr)
    criterion = nn.NLLLoss()  # negative log-likelihood loss

    for epoch in range(1, args.epochs + 1):
        train_epoch(epoch, net, criterion, optimizer, train_loader, args)
        test_epoch(net, criterion, test_loader, args)
Example #2
0
def train(args):
    """Train a network on MNIST according to ``args``.

    Builds the train/test loaders, instantiates the model selected by
    ``args.binary`` ('connect', 'bnn' or 'nn') and the optimizer selected
    by ``args.optimizer`` ('Adam' or 'LBFGS', optionally with
    ``args.line_search_fn == 'strong_wolfe'``), then runs ``args.epochs``
    epochs of training and evaluation.

    Raises:
        ValueError: if ``args.binary``, ``args.optimizer`` or
            ``args.line_search_fn`` names an unknown option. (Previously
            an unknown option fell through the if/elif chain, leaving
            ``net``/``optimizer`` unbound and crashing later with an
            unrelated NameError.)
    """
    kwargs = {'num_workers': 2, 'pin_memory': True} if args.cuda else {}
    train_loader = data.DataLoader(
        datasets.MNIST('./data', train=True, download=True,
                       transform=transforms.ToTensor()),
                       batch_size=args.batch_size, shuffle=True, **kwargs)
    test_loader = data.DataLoader(
        datasets.MNIST('./data', train=False,
                       transform=transforms.ToTensor()),
                       batch_size=args.test_batch_size, shuffle=True, **kwargs)

    if args.binary == 'connect':
        net = model.BinaryConnect(args.in_features, args.out_features)
    elif args.binary == 'bnn':
        net = model.BinaryNet(args.in_features, args.out_features)
    elif args.binary == 'nn':
        net = model.Net(args.in_features, args.out_features)
    else:
        raise ValueError('unknown model type: %r' % (args.binary,))
    # net = nn.DataParallel(net)
    print(net)

    if args.cuda:
        net.cuda()

    if args.optimizer == 'Adam':
        optimizer = optim.Adam(net.parameters(), lr=args.lr)
    elif args.optimizer == 'LBFGS':
        if args.line_search_fn is None:
            optimizer = optim.LBFGS(net.parameters(), lr=args.lr)
        elif args.line_search_fn == 'strong_wolfe':
            # 'strong_wolfe' is the only line-search torch.optim.LBFGS supports.
            optimizer = optim.LBFGS(net.parameters(), lr=args.lr,
                                    line_search_fn='strong_wolfe')
        else:
            raise ValueError(
                'unknown line_search_fn: %r' % (args.line_search_fn,))
    else:
        raise ValueError('unknown optimizer: %r' % (args.optimizer,))
    criterion = nn.NLLLoss()

    for epoch in range(1, args.epochs+1):
        train_epoch(epoch, net, criterion, optimizer, train_loader, args)
        test_epoch(net, criterion, test_loader, args)
Example #3
0
def train(args):
    """Train a BinaryConnect model on MNIST, optionally saving artifacts.

    If ``args.output_folder`` is set, the folder is created fresh (it must
    not already exist), the launch command line is recorded, a model
    checkpoint is written after every epoch, and per-epoch train/test
    accuracies are saved as .npy arrays at the end.

    Raises:
        ValueError: if ``args.epochs`` is not positive.
        FileExistsError: if ``args.output_folder`` already exists.
    """
    kwargs = {'num_workers': 2, 'pin_memory': True} if args.cuda else {}
    train_loader = data.DataLoader(datasets.MNIST(
        './data', train=True, download=True, transform=transforms.ToTensor()),
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   **kwargs)
    test_loader = data.DataLoader(datasets.MNIST(
        './data', train=False, transform=transforms.ToTensor()),
                                  batch_size=args.test_batch_size,
                                  shuffle=True,
                                  **kwargs)

    net = model.BinaryConnect(args.topology, args.batch_norm)
    # net = nn.DataParallel(net)
    print(net)

    # ``output_folder`` may be absent from the namespace entirely.
    output_folder = vars(args).get('output_folder')

    if output_folder is not None:
        # exist_ok=False makes makedirs raise FileExistsError itself, so no
        # separate existence check is needed (the old ``assert`` would also
        # have been stripped under ``python -O``).
        os.makedirs(output_folder, exist_ok=False)
        with open(os.path.join(output_folder, 'command.txt'), 'w') as f:
            f.write(' '.join(sys.argv))

    if args.cuda:
        net.cuda()

    optimizer = optim.Adam(net.parameters(), lr=args.lr)
    criterion = nn.NLLLoss()

    train_accs = []
    test_accs = []

    # Validate with a real exception instead of ``assert`` (asserts are
    # removed under ``python -O``).
    if args.epochs <= 0:
        raise ValueError('args.epochs must be positive, got %r' % (args.epochs,))
    for epoch in range(1, args.epochs + 1):
        train_acc = train_epoch(epoch, net, criterion, optimizer, train_loader,
                                args)
        test_acc = test_epoch(net, criterion, test_loader, args)

        train_accs.append(train_acc)
        test_accs.append(test_acc)

        if output_folder is not None:
            # Move tensors to CPU so the checkpoint loads on CPU-only hosts.
            state_dict = {
                key: value.cpu()
                for key, value in net.state_dict().items()
            }
            torch.save(state_dict,
                       os.path.join(output_folder, '%s.pth' % (epoch, )))

    if output_folder is not None:
        np.save(os.path.join(output_folder, 'accuracy_train.npy'),
                np.array(train_accs))
        np.save(os.path.join(output_folder, 'accuracy_test.npy'),
                np.array(test_accs))
Example #4
0
def train(args):
    """Train a binarized model on MNIST with exponential LR decay.

    Per epoch, logs a 1000-bin histogram of the (clipped) input-layer
    weights to data/weights.csv and epoch timing/loss/accuracy stats to
    data/stats.csv.

    Fixes vs. the previous revision:
    * ``time.clock()`` was removed in Python 3.8; ``time.perf_counter()``
      is its documented replacement.
    * The scheduler is stepped once per epoch after training (the epoch
      argument to ``scheduler.step()`` is deprecated); epoch 1 still runs
      at the base LR, matching the old ``step(epoch - 1)`` behavior.
    * The CSV log files are opened with context managers so they are
      closed even if an epoch raises.
    """
    kwargs = {'num_workers': 2, 'pin_memory': True} if args.cuda else {}
    train_loader = data.DataLoader(
        datasets.MNIST('./data', train=True, download=True,
                       transform=transforms.ToTensor()),
                       batch_size=args.batch_size, shuffle=True, **kwargs)
    test_loader = data.DataLoader(
        datasets.MNIST('./data', train=False,
                       transform=transforms.ToTensor()),
                       batch_size=args.test_batch_size, shuffle=True, **kwargs)

    if args.binary == 'connect':
        net = model.BinaryConnect(args.in_features, args.out_features)
    else:
        net = model.BinaryNet(args.in_features, args.out_features)
    # net = nn.DataParallel(net)
    print(net)

    if args.cuda:
        net.cuda()

    optimizer = optim.Adam(net.parameters(), lr=args.lr)
    # Decay factor chosen so the LR reaches 3e-6 after args.epochs epochs.
    decay = (0.000003 / args.lr) ** (1. / args.epochs)
    scheduler = optim.lr_scheduler.ExponentialLR(optimizer, decay)
    criterion = nn.MultiMarginLoss()

    input_layer = next(net.net.children())

    with open('data/weights.csv', 'w') as weights, \
            open('data/stats.csv', 'w') as stats:
        # Baseline evaluation before any training.
        test_epoch(net, criterion, test_loader, args)
        for epoch in range(1, args.epochs + 1):
            tr_loss, tr_acc = train_epoch(epoch, net, criterion, optimizer,
                                          train_loader, args)
            tt_loss, tt_acc = test_epoch(net, criterion, test_loader, args)

            # One histogram row per epoch: input-layer weights clipped to
            # [-1, 1], binned into 1000 buckets.
            items = input_layer.weight.cpu().data.numpy().reshape(-1, )
            items = np.clip(items, -1, 1)
            hist, _ = np.histogram(items, bins=1000, range=(-1., 1.))
            weights.write(','.join(map(str, hist)))
            weights.write('\n')
            weights.flush()

            stats.write('{},{},{},{},{},{}\n'.format(
                epoch, time.perf_counter(), tr_loss, tr_acc, tt_loss, tt_acc))
            stats.flush()

            # Advance the LR schedule once per completed epoch.
            scheduler.step()