Code Example #1
File: certify_mlp.py  Project: liuchen11/PolyEnvelope
    # Configure GPU
    config_visible_gpu(args.gpu)
    use_gpu = args.gpu != 'cpu' and torch.cuda.is_available()
    device = torch.device('cuda:0' if use_gpu else 'cpu')

    # Data set
    if args.dataset.lower() in [
            'mnist',
    ]:
        train_loader, test_loader = mnist(batch_size=args.batch_size,
                                          batch_size_test=args.batch_size)
    elif args.dataset.lower() in [
            'cifar10',
    ]:
        train_loader, test_loader = cifar10(batch_size=args.batch_size,
                                            batch_size_test=args.batch_size)
    else:
        raise ValueError('Unrecognized dataset: %s' % args.dataset)
    assert args.subset in [
        'train', 'test'
    ], 'Subset tag can be only "train" or "test", but %s found' % args.subset
    data_loader = train_loader if args.subset in [
        'train',
    ] else test_loader

    # Parse IO
    assert os.path.exists(
        args.model2load
    ), 'model2load file %s does not exist' % args.model2load
    out_dir = os.path.dirname(args.out_file)
    if out_dir != '' and not os.path.exists(out_dir):
        os.makedirs(out_dir)
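
config_visible_gpu is a project helper that these snippets call but never define; a minimal sketch of what such a helper typically does (an assumption, not the PolyEnvelope implementation) is to restrict which GPU PyTorch can see via the CUDA_VISIBLE_DEVICES environment variable:

import os

def config_visible_gpu(gpu):
    # Hypothetical sketch: expose only the requested GPU to PyTorch.
    # Passing 'cpu' leaves the environment untouched, matching the CPU
    # fallback used in the snippets above.
    if gpu != 'cpu':
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)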
Code Example #2
    config_visible_gpu(args.gpu)
    use_gpu = args.gpu != 'cpu' and torch.cuda.is_available()
    device = torch.device('cuda:0' if use_gpu else 'cpu')

    # Data set
    if args.dataset.lower() in [
            'mnist',
    ]:
        train_loader, test_loader = mnist(batch_size=args.batch_size,
                                          batch_size_test=args.batch_size,
                                          normalization=args.normalization)
    elif args.dataset.lower() in [
            'cifar10',
    ]:
        train_loader, test_loader = cifar10(batch_size=args.batch_size,
                                            batch_size_test=args.batch_size,
                                            normalization=args.normalization)
    else:
        raise ValueError('Unrecognized dataset: %s' % args.dataset)

    # Parse IO
    if not os.path.exists(args.out_folder):
        os.makedirs(args.out_folder)

    # Parse model
    model = MLP(in_dim=args.in_dim,
                hidden_dims=args.hidden_dims,
                out_dim=args.out_dim,
                nonlinearity=args.nonlinearity)
    criterion = nn.CrossEntropyLoss()
    model = model.cuda(device) if use_gpu else model
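
The snippet ends right after the MLP is built and moved to the GPU; a typical continuation, restoring trained weights from a checkpoint, would use the standard torch.load / load_state_dict API (the 'model.ckpt' path and the bare state_dict layout below are assumptions, not taken from the project):

    # Hypothetical continuation: restore previously trained weights.
    # The checkpoint path and its being a plain state_dict are assumptions.
    state_dict = torch.load('model.ckpt', map_location=device)
    model.load_state_dict(state_dict)
    model.eval()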
Code Example #3
                        type=str,
                        default=None,
                        help='The folder to be scanned')

    args = parser.parse_args()

    if args.dataset.lower() in [
            'mnist',
    ]:
        train_loader, test_loader, classes = mnist(batch_size=args.batch_size,
                                                   shuffle=False,
                                                   data_augmentation=False)
    elif args.dataset.lower() in [
            'cifar10',
    ]:
        train_loader, test_loader, classes = cifar10(
            batch_size=args.batch_size, shuffle=False, data_augmentation=False)
    else:
        raise ValueError('Unrecognized dataset: %s' % args.dataset)

    train_ori_data = []
    test_ori_data = []

    for idx, (data_batch, label_batch) in enumerate(train_loader, 0):
        data_batch = data_batch.reshape(data_batch.size(0), -1)
        train_ori_data.append(data_batch.data.cpu().numpy())
    for idx, (data_batch, label_batch) in enumerate(test_loader, 0):
        data_batch = data_batch.reshape(data_batch.size(0), -1)
        test_ori_data.append(data_batch.data.cpu().numpy())

    train_ori_data = np.concatenate(train_ori_data, axis=0)
    test_ori_data = np.concatenate(test_ori_data, axis=0)
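
The loops above keep only the flattened images and discard label_batch; if the class labels were also needed, they could be gathered in the same pass. A sketch (not part of the original file) for the training split:

    train_ori_label = []
    for data_batch, label_batch in train_loader:
        # Collect the integer class labels alongside the flattened images.
        train_ori_label.append(label_batch.data.cpu().numpy())
    train_ori_label = np.concatenate(train_ori_label, axis=0)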
Code Example #4
    args = parser.parse_args()

    # Configure GPU
    config_visible_gpu(args.gpu)
    use_gpu = args.gpu != 'cpu' and torch.cuda.is_available()
    device = torch.device('cuda:0' if use_gpu else 'cpu')

    # Data set
    if args.dataset.lower() in [
            'mnist',
    ]:
        train_loader, test_loader = mnist(batch_size=1, batch_size_test=1)
    elif args.dataset.lower() in [
            'cifar10',
    ]:
        train_loader, test_loader = cifar10(batch_size=1, batch_size_test=1)
    else:
        raise ValueError('Unrecognized dataset: %s' % args.dataset)
    assert args.subset in [
        'train', 'test'
    ], 'Subset tag can be only "train" or "test", but %s found' % args.subset
    data_loader = train_loader if args.subset in [
        'train',
    ] else test_loader

    # Parse IO
    assert os.path.exists(
        args.model2load
    ), 'model2load file %s does not exist' % args.model2load
    out_dir = os.path.dirname(args.out_file)
    if out_dir != '' and not os.path.exists(out_dir):
        os.makedirs(out_dir)
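
None of the snippets include the argument parser itself; judging from the attributes they read (args.gpu, args.dataset, args.batch_size, args.subset, args.model2load, args.out_file), a minimal parser they could run against looks roughly like this (flag names, types, and defaults are assumptions):

import argparse

parser = argparse.ArgumentParser()
# Flags inferred from the attribute accesses in the snippets; defaults are assumptions.
parser.add_argument('--gpu', type=str, default='cpu', help='GPU index, or "cpu"')
parser.add_argument('--dataset', type=str, default='mnist', help='mnist or cifar10')
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--subset', type=str, default='test', help='"train" or "test"')
parser.add_argument('--model2load', type=str, default=None, help='path of the checkpoint to certify')
parser.add_argument('--out_file', type=str, default=None, help='where to write the results')
args = parser.parse_args()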