Example #1
def main(model_name="CNN", is_training=True):
    # Build the requested model from the shared Config, then either train it
    # or restore a saved checkpoint and run prediction.
    config = Config()
    if model_name == "CNN":
        model = models.CNN(config)
    elif model_name == "RNN":
        model = models.RNN(config)
    elif model_name == "RCNN":
        model = models.RCNN(config)
    else:
        model = models.FC(config)

    if is_training:
        model.train()
    else:
        model.restore_model()
        model.predict()
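A minimal sketch (not from the original script) of how this dispatcher might be driven from the command line; the flag names and choices below are illustrative assumptions:

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', default='CNN', choices=['CNN', 'RNN', 'RCNN', 'FC'])
    parser.add_argument('--predict', action='store_true')
    cli = parser.parse_args()
    main(model_name=cli.model, is_training=not cli.predict)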
Example #2
def get_net(args):
    # Construct the network selected by args.model.
    if args.model == 'densenet':
        net = densenet.DenseNet(growthRate=12,
                                depth=100,
                                reduction=0.5,
                                bottleneck=True,
                                nClasses=10)
    elif args.model == 'lenet':
        net = models.Lenet(args.nHidden, 10, args.proj)
    elif args.model == 'lenet-optnet':
        net = models.LenetOptNet(args.nHidden, args.nineq)
    elif args.model == 'fc':
        net = models.FC(args.nHidden, args.bn)
    elif args.model == 'optnet':
        net = models.OptNet(28 * 28, args.nHidden, 10, args.bn, args.nineq)
    elif args.model == 'optnet-eq':
        net = models.OptNetEq(28 * 28, args.nHidden, 10, args.neq)
    else:
        assert False, 'unknown model: {}'.format(args.model)

    return net
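A minimal sketch (not taken from the original repo) of the argparse namespace that `get_net` expects; the attribute names and model choices mirror those accessed above, while the default values are illustrative assumptions:

import argparse

def parse_args():
    parser = argparse.ArgumentParser()
    # Choices mirror the branches in get_net; defaults are illustrative only.
    parser.add_argument('--model', default='fc',
                        choices=['densenet', 'lenet', 'lenet-optnet',
                                 'fc', 'optnet', 'optnet-eq'])
    parser.add_argument('--nHidden', type=int, default=50)
    parser.add_argument('--bn', action='store_true')
    parser.add_argument('--proj', type=str, default=None)
    parser.add_argument('--nineq', type=int, default=100)
    parser.add_argument('--neq', type=int, default=10)
    return parser.parse_args()

# net = get_net(parse_args())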
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--no-cuda', action='store_true')
    parser.add_argument('--boardSz', type=int, default=2)
    parser.add_argument('--batchSz', type=int, default=150)
    parser.add_argument('--testBatchSz', type=int, default=200)
    parser.add_argument('--nEpoch', type=int, default=100)
    parser.add_argument('--testPct', type=float, default=0.1)
    parser.add_argument('--save', type=str)
    parser.add_argument('--work', type=str, default='work')
    subparsers = parser.add_subparsers(dest='model')
    subparsers.required = True
    fcP = subparsers.add_parser('fc')
    fcP.add_argument('--nHidden', type=int, nargs='+', default=[100, 100])
    fcP.add_argument('--bn', action='store_true')
    convP = subparsers.add_parser('conv')
    convP.add_argument('--nHidden', type=int, default=50)
    convP.add_argument('--bn', action='store_true')
    optnetP = subparsers.add_parser('optnet')
    # optnetP.add_argument('--nHidden', type=int, default=50)
    # optnetP.add_argument('--nineq', type=int, default=100)
    optnetP.add_argument('--Qpenalty', type=float, default=0.1)
    args = parser.parse_args()

    args.cuda = not args.no_cuda and torch.cuda.is_available()
    t = '{}.{}'.format(args.boardSz, args.model)
    if args.model == 'optnet':
        t += '.Qpenalty={}'.format(args.Qpenalty)
    elif args.model == 'fc':
        t += '.nHidden:{}'.format(','.join([str(x) for x in args.nHidden]))
        if args.bn:
            t += '.bn'
    if args.save is None:
        args.save = os.path.join(args.work, t)
    setproctitle.setproctitle('bamos.sudoku.' + t)

    # Load the pre-generated Sudoku features and labels for this board size.
    with open('data/{}/features.pt'.format(args.boardSz), 'rb') as f:
        X = torch.load(f)
    with open('data/{}/labels.pt'.format(args.boardSz), 'rb') as f:
        Y = torch.load(f)

    N, nFeatures = X.size(0), int(np.prod(X.size()[1:]))

    nTrain = int(N * (1. - args.testPct))
    nTest = N - nTrain

    trainX = X[:nTrain]
    trainY = Y[:nTrain]
    testX = X[nTrain:]
    testY = Y[nTrain:]

    assert nTrain % args.batchSz == 0
    assert nTest % args.testBatchSz == 0

    save = args.save
    if os.path.isdir(save):
        shutil.rmtree(save)
    os.makedirs(save)

    npr.seed(1)

    print_header('Building model')
    if args.model == 'fc':
        # nHidden = 2*nFeatures-1
        nHidden = args.nHidden
        model = models.FC(nFeatures, nHidden, args.bn)
    elif args.model == 'optnet':
        model = models.OptNet(args.boardSz, args.Qpenalty)
    else:
        assert False, 'unknown model: {}'.format(args.model)

    if args.cuda:
        model = model.cuda()

    fields = ['epoch', 'loss', 'err']
    trainF = open(os.path.join(save, 'train.csv'), 'w')
    trainW = csv.writer(trainF)
    trainW.writerow(fields)
    trainF.flush()
    testF = open(os.path.join(save, 'test.csv'), 'w')
    testW = csv.writer(testF)
    testW.writerow(fields)
    testF.flush()

    if args.model == 'optnet':
        # if args.tvInit: lr = 1e-4
        # elif args.learnD: lr = 1e-2
        # else: lr = 1e-3
        lr = 1e-1
    else:
        lr = 1e-3
    optimizer = optim.Adam(model.parameters(), lr=lr)

    writeParams(args, model, 'init')
    test(args, 0, model, testF, testW, testX, testY)
    for epoch in range(1, args.nEpoch + 1):
        # update_lr(optimizer, epoch)
        train(args, epoch, model, trainF, trainW, trainX, trainY, optimizer)
        test(args, epoch, model, testF, testW, testX, testY)
        torch.save(model, os.path.join(args.save, 'latest.pth'))
        writeParams(args, model, 'latest')
        os.system('./plot.py "{}" &'.format(args.save))
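The two asserts in this example require the train and test split sizes to divide evenly by the batch sizes. A quick check with the script's default arguments and a hypothetical dataset size (the real N comes from features.pt):

# Hypothetical dataset size; testPct, batchSz, testBatchSz are the defaults above.
N, testPct, batchSz, testBatchSz = 10000, 0.1, 150, 200
nTrain = int(N * (1. - testPct))   # 9000
nTest = N - nTrain                 # 1000
assert nTrain % batchSz == 0       # 9000 / 150 = 60 full training batches
assert nTest % testBatchSz == 0    # 1000 / 200 = 5 full test batches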
Example #4

##############################################################################################
##############################################################################################
if __name__ == "__main__":

    # Data Preparation
    data = Data(config.Mode)
    data.load()
    config.uniq_char = len(data.char_list)

    # Model Preparation
    cnn_models = models.Conv1d(config.cnn_w, config.pool_w,
                               config.cnn_output_feature,
                               data.char_dict).cuda()
    linear_model = models.FC(config.fc_input_feature, config.fc_hidden_feature,
                             config.class_n).cuda()
    loss_model = nn.CrossEntropyLoss().cuda()
    # `parameters` and `learning_rate` are defined elsewhere in the original script;
    # `parameters` is expected to yield the parameters of both modules.
    optimizer = torch.optim.SGD(parameters(cnn_models, linear_model),
                                lr=learning_rate,
                                momentum=config.momentum)

    for i in range(config.num_epochs):
        # Training
        batch_loss = 0.
        train_batches = data.train_batch_iter(config.batch_size)
        for j, train_batch in enumerate(train_batches):
            star_batch, text_batch, len_batch = zip(*train_batch)
            batch_loss += run(star_batch, text_batch, len_batch, step=1)
            if (j + 1) % 1000 == 0:
                print("batch #{:d}: ".format(j + 1), "batch_loss :",
                      batch_loss / (j + 1), datetime.datetime.now())
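The optimizer call above relies on a `parameters(cnn_models, linear_model)` helper that is not shown in the excerpt. A minimal sketch of what such a helper presumably does, chaining the parameters of both modules so a single optimizer updates them:

import itertools

def parameters(*modules):
    # Yield every parameter of every given module for a single optimizer.
    return itertools.chain.from_iterable(m.parameters() for m in modules)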
Example #5
def main(args):
    if args.mnist:
        # Normalize image for MNIST
        # normalize = Normalize(mean=(0.1307,), std=(0.3081,))
        normalize = None
        args.input_size = 784
    elif args.cifar:
        normalize = utils.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
        args.input_size = 32 * 32 * 3
    else:
        # Normalize image for ImageNet
        normalize = utils.Normalize(mean=[0.485, 0.456, 0.406],
                                    std=[0.229, 0.224, 0.225])
        args.input_size = 150528

    # Load data
    train_loader, test_loader = utils.get_data(args)

    # The unknown model to attack
    unk_model = utils.load_unk_model(args)

    # Try Whitebox Untargeted first
    if args.debug:
        ipdb.set_trace()

    if args.train_vae:
        encoder, decoder, vae = train_mnist_vae(args)
    else:
        encoder, decoder, vae = None, None, None

    if args.train_ae:
        encoder, decoder, ae = train_mnist_ae(args)
    else:
        # Note: this also resets encoder/decoder if the VAE branch above set them.
        encoder, decoder, ae = None, None, None

    # Add A Flow
    norm_flow = None
    if args.use_flow:
        # norm_flow = flows.NormalizingFlow(30, args.latent).to(args.device)
        norm_flow = flows.Planar
    # Test white box
    if args.white:
        # Choose Attack Function
        if args.no_pgd_optim:
            white_attack_func = attacks.L2_white_box_generator
        else:
            white_attack_func = attacks.PGD_white_box_generator

        # Choose Dataset
        if args.mnist:
            G = models.Generator(input_size=784).to(args.device)
            nc, h, w = 1, 28, 28  # MNIST images are 1x28x28
        elif args.cifar:
            if args.vanilla_G:
                G = models.DCGAN().to(args.device)
                G = nn.DataParallel(G.generator)
            else:
                G = models.ConvGenerator(models.Bottleneck, [6, 12, 24, 16],
                                         growth_rate=12,
                                         flows=norm_flow,
                                         use_flow=args.use_flow,
                                         deterministic=args.deterministic_G).to(args.device)
                G = nn.DataParallel(G)
            nc, h, w = 3, 32, 32

        if args.run_baseline:
            attacks.whitebox_pgd(args, unk_model)

        pred, delta = white_attack_func(args, train_loader, test_loader,
                                        unk_model, G, nc, h, w)

    # Blackbox Attack model
    model = models.GaussianPolicy(args.input_size,
                                  400,
                                  args.latent_size,
                                  decode=False).to(args.device)

    # Control Variate
    cv = to_cuda(models.FC(args.input_size, args.classes))
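`to_cuda` is a project-specific helper not included in the excerpt; a minimal sketch, assuming it simply moves a module to the GPU when one is available:

import torch

def to_cuda(module):
    # Move the module to the GPU if CUDA is available; otherwise leave it on CPU.
    return module.cuda() if torch.cuda.is_available() else module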