Example #1
def test_k(args, k_values):
    """
    Test different k values for both weight and unit pruning.
    :param args: argparse arguments
    :param k_values: list of k values to test
    :return: list of accuracies for weight pruning, list of accuracies for unit pruning
    """
    train_unparsed_dataset, test_unparsed_dataset, train_size, test_size = load_mnist(
    )
    test_dataset = test_unparsed_dataset.map(flatten_function)
    batched_test = test_dataset.shuffle(buffer_size=test_size).batch(
        args.batch_size)

    checkpoint_prefix, checkpoint_dir = get_prefix(args)
    model = SparseNN(ptype=args.ptype)
    checkpoint = tf.train.Checkpoint(model=model)

    weights_accuracies = []
    for k in k_values:
        # we have to reload our model each time, as we rewrite our weights every time.
        # this technically shouldn't be necessary assuming all k's are sorted, but
        # it's better to be safe.
        checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
        model.set_params(k / 100, "weights")
        weights_accuracies.append(evaluate_model(model, batched_test))

    nodes_accuracies = []
    for k in k_values:
        # same as above.
        checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
        model.set_params(k / 100, "nodes")
        nodes_accuracies.append(evaluate_model(model, batched_test))

    return weights_accuracies, nodes_accuracies
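
A hypothetical driver for the helper above (not part of the original script): it mirrors the __main__ block shown later in Example #23, assumes the same get_args(), sweeps k from 0% to 90%, and plots both pruning curves with matplotlib.

import matplotlib.pyplot as plt

if __name__ == "__main__":
    tf.enable_eager_execution()
    args = get_args()
    k_values = list(range(0, 100, 10))  # percentage of weights/units to prune
    weight_acc, unit_acc = test_k(args, k_values)
    plt.plot(k_values, weight_acc, label="weight pruning")
    plt.plot(k_values, unit_acc, label="unit pruning")
    plt.xlabel("k (% pruned)")
    plt.ylabel("test accuracy")
    plt.legend()
    plt.savefig("pruning_accuracy.png")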
Example #2
def main():
    #load the data
    N_data, train_images, train_labels, test_images, test_labels = data.load_mnist(
    )
    train_size = 10000  #number of train_data images
    # binarize the images at a 0.5 threshold
    bin_train_data = np.where(train_images[:train_size, :] >= 0.5, 1, 0)
    bin_test_data = np.where(test_images >= 0.5, 1, 0)
    print(N_data)
def main():
    model = init_mnist_model(pretrained=False)
    attack = FGSM(model)

    _, (X_test, y_test) = load_mnist()
    for epsilon in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]:
        test_model_resilience(model, attack, X_test[0:100], y_test[0:100], epsilon=epsilon)

    """
Example #4
def FederatedTrain(args):

    if args.dataset == 'MNIST':
        dataset = data.load_mnist()
        dataloaders_train, dataloader_test = data.create_split_dataloaders(
            dataset = dataset,
            args=args
        )
        dataiters_train = [iter(loader) for loader in dataloaders_train]
        dataiters_test = iter(dataloader_test)
        n_channels = 1
    else:
        print('Dataset Is Not Supported')
        exit(1)


    n_clients = args.n_clients

    global_net = net.LeNet(n_channels = n_channels)
    print(global_net)

    learner = FederatedLearner(net = global_net, args = args)
    learner.gpu = args.gpu

    model_dir = args.model_dir
    global_model_name = args.global_model_name
    global_optim_name = args.global_optimizor_name
    global_model_suffix = '_init_.pth'
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    torch.save(learner.net.state_dict(), model_dir + global_model_name + global_model_suffix)
    print "Model saved"

    for t in range(args.epochs):
        if t == 0:
            global_model_suffix = '_init_.pth'
        else:
            global_model_suffix = '_{cur}.pth'.format(cur=t-1)

        learner.load_model(model_path = model_dir + global_model_name + global_model_suffix)

        for i in range(n_clients):
            print('t=', t, 'client model idx=', i)
            try:
                batchX, batchY = next(dataiters_train[i])
            except StopIteration:
                dataiters_train[i] = iter(dataloaders_train[i])
                batchX, batchY = next(dataiters_train[i])

            learner.comp_grad(i, batchX, batchY)
        learner._update_model()

        global_model_suffix = '_{cur}.pth'.format(cur=t)
        torch.save(learner.net.state_dict(), model_dir + global_model_name + global_model_suffix)

        if (t+1) % args.n_eval_iters == 0:
            learner._evalTest(test_loader = dataloader_test)
Example #5
def repeat_exps( args ):
    '''
    repeat training CNNs and record learning curves
    '''
    print( '-== {}-{} ==-'.format( args.dataset, args.alg ) )
    print( 'lrate={0} dropout={1} batch_size={2} epochs={3}'.format( args.lrate, args.dropout, args.batch_size, args.epochs ) )
    print( 'repeat={0}'.format( args.repeat ) )

    if args.dataset == 'mnist':
        data.load_mnist( args )
    elif args.dataset.startswith( 'cifar' ):
        data.load_cifar( args )
    else:
        print( "unknown dataset", args.dataset )
        sys.exit(1)

    curves = []
    for _ in range( args.repeat ):
        train_history, test_history = exp( args )
        print( '{:3d}'.format( _ ), end=' ' )
        print( 'train={:.4f}'.format( train_history[-1] ), end='  ' )
        print( 'test={:.4f}'.format(  test_history[-1] ) )
        curves.append( ( train_history, test_history ) )
    curves = np.array( curves )

    # save results
    filename = '{}_{}_{}'.format( args.dataset, args.arch, args.alg )
    filename += '_lr{}'.format( args.lrate )
    if args.alg.startswith( 'q' ):
        if args.const:
            filename += '_c{}'.format( args.init_v )
        else:
            filename += '_a{}'.format( args.init_v )
    elif args.alg[0] == 'n':
        filename += '_n{}'.format( args.init_v )

    if args.dropout: filename += '_dropout'

    np.savez( filename, curves=curves )
    print( 'results saved to {}'.format( filename ) )
Example #6
def main():
    make_ckpt_data_sample_dirs()

    config_path = sys.argv[1]
    with open(config_path, "r") as config_fp:
        config = yaml.full_load(config_fp)

    input_dims = config["input_dims"]
    input_size = np.prod(input_dims)
    hidden_size = config["hidden_size"]
    latent_size = config["latent_size"]
    device = torch.device(config["device"])
    num_epochs = config["epochs"]
    save_freq = config["save_freq"]

    if config["dataset"] == "mnist":
        train_loader, val_loader, test_loader = load_mnist()
        data_cmap = "Greys_r"
    else:
        train_loader, val_loader, test_loader = load_centered_dspirtes()
        data_cmap = "RGB"

    model = VAE(
        input_size=input_size,
        hidden_size=hidden_size,
        latent_size=latent_size,
        device=device,
    )
    optimizer = torch.optim.Adam(model.parameters())

    model.to(device)

    train_model(
        model,
        train_loader,
        val_loader,
        num_epochs,
        optimizer,
        device,
        image_dims=input_dims,
        save_freq=save_freq,
        data_cmap=data_cmap,
    )
    test_loss = test_model(model, test_loader, device)

    print("Test loss: " + str(test_loss))

    plot_reconstructions(model, next(iter(test_loader)), device, num_epochs,
                         input_dims, data_cmap)
    plot_samples_from_prior(model, device, num_epochs, input_dims, data_cmap)
    save_checkpoint(model, num_epochs)
Example #7
def train_mnist_full(output_size=10,
                     epoch_num=100,
                     batch_size=100,
                     l2=1e-5,
                     learning_rate=1e-3,
                     d=9):
    all_inx = sio.loadmat(
        './data/mnist/mnist_shuffle_inx10.mat')['mnist_shuffle_inx10']
    result = []
    times = 1
    if config.test_times == -1:
        times = len(all_inx)
    for i in range(times):
        # for dd in all_data:
        if config.test_times == -1:
            inx = i
        else:
            inx = config.test_times
        from data import load_mnist
        all_data = load_mnist(all_inx[inx, :], D=1)

        if type(all_data) is tuple:
            all_data = [all_data]
            input_size = all_data[0][1]
        else:
            _input_size = all_data[0][1]
            input_size = []
            for _i in _input_size:
                input_size.append(tuple(_i.reshape([-1]).tolist()))
        for dd in all_data:
            _all_data = dd[0]
            from model import create_mnist_full_model
            model, predit_model = create_mnist_full_model(
                input_size, output_size, l2, learning_rate)
            model.summary()
            print("lambda_cca1: " + str(config.lambda_cca1) +
                  '       index: ' + str(inx))
            result.append(
                train_model(model,
                            _all_data,
                            epoch_num,
                            batch_size,
                            predit_model,
                            d=d,
                            model_path='tmp/mnist_full_model.h5'))
    return result
def load_data():
    """Binarized training data; first 10k for train, second 10k for testing."""
    print("Loading training data...")
    N, train_images, train_labels, _, _ = load_mnist()

    print(
        f"MNIST loaded train: {train_images.shape} labels: {train_labels.shape}"
    )

    def binarise(images):
        on = images > 0.5
        images = images * 0.0
        images[on] = 1.0
        return images

    print("Binarising training data...")
    train_images = binarise(train_images)
    train_images_, train_labels_ = train_images[0:10000], train_labels[0:10000]
    test_images_, test_labels_ = train_images[10000:20000], train_labels[
        10000:20000]

    return train_images_, train_labels_, test_images_, test_labels_
def init_mnist_model(pretrained=False):
    if pretrained:
        return load_model("mnist_model.h5")

    else:
        model = Sequential()
        model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(28,28,1)))
        model.add(Conv2D(32, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(64, activation='relu'))
        model.add(Dropout(0.25))
        model.add(Dense(10, activation='softmax'))

        model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
        model.summary()

        (X_train, y_train), (X_test, y_test) = load_mnist()
        model.fit(X_train, y_train, batch_size=64, epochs=10,
                  validation_data=(X_test, y_test),
                  callbacks=[JSONLogger()], verbose=0)

        return model
Example #10
def main():
    argparser = argparse.ArgumentParser()
    argparser.add_argument('--nsamples', type=int, default=60000)
    argparser.add_argument('--batch_size', type=int, default=100)

    argparser.add_argument('--width', type=int, default=1000)
    argparser.add_argument('--lmbd', type=float, default=0.0)
    argparser.add_argument('--weight_decay', type=float, default=0)
    argparser.add_argument('--init_fac', type=float, default=1)
    argparser.add_argument('--nepochs', type=int, default=100)
    argparser.add_argument('--lr', type=float, default=0.01)
    argparser.add_argument('--ntries', type=int, default=1)
    argparser.add_argument('--watch_lp', action='store_true')
    argparser.add_argument('--device', default='cuda')
    argparser.add_argument('--dir', default='checkpoints/')
    args = argparser.parse_args()
    device = torch.device(args.device)

    # Data Load
    train_dl, test_dl = load_mnist(batch_size=args.batch_size,
                                   nsamples=args.nsamples,
                                   root='./data/mnist',
                                   one_hot=False,
                                   classes=[3, 8])

    # repeat experiments of n times
    res_t = []
    for _ in range(args.ntries):
        net = TwoLayerNet(784, args.width, 1, args.init_fac).to(device)

        lmbd_base = math.log10(784)/args.nsamples
        lmbd_base = math.sqrt(lmbd_base)
        # keep the second return value; it is assumed to hold the per-step
        # training records consumed by watch_learning_process below
        res, records = train_model(args, net, train_dl, test_dl, device, lmbd_base)
        res_t.append(res)
    print(res_t)
    tr_err, tr_acc, te_err, te_acc, pnorm, l2norm = zip(*res_t)

    res = {
        'dataset': 'mnist',
        'nsamples': args.nsamples,
        'weight_decay': args.weight_decay,
        'lambda': args.lmbd,
        'width': args.width,
        'init_fac': args.init_fac,
        'batch_size': args.batch_size,
        'lr': args.lr,
        'train_error': tr_err,
        'train_accuracy': tr_acc,
        'test_error': te_err,
        'test_accuracy': te_acc,
        'path_norm': pnorm,
        'l2norm': l2norm
    }

    file_prefix = 'width%d_lmbd%.0e_wd%.0e_lr%.1e_init%.1f_bz%d_nsamples%d' % (
                  args.width, args.lmbd, args.weight_decay, args.lr, args.init_fac,
                  args.batch_size,args.nsamples)

    if args.watch_lp:
        watch_learning_process(records, file_prefix)

    if not os.path.exists(args.dir):
        os.makedirs(args.dir)
    with open('%s/%s_.pkl' % (args.dir,file_prefix), 'wb') as f:
        pickle.dump(res, f)
Example #11
                    help="Number of stacked arm layers")
args = parser.parse_args()
iteration = args.iteration
threshold = args.threshold
nb_epoch = args.epoch
trainSize = args.trainSize
testSize = args.testSize
layers = args.layers
nb_epoch = args.epoch * layers
batchSize = args.batchSize
lr = args.lr
resultFile = args.resultFile

(X_train,
 Y_train), (X_test,
            Y_test), datagen, test_datagen, nb_classes = data.load_mnist()
X_train = X_train[:trainSize]
X_test = X_test[:testSize]
vis(X_test * 255, "orig.png")
nb_features = np.prod(X_test.shape[1:])

model = build_encode_decode_layers(input_shape=X_test.shape,
                                   iteration=iteration,
                                   threshold=threshold,
                                   dict_size_list=dict_size_list,
                                   lr=lr,
                                   layers=layers)

#fit the model on the batches generated by datagen.flow()
model.fit_generator(datagen.flow(X_train,
                                 X_train,
Example #12
import data
import autograd.numpy as np

from scipy.special import logsumexp
from Q1_reg import binarize_data
import matplotlib.pyplot as plt
np.random.seed(14)

N_data, train_images, train_labels, test_images, test_labels = data.load_mnist(
)
D = 784
C = 10


def log_softmax(X, weight):
    '''
    :param X: N x D data matrix
    :param weight: D x C matrix, where D is the data dimension and C the number of classes
    :return: N x C matrix of log-probabilities; each row is the log of the class distribution for the corresponding example
    '''
    z = np.dot(X, weight)
    deno = logsumexp(z, axis=1)
    return z - deno[:, np.newaxis]


def neg_log_likelihood(labels, z):
    # labels are one-hot: sum the log-probability of the correct class per
    # example, then average over examples
    loss = np.mean(np.sum(labels * z, axis=1))
    return -loss

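A minimal training sketch built on the two helpers above (not part of the original file): full-batch gradient descent with the analytic softmax-regression gradient. The subset size, learning rate, and step count are illustrative assumptions.

def nll_gradient(X, labels, weight):
    # d(neg_log_likelihood)/d(weight) = X^T (softmax(X @ weight) - labels) / N
    probs = np.exp(log_softmax(X, weight))
    return np.dot(X.T, probs - labels) / X.shape[0]


X, Y = train_images[:10000], train_labels[:10000]
weight = np.zeros((D, C))
for step in range(200):
    weight = weight - 0.5 * nll_gradient(X, Y, weight)
    if step % 50 == 0:
        print(step, neg_log_likelihood(Y, log_softmax(X, weight)))
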
Example #13
    pool8 = nn.MaxPool2d(kernel_size=3, stride=2, padding=0)
    layers.append(pool8)
    # fully connected layers: specify input and output neuron counts
    fc9 = nn.Linear(9216, 4096)
    layers.append(fc9)
    fc10 = nn.Linear(4096, 4096)
    layers.append(fc10)
    fc11 = nn.Linear(4096, 10)
    layers.append(fc11)
    # print the number of parameters in the network
    print_params_num(layers)
    # load the data
    # MNIST 28x28 dataset, 60000 samples
    #mndata = MNIST('../week4/mnist/python-mnist/data/')
    #image_data_all, image_label_all = mndata.load_training()
    image_data_all, image_label_all = load_mnist('../week_4/mnist',
                                                 kind='train')
    image_data = image_data_all[0:100]
    image_label = image_label_all[0:100]
    # run the data through the untrained model
    y = model(image_data, layers)
    pdb()
    # evaluate the untrained model
    print("accuracy of the untrained model = %s" %
          (get_acc(image_data, image_label, layers, 80, 100)))
    pdb()
    # train the model:
    train_model(image_data, image_label, layers, lr)
    # after training, evaluate the model and report the result:
    print("accuracy after training = %s" %
          (get_acc(image_data, image_label, layers, 80, 100)))
Example #14
if __name__ == '__main__':
    # Model hyper-parameters
    latent_dim = 10
    data_dim = 784  # How many pixels in each image (28x28).
    gen_layer_sizes = [latent_dim, 300, 200, data_dim]
    rec_layer_sizes = [data_dim, 200, 300, latent_dim * 2]

    # Training parameters
    param_scale = 0.01
    batch_size = 200
    num_epochs = 15
    step_size = 0.001

    print("Loading training data...")
    N, train_images, _, test_images, _ = load_mnist()
    on = train_images > 0.5
    train_images = train_images * 0 - 1
    train_images[on] = 1.0

    init_gen_params = init_net_params(param_scale, gen_layer_sizes)
    init_rec_params = init_net_params(param_scale, rec_layer_sizes)
    combined_init_params = (init_gen_params, init_rec_params)

    num_batches = int(np.ceil(len(train_images) / batch_size))
    def batch_indices(iter):
        idx = iter % num_batches
        return slice(idx * batch_size, (idx+1) * batch_size)

    # Define training objective
    seed = npr.RandomState(0)
Example #15
    model = Sequential()

    model.add(Convolution2D(4, 5, 5, border_mode='valid',
                            input_shape=(1, 28, 28)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Convolution2D(12, 5, 5, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(10))
    model.add(Activation('softmax'))

    sgd = SGD(lr=0.01, momentum=0.9, decay=1e-6, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy')

    model.fit(x_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
            show_accuracy=True, verbose=1, shuffle=True,
            validation_data=(x_test, y_test))

    score = model.evaluate(x_test, y_test, show_accuracy=True, verbose=0)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])

X_train, Y_train, X_test, Y_test = load_mnist()
mnist_cnn(X_train, Y_train, X_test, Y_test)
Example #16
    return np.mean(predicted_class == target_class)


if __name__ == '__main__':
    # Model parameters
    layer_sizes = [784, 200, 100, 10]
    L2_reg = 1.0

    # Training parameters
    param_scale = 0.1
    batch_size = 256
    num_epochs = 5
    step_size = 0.001

    print("Loading training data...")
    N, train_images, train_labels, test_images, test_labels = load_mnist()

    init_params = init_random_params(param_scale, layer_sizes)

    num_batches = int(np.ceil(len(train_images) / batch_size))
    def batch_indices(iter):
        idx = iter % num_batches
        return slice(idx * batch_size, (idx+1) * batch_size)

    # Define training objective
    def objective(params, iter):
        idx = batch_indices(iter)
        return -log_posterior(params, train_images[idx], train_labels[idx], L2_reg)

    # Get gradient of objective using autograd.
    objective_grad = grad(objective)
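    # The listing is cut off here. A plausible continuation, following the
    # stock autograd neural-net example, hands the gradient to the bundled
    # Adam optimizer (sketch only; the iteration count is an assumption):
    from autograd.misc.optimizers import adam

    optimized_params = adam(objective_grad, init_params,
                            step_size=step_size,
                            num_iters=num_epochs * num_batches)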
Example #17
    return print_row


if __name__ == '__main__':
    # settings
    batch_size = 128
    step_size = 1e-3
    num_epochs = 100

    # set up model and parameters
    mlp, mlp_params = init_mlp(784, [(200, tanh), (100, tanh), (10, identity)])

    # load data and set up batch-getting function
    num_data, (train_images, train_labels, test_images,
               test_labels) = to_gpu(load_mnist())
    num_batches = num_data // batch_size

    def get_batch(step):
        start_index = (step % num_batches) * batch_size
        batch = lambda x: tf.slice(x, (start_index, 0), (batch_size, -1))
        return batch(train_images), batch(train_labels)

    # set up objective and other progress measures
    step = tf.Variable(0, trainable=False)
    cost = negative_log_likelihood(mlp, get_batch(step))
    train_accuracy = accuracy(mlp, train_images, train_labels)
    test_accuracy = accuracy(mlp, test_images, test_labels)

    # set up ops
    train_op = tf.train.AdamOptimizer(step_size).minimize(cost,
Example #18
import numpy

import theano
import theano.tensor as T

import data
import model

import itertools

batch_size = 10

train_data, valid_data, test_data = data.load_mnist('mnist.pkl.gz')

index = T.lscalar()
x = T.matrix('x')
y = T.ivector('y')

p_y_given_x, layer_params = model.meta(x)
params = list(itertools.chain(*layer_params))
cost = model.negative_log_likelihood(p_y_given_x, y)

errors = model.errors(p_y_given_x, y)
validate_model = data.build_validation_function(data.batch(valid_data, batch_size=1000), errors, x, y)

n_epochs = 500
learning_rate = 0.01
L1_lambda = 0.001
L2_lambda = 0.001

train_batched = data.batch(train_data, batch_size)
Example #19
from data import load_mnist
from scipy.linalg import svd
import matplotlib.pyplot as plt
import numpy as np

train, test = load_mnist()
X_train, y_train = train
X_test, y_test = test

X_train = X_train / 255.
X_test = X_test / 255.
mean = np.mean(X_train, axis=0)
X_train = X_train - mean
X_test = X_test - mean

n_components = 2
U, S, V = svd(X_train, full_matrices=False)
basis = V[:n_components].T
test_classes = np.argmax(y_test, axis=1)
f, axarr = plt.subplots(5, 2)
for i in np.unique(test_classes):
    test_examples = np.where(test_classes == i)[0]
    X_proj = X_test[test_examples].dot(basis)
    axarr.ravel()[i].scatter(X_proj[:, 0], X_proj[:, 1], color="steelblue")
    axarr.ravel()[i].set_title("Class %i" % i)
    axarr.ravel()[i].set_xlim([-.025, .025])
    axarr.ravel()[i].set_ylim([-.025, .025])
plt.tight_layout()
plt.show()
Example #20
def mlp_mnist_sgd_experiment(rng, sample_size, hidden_size, depth, initializer,
                             learning_rate, momentum, nesterov, epochs,
                             batch_size):
    X, Y = mix_datasets(*mnist_mlp_connector(load_mnist()))
    x_train, y_train, inds = sample_train((X, Y), sample_size, rng)
    assert len(x_train) == len(y_train)
    print(f"Sampled {len(x_train)} datapoints iid")

    model = mlp(Y.shape[1],
                depth=depth,
                hidden=hidden_size,
                initializer=initializer)
    opt = tf.keras.optimizers.SGD(learning_rate=learning_rate,
                                  momentum=momentum,
                                  nesterov=nesterov)
    loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True,
                                                   label_smoothing=0)
    model.compile(optimizer=opt,
                  loss=loss,
                  metrics=[
                      tf.keras.metrics.CategoricalAccuracy(name="accuracy",
                                                           dtype=None)
                  ],
                  loss_weights=None,
                  weighted_metrics=None,
                  run_eagerly=None,
                  steps_per_execution=None)

    model_extra_summary(model)

    print(
        f"Training model for {epochs} epochs and with {batch_size} batch size."
    )
    model.fit(x=x_train,
              y=y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=0,
              callbacks=None,
              validation_split=0.0,
              validation_data=None,
              shuffle=True,
              class_weight=None,
              sample_weight=None,
              initial_epoch=0,
              steps_per_epoch=None,
              validation_steps=None,
              validation_freq=1,
              max_queue_size=10,
              workers=1,
              use_multiprocessing=False)
    # DEBUG
    model.summary()

    # measure generalization error
    train_results = model.evaluate(x=x_train,
                                   y=y_train,
                                   batch_size=None,
                                   verbose=0,
                                   sample_weight=None,
                                   steps=None,
                                   callbacks=None,
                                   max_queue_size=10,
                                   workers=1,
                                   use_multiprocessing=False,
                                   return_dict=True)
    expected_results = model.evaluate(x=X,
                                      y=Y,
                                      batch_size=None,
                                      verbose=0,
                                      sample_weight=None,
                                      steps=None,
                                      callbacks=None,
                                      max_queue_size=10,
                                      workers=1,
                                      use_multiprocessing=False,
                                      return_dict=True)

    (Xtr_uniq, Ytr_uniq), (Xtest, Ytest) = retrieve_split((X, Y), inds)
    train_unique_results = model.evaluate(x=Xtr_uniq,
                                          y=Ytr_uniq,
                                          batch_size=None,
                                          verbose=0,
                                          sample_weight=None,
                                          steps=None,
                                          callbacks=None,
                                          max_queue_size=10,
                                          workers=1,
                                          use_multiprocessing=False,
                                          return_dict=True)

    train_risk = 1 - train_results["accuracy"]
    expected_risk = 1 - expected_results["accuracy"]
    train_unique_risk = 1 - train_unique_results["accuracy"]
    test_risk = 1. / len(Ytest) * (len(Y) * expected_risk -
                                   len(Ytr_uniq) * train_unique_risk)
    generalization = expected_risk - train_risk

    return {
        "train_risk": train_risk,
        "expected_risk": expected_risk,
        "generalization": generalization,
        "test_risk": test_risk,
        "train_unique_risk": train_unique_risk
    }
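
The test_risk expression above inverts a weighted-average identity: the risk over the full set X is the size-weighted mean of the risk on the unique training points and the risk on the remaining held-out points. A quick numeric sanity check of that identity with made-up numbers:

n_full, n_train_uniq, n_test = 1000, 700, 300
r_train_uniq, r_held_out = 0.05, 0.12
r_full = (n_train_uniq * r_train_uniq + n_test * r_held_out) / n_full
recovered = 1. / n_test * (n_full * r_full - n_train_uniq * r_train_uniq)
assert abs(recovered - r_held_out) < 1e-12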
def experiment(config, output_dir, load=False, verbose=2, seed=42):
    tf.set_random_seed(seed)

    net_config = config["networks"]
    train_config = config["train"]
    test_config = config["test"]
    data_config = config["data"]
    results_config = config["results"]

    model_dir = os.path.join(output_dir, "model")
    images_dir = os.path.join(output_dir, "images")
    summary_dir = {
        "train": os.path.join(output_dir, "summary_train"),
        "test": os.path.join(output_dir, "summary_test"),
        "save": os.path.join(output_dir, "summary_save")
    }

    nb_iter = train_config["nb_iter"]
    test_every = train_config["test_every"]
    save_every = train_config["save_every"]

    verbose = verbose

    # =========== Load data ===========

    if verbose >= 1:
        print("[*] Loading data...")

    X_source_train, X_source_test, Y_source_train, Y_source_test = load_svhn(
        data_config["svhn"])
    X_target_train, X_target_test, Y_target_train, Y_target_test = load_mnist(
        data_config["mnist"])

    # =========== Start the session ===========

    with tf.Session() as sess:
        if verbose >= 1:
            print("[*] Building model...\n")
        ada = ADA(config, output_dir, sess, verbose=verbose)
        ada.build_model(summary_dir)

        # =========== Load the model and create the directories ===========

        if load:
            if verbose >= 1:
                print("[*] Loading existing model...")
            ada.load(model_dir)
        else:
            os.makedirs(images_dir)
            os.makedirs(model_dir)

        # ============= Training ==============

        if verbose >= 1:
            print("---------------------- Training ----------------------\n")

        while ada.iter < nb_iter:
            ada.train(X_source_train, X_target_train, Y_source_train,
                      Y_target_train, summary_dir["train"])

            if ada.iter % test_every == 0:
                ada.test(X_source_test, X_target_test, Y_source_test,
                         Y_target_test)
            if ada.iter % save_every == 0:
                ada.save_model(model_dir)
                ada.save_images(X_source_test,
                                X_target_test,
                                images_dir,
                                nb_images=results_config["nb_images"])

            ada.iter += 1

        # ============= Testing ==============

        if verbose >= 1:
            print("---------------------- Testing ----------------------\n")

        acc = ada.test(X_source_test,
                       X_target_test,
                       Y_source_test,
                       Y_target_test,
                       test_all=True)

        if verbose >= 1:
            print("Final accuracy: {:0.5f}\n".format(acc))
Example #22
             tf.nn.softmax(preds["n1"])],
            feed_dict={
                placeholders["i0"]: inputs["i0"],
                placeholders["i1"]: inputs["i1"]
            })
        sess.close()
    # Return both accuracies
    return accuracy_error(res[0], outputs["o0"]), accuracy_error(
        res[1], outputs["o1"])


if __name__ == "__main__":

    fashion_x_train, fashion_y_train, fashion_x_test, fashion_y_test = load_fashion(
    )
    mnist_x_train, mnist_y_train, mnist_x_test, mnist_y_test = load_mnist()

    OHEnc = OneHotEncoder(categories='auto')

    fashion_y_train = OHEnc.fit_transform(np.reshape(fashion_y_train,
                                                     (-1, 1))).toarray()

    fashion_y_test = OHEnc.transform(np.reshape(fashion_y_test,
                                                (-1, 1))).toarray()

    mnist_y_train = OHEnc.fit_transform(np.reshape(mnist_y_train,
                                                   (-1, 1))).toarray()

    mnist_y_test = OHEnc.transform(np.reshape(mnist_y_test,
                                              (-1, 1))).toarray()
Example #23
    Make a description for our checkpoints.
    :param args: arguments from argparse
    :param exclude: arguments to exclude from argparse
    :return: description of experiment run
    """
    all_args = vars(args).items()
    included_args = [k + "_" + str(v) for k, v in all_args if k not in exclude]
    return '-'.join(included_args)


if __name__ == "__main__":
    tf.enable_eager_execution()
    args = get_args()

    # load model
    model = SparseNN(ptype=args.ptype)

    # load dataset
    train_unparsed_dataset, test_unparsed_dataset, train_size, test_size = load_mnist(
    )
    train_dataset = train_unparsed_dataset.map(flatten_function)

    # shuffle and batch dataset
    batched_train = train_dataset.shuffle(buffer_size=train_size).batch(
        args.batch_size)

    # initialize trainer
    trainer = SparseTrainer(model, batched_train, args)

    trainer.train()
Example #24
def log_images(generated_images, field1, field2):
    image = combine_images(generated_images)
    image = image * 127.5 + 127.5
    generated_images_output = "generated/" + str(field1) + "_" + str(field2) + ".png"
    Image.fromarray(image.astype(np.uint8)).save(generated_images_output)


EPOCHS = 100
BATCH_SIZE = 1024
RESIZE = None
INPUT_IMAGE_SIZE = 28

GENERATOR_INPUT_NOISE_DIM = 100

X_train, y_train = load_mnist(normalize=True, resize=RESIZE)
generator, discriminator, generator_with_d = get_model()

logger = Tensorboard("logs")
for epoch in range(EPOCHS):
    print("epoch = {}".format(epoch))
    batches = int(X_train.shape[0] / BATCH_SIZE)
    for i in tqdm(range(batches)):
        step = (epoch * batches) + i
        print("step = {}".format(step))

        noise = np.random.uniform(-1.0, 1.0, (BATCH_SIZE, GENERATOR_INPUT_NOISE_DIM))
        generated = generator.predict(noise)
        if i % 200 == 0:
            log_images(generated, epoch, i)
            log_images(generated, "last", "last")
Example #25
                        (epoch - 1) * len(self.train_loader.dataset)))
                    self.train_losses_discriminator.append(d_loss.item())
                    self.train_losses_generator.append(g_loss.item())

                    torch.save(self.generator.state_dict(),
                               os.path.join(self.save_path, 'generator.pth'))

                    torch.save(
                        self.discriminator.state_dict(),
                        os.path.join(self.save_path, 'discriminator.pth'))

                    torch.save(
                        self.optimizer_G.state_dict(),
                        os.path.join(self.save_path,
                                     'optimizer_generator.pth'))

                    # save the discriminator optimizer; assumes it is stored
                    # as self.optimizer_D
                    torch.save(
                        self.optimizer_D.state_dict(),
                        os.path.join(self.save_path,
                                     'optimizer_discriminator.pth'))

            self.save_samples_of_generated_images(n_row=10, batches_done=epoch)


if __name__ == '__main__':
    from data import load_mnist

    train_loader, test_loader = load_mnist('/tmp')
    trainer = CGANTrainer(train_loader, save_path='/tmp')
    trainer.train()
Example #26
parser.add_argument('-e',
                    '--epochs',
                    type=int,
                    metavar='E',
                    default=10,
                    help='Number of epochs to train for.')
parser.add_argument('-w',
                    '--use-wandb',
                    action='store_true',
                    help='If set, log metrics to Weights and Biases')

cmd = parser.parse_args()

if cmd.dataset == 'MNIST':
    input_shape = (28 * 28, )
    train_data, test_data = data.load_mnist()
else:
    input_shape = (32 * 32 * 3, )
    train_data, test_data = data.load_cifar()

if cmd.use_wandb:
    config = {
        'Experts': cmd.n_experts,
        'Dataset': cmd.dataset,
        'Epochs': cmd.epochs
    }
else:
    config = None

m = model.MOE_Model(model.get_experts(cmd.n_experts, input_shape),
                    model.get_gate(cmd.n_experts, input_shape))
Example #27
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--binarize', action='store_true')
parser.add_argument('--pruning', action='store_true')
args = parser.parse_args()

if args.debug:
    chainer.set_debug(True)


np.random.seed(args.seed)
if args.gpu >= 0:
    cuda.cupy.random.seed(args.seed)


(x_labeled, y_labeled, x_test, y_test,
 x_unlabeled, D, T) = data.load_mnist(pruning=args.pruning)
labeled_data = feeder.DataFeeder((x_labeled, y_labeled))
test_data = feeder.DataFeeder((x_test, y_test))
unlabeled_data = feeder.DataFeeder(x_unlabeled)
N_labeled = len(labeled_data)
N_unlabeled = len(unlabeled_data)

gamma = args.beta * float(N_labeled + N_unlabeled) / N_labeled
model = net.ADGM(D, args.a_dim, T, args.z_dim, args.h_dim, gamma)
model.verbose = args.verbose
if args.gpu >= 0:
    model.to_gpu(args.gpu)
xp = cuda.cupy if args.gpu >= 0 else np


optimizer = optimizers.Adam(alpha=args.alpha)
Example #29
    def __init__(self, alpha_g, alpha_d, trial, version):
        self.BUFFER_SIZE = 60000
        self.BATCH_SIZE = 100
        self.EPOCHS = 250
        self.test_size = 10000
        self.alpha_g = alpha_g
        self.alpha_d = alpha_d
        self.trial = trial
        self.version = version
        self.noise_dim = 28 * 28
        self.num_examples_to_generate = 16

        self.seed = tf.random.normal(
            [self.num_examples_to_generate, self.noise_dim])
        (self.dataset, self.real_mu, self.real_sigma) = data.load_mnist(
            self.BUFFER_SIZE, self.BATCH_SIZE)  #ADD to train function
        self.generator = build_generator()  #Add to build function
        self.discriminator = build_discriminator()  #Add to build function
        self.generator_optimizer = tf.keras.optimizers.Adam(
            learning_rate=0.0001, beta_1=0.5, beta_2=0.999, epsilon=1e-7)
        self.discriminator_optimizer = tf.keras.optimizers.Adam(
            learning_rate=0.0001, beta_1=0.5, beta_2=0.999, epsilon=1e-7)

        self.checkpoint_dir = 'data/renyiganV_' + str(
            self.version) + '/AlphaG=' + str(self.alpha_g) + '_AlphaD=' + str(
                self.alpha_d) + '/trial' + str(
                    self.trial) + './training_checkpoints'
        self.checkpoint_prefix = os.path.join(self.checkpoint_dir, "ckpt")
        self.checkpoint = tf.train.Checkpoint(
            generator_optimizer=self.generator_optimizer,
            discriminator_optimizer=self.discriminator_optimizer,
            generator=self.generator,
            discriminator=self.discriminator)

        self.image_dir = 'data/renyiganV_' + str(
            self.version) + '/AlphaG=' + str(self.alpha_g) + '_AlphaD=' + str(
                self.alpha_d) + '/trial' + str(self.trial) + '/images'
        self.plot_dir = 'data/renyiganV_' + str(
            self.version) + '/AlphaG=' + str(self.alpha_g) + '_AlphaD=' + str(
                self.alpha_d) + '/trial' + str(self.trial) + '/plots'

        self.make_directory('data')
        self.make_directory('data/renyiganV_' + str(self.version))
        self.make_directory('data/renyiganV_' + str(self.version) +
                            '/AlphaG=' + str(self.alpha_g) + '_AlphaD=' +
                            str(self.alpha_d))
        self.make_directory('data/renyiganV_' + str(self.version) +
                            '/AlphaG=' + str(self.alpha_g) + '_AlphaD=' +
                            str(self.alpha_d) + '/trial' + str(self.trial))
        self.make_directory(self.image_dir)
        self.make_directory(self.plot_dir)

        if (version == 1):
            self.generator_loss = loss.generator_loss_renyi
            self.discriminator_loss = loss.discriminator_loss_rgan
        elif (version == 2):
            self.generator_loss = loss.generator_loss_renyiL1
            self.discriminator_loss = loss.discriminator_loss_rgan
        elif (version == 3):
            self.generator_loss = loss.generator_loss_original
            self.discriminator_loss = loss.discriminator_loss_rgan
        elif (version == 4):
            self.generator_loss = loss.generator_loss_rgan
            self.discriminator_loss = loss.discriminator_loss_rgan
        else:
            quit()
Example #31
                            "DeepSecure_CNN_MNIST", "test_MLP_MNIST"
                        ])
    parser.add_argument("--batchsize", default=32, type=int)
    parser.add_argument("--epochs", default=100, type=int)
    parser.add_argument("--savedir", type=str)
    parser.add_argument("--alternate", action="store_true")

    args = parser.parse_args()
    print("alternate set to: ", args.alternate)
    batch_size = args.batchsize
    epochs = args.epochs
    savedir = args.savedir

    if "MNIST" in args.model:
        flatten_inputs = "MLP" in args.model
        dataset = data.load_mnist(flatten_inputs)
    else:
        dataset = data.load_cifar()

    if args.model == "SecureML_MLP_MNIST":
        model = models.SecureML_MLP_MNIST(alternate=args.alternate)
        discrete_model = models.SecureML_MLP_MNIST()

        scale = 10

    elif args.model == "CryptoNets_CNN_MNIST":
        model = models.CryptoNets_CNN_MNIST(pooling="max",
                                            alternate=args.alternate)
        discrete_model = models.CryptoNets_CNN_MNIST(pooling="max",
                                                     alternate=args.alternate)
Example #32
OPTIMIZER = nn.SGD
# learning rate
LEARNING_RATE = 0.1
# batch size for stochastic mini-batch gradient descent method
BATCH_SIZE = 64
# number of training epochs
MAX_EPOCHS = 20

# Step 2: load data
print("Loading data...")
import os
#DB = data.load_mnist("D:/arti_intel/Assignments/Project6/project6/data/mnist", batch_size=BATCH_SIZE)
# notMNIST is a more challenging dataset to classify the
# alphabetic letters from a to j.
DB = data.load_mnist(
    "D:/arti_intel/Assignments/Project6/project6/data/not_mnist",
    batch_size=BATCH_SIZE)
print("{} train samples".format(DB.train.n_examples))
print("{} validation samples".format(DB.validation.n_examples))
print("{} test samples".format(DB.test.n_examples))
print("Sample shape: {}".format(DB.feature_shape))
print("Number of classes: {}".format(DB.n_classes))
print()

# set up the logger
logger = Logger(MAX_EPOCHS, DB)


# Step 3: build up the network model
class Model(object):
    def __init__(self, learning_rate):
Example #33
BUFFER_SIZE = 60000
BATCH_SIZE = 100
EPOCHS = 250
test_size = 10000

alpha_g = 0.1
alpha_d = 0.1

version = 1
trial = 1

noise_dim = 28 * 28
num_examples_to_generate = 16
seed = tf.random.normal([num_examples_to_generate, noise_dim])
(dataset, real_mu, real_sigma) = data.load_mnist(BUFFER_SIZE, BATCH_SIZE)
generator = build_generator()
discriminator = build_discriminator()
generator_optimizer = tf.keras.optimizers.Adam(learning_rate=0.0001,
                                               beta_1=0.5,
                                               beta_2=0.999,
                                               epsilon=1e-7)
discriminator_optimizer = tf.keras.optimizers.Adam(learning_rate=0.0001,
                                                   beta_1=0.5,
                                                   beta_2=0.999,
                                                   epsilon=1e-7)

checkpoint_dir = 'data/renyiganV_' + str(version) + '/AlphaG=' + str(
    alpha_g) + '_AlphaD=' + str(alpha_d) + '/trial' + str(
        trial) + './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
Example #34
# choose an optimizer
OPTIMIZER = nn.SGD
# learning rate
LEARNING_RATE = 0.1
# batch size for stochastic mini-batch gradient descent method
BATCH_SIZE = 64
# number of training epochs
MAX_EPOCHS = 20
neurons=128



# Step 2: load data
print("Loading data...")
import os
DB = data.load_mnist("data/mnist", batch_size=BATCH_SIZE)
# notMNIST is a more challenging dataset to classify the
# alphabetic letters from a to j.
# DB = data.load_mnist("data/not_mnist", batch_size=BATCH_SIZE)
print("{} train samples".format(DB.train.n_examples))
print("{} validation samples".format(DB.validation.n_examples))
print("{} test samples".format(DB.test.n_examples))
print("Sample shape: {}".format(DB.feature_shape))
print("Number of classes: {}".format(DB.n_classes))
print()

# set up the logger
logger = Logger(MAX_EPOCHS, DB)

# Step 3: build up the network model
class Model(object):
Example #35
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Convolution2D(12, 5, 5, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(10))
    model.add(Activation('softmax'))

    sgd = SGD(lr=0.01, momentum=0.9, decay=1e-6, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy')

    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              show_accuracy=True,
              verbose=1,
              shuffle=True,
              validation_data=(x_test, y_test))

    score = model.evaluate(x_test, y_test, show_accuracy=True, verbose=0)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])


X_train, Y_train, X_test, Y_test = load_mnist()
mnist_cnn(X_train, Y_train, X_test, Y_test)