Example #1
def main():
    dataset = get_mnist()
    x_train, y_train = dataset['train']
    num_features = x_train.shape[1]
    num_classes = dataset['n_classes']

    blueprint = [
        {'units': 300, 'dropout': 0.50},
        {'units': 300, 'dropout': 0.50},
        {'units': 200, 'dropout': 0.25},
        {'units': 200, 'dropout': 0.25},
        {'units': 100}]

    model = DenseNetworkClassifier(
        input_size=num_features,
        n_classes=num_classes,
        config=blueprint,
        activation=tf.nn.elu)

    model.build(optimizer=tf.train.AdamOptimizer)

    model.fit(
        X=x_train,
        y=y_train,
        batch_size=1000,
        epochs=200,
        lr0=0.01,
        validation_data=dataset['valid'])
Example #2
def main():
    dataset = get_mnist()
    x_train, y_train = dataset['train']
    num_features = x_train.shape[1]
    num_classes = dataset['n_classes']

    model = LogisticClassifier(num_features, num_classes)

    model.build()

    callbacks = [StreamLogger(), ExpoDecay(decay=0.01)]

    model.fit(X=x_train,
              y=y_train,
              batch_size=1000,
              epochs=5,
              lr0=1.0,
              validation_data=dataset['valid'],
              callbacks=callbacks)

    x_valid, y_valid = dataset['valid']
    scores = model.score(x_valid, y_valid)
    formatter = DefaultFormatter()
    print(formatter.to_string(scores))
    classes = model.predict(x_valid)
    print(classes)
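
# ExpoDecay above presumably applies exponential learning-rate decay; a
# minimal standalone sketch of such a callback (the hook name
# `on_epoch_end` and the model attributes here are hypothetical, since
# the real callback interface is not shown in this snippet):
import math

class ExpoDecaySketch:
    def __init__(self, decay=0.01):
        self.decay = decay

    def on_epoch_end(self, model, epoch):
        # lr(t) = lr0 * exp(-decay * t)
        model.lr = model.lr0 * math.exp(-self.decay * epoch)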
Example #3
def choose_dataset(opt):
    """ choose dataset
    """
    data_name = opt.data_name
    if data_name == "MNIST":
        setattr(opt, "data_path",
                "/home/victorchen/workspace/Venus/torch_download/")
        setattr(opt, "in_channels", 1)
        data = get_mnist(opt.data_path, opt.batch_size, opt.num_workers)
    elif data_name == "CIFAR10":
        setattr(opt, "data_path",
                "/home/victorchen/workspace/Venus/torch_download/")
        setattr(opt, "in_channels", 3)
        data = get_cifar10(opt.data_path, opt.batch_size, opt.num_workers)
    elif data_name == "FASHION":
        setattr(
            opt, "data_path",
            "/home/victorchen/workspace/Venus/torch_download/FashionMNIST")
        setattr(opt, "in_channels", 1)
        data = get_fashion(opt.data_path, opt.batch_size, opt.num_workers)
    elif data_name == "SVHN":
        setattr(opt, "data_path",
                "/home/victorchen/workspace/Venus/torch_download/svhn")
        setattr(opt, "in_channels", 3)
        data = get_svhn(opt.data_path, opt.batch_size, opt.num_workers)
    elif data_name == "CELEBA":
        setattr(opt, "data_path", "/home/victorchen/workspace/Venus/celebA")
        setattr(opt, "in_channels", 3)
        data = get_unlabeled_celebA(opt.data_path, opt.batch_size,
                                    opt.num_workers)
    else:
        raise NotImplementedError(
            "Not implemented dataset: {}".format(data_name))
    return data
Example #4
def mnist_exp(xpu):
    X, Y = data.get_mnist()
    dec_model = DECModel(xpu, X, 10, 1.0, 'data/mnist')
    acc = []
    for i in [10 * (2 ** j) for j in range(9)]:
        acc.append(dec_model.cluster(X, Y, i))
        logging.log(logging.INFO, 'Clustering Acc: %f at update interval: %d' % (acc[-1], i))
    logging.info(str(acc))
    logging.info('Best Clustering ACC: %f at update_interval: %d' % (np.max(acc), 10 * (2 ** np.argmax(acc))))
Example #5
def main(args):
    print("Loading data")
    dataset = args.data.rstrip('/').split('/')[-1]
    torch.cuda.set_device(args.cuda)
    device = args.device
    if dataset == 'mnist':
        train_loader, test_loader = get_mnist(args.batch_size, 'data/mnist')
        num = 10
    elif dataset == 'fashion':
        train_loader, test_loader = get_fashion_mnist(args.batch_size,
                                                      'data/fashion')
        num = 10
    elif dataset == 'svhn':
        train_loader, test_loader, _ = get_svhn(args.batch_size, 'data/svhn')
        num = 10
    elif dataset == 'stl':
        train_loader, test_loader, _ = get_stl10(args.batch_size, 'data/stl10')
    elif dataset == 'cifar':
        train_loader, test_loader = get_cifar(args.batch_size, 'data/cifar')
        num = 10
    elif dataset == 'chair':
        train_loader, test_loader = get_chair(args.batch_size,
                                              '~/data/rendered_chairs')
        num = 1393
    elif dataset == 'yale':
        train_loader, test_loader = get_yale(args.batch_size, 'data/yale')
        num = 38
    model = VAE(28 * 28, args.code_dim, args.batch_size, num,
                dataset).to(device)
    phi = nn.Sequential(
        nn.Linear(args.code_dim, args.phi_dim),
        nn.LeakyReLU(0.2, True),
    ).to(device)
    model.load_state_dict(torch.load(args.fname))
    if args.tsne:
        datas, targets = [], []
        for i, (data, target) in enumerate(test_loader):
            datas.append(data), targets.append(target)
            if i >= 5:
                break
        data, target = torch.cat(datas, dim=0), torch.cat(targets, dim=0)
        c = F.one_hot(target.long(), num_classes=num).float()
        _, _, _, z = model(data.to(args.device), c.to(args.device))
        z, target = z.detach().cpu().numpy(), target.cpu().numpy()
        tsne = TSNE(n_components=2, init='pca', random_state=0)
        z_2d = tsne.fit_transform(z)
        plt.figure(figsize=(6, 5))
        for a in range(8):
            for b in range(a + 1, 10):
                plot_embedding(
                    z_2d,
                    target,
                    a,
                    b,
                )
                plt.savefig('tsne_c{}_{}_{}{}.png'.format(
                    int(args.c), dataset, a, b))
Example #6
def mnist_exp(xpu):
    X, Y = data.get_mnist()
    dec_model = DECModel(xpu, X, 10, 1.0, 'data/mnist')
    acc = []
    for i in [10*(2**j) for j in range(9)]:
        acc.append(dec_model.cluster(X, Y, i))
        logging.log(logging.INFO, 'Clustering Acc: %f at update interval: %d'%(acc[-1], i))
    logging.info(str(acc))
    logging.info('Best Clustering ACC: %f at update_interval: %d'%(np.max(acc), 10*(2**np.argmax(acc))))
Example #7
def train():
    images, len_instance = get_mnist(flags.batch_size)
    G = get_generator([None, flags.z_dim], gf_dim=64, o_size=flags.output_size, o_channel=flags.c_dim)
    D = get_discriminator([None, flags.output_size, flags.output_size, flags.c_dim], df_dim=64)

    G.train()
    D.train()

    d_optimizer = tf.optimizers.Adam(flags.lr, beta_1=flags.beta1)
    g_optimizer = tf.optimizers.Adam(flags.lr, beta_1=flags.beta1)

    n_step_epoch = int(len_instance // flags.batch_size)

    for epoch in range(flags.n_epoch):
        for step, batch_images in enumerate(images):
            if batch_images.shape[0] != flags.batch_size:  # if the remaining data in this epoch < batch_size
                break
            step_time = time.time()
            with tf.GradientTape(persistent=True) as tape:
                z = np.random.normal(loc=0.0, scale=1.0, size=[flags.batch_size, flags.z_dim]).astype(np.float32)
                d_logits = D(G(z))
                d2_logits = D(batch_images)
                # discriminator: real images are labelled as 1
                d_loss_real = tl.cost.sigmoid_cross_entropy(d2_logits, tf.ones_like(d2_logits), name='dreal')
                # discriminator: images from generator (fake) are labelled as 0
                d_loss_fake = tl.cost.sigmoid_cross_entropy(d_logits, tf.zeros_like(d_logits), name='dfake')
                # combined loss for updating discriminator
                d_loss = d_loss_real + d_loss_fake
                # generator: try to fool discriminator to output 1
                g_loss = tl.cost.sigmoid_cross_entropy(d_logits, tf.ones_like(d_logits), name='gfake')

            grad = tape.gradient(g_loss, G.trainable_weights)
            g_optimizer.apply_gradients(zip(grad, G.trainable_weights))
            grad = tape.gradient(d_loss, D.trainable_weights)
            d_optimizer.apply_gradients(zip(grad, D.trainable_weights))
            del tape

            print("Epoch: [{}/{}] [{}/{}] took: {:.3f}, d_loss: {:.5f}, g_loss: {:.5f}".format(epoch, \
                                                                                               flags.n_epoch, step,
                                                                                               n_step_epoch,
                                                                                               time.time() - step_time,
                                                                                               d_loss, g_loss))

        if np.mod(epoch, flags.save_every_epoch) == 0:
            G.save_weights('{}/G.npz'.format(flags.checkpoint_dir), format='npz')
            D.save_weights('{}/D.npz'.format(flags.checkpoint_dir), format='npz')
            G.eval()
            result = G(z)
            G.train()
            # num_tiles is not defined in this snippet; a square sample grid is assumed
            num_tiles = int(np.sqrt(flags.batch_size))
            tl.visualize.save_images(result.numpy(), [num_tiles, num_tiles],
                                     '{}/train_{:02d}.png'.format(flags.sample_dir, epoch))
Example #8
def main():
    run_config = tf.contrib.learn.RunConfig(save_checkpoints_steps=1000)

    hparams = tf.contrib.training.HParams(type="image",
                                          batch_size=64,
                                          learning_rate=0.01,
                                          lr_scheme="exp",
                                          delay=0,
                                          staircased=False,
                                          learning_rate_decay_interval=2000,
                                          learning_rate_decay_rate=0.1,
                                          clip_grad_norm=1.0,
                                          l2_loss=0.0,
                                          label_smoothing=0.1,
                                          init_scheme="random",
                                          warmup_steps=10000,
                                          encoder_depth=2,
                                          decoder_depth=2,
                                          hidden_size=100,
                                          is_ae=True,
                                          activation=tf.nn.sigmoid,
                                          enc_layers=[50, 50],
                                          dec_layers=[50],
                                          label_shape=[1],
                                          dropout=0,
                                          channels=1,
                                          input_shape=[28, 28, 1],
                                          output_shape=[28, 28, 1])

    train_input_fn = get_mnist("tmp/data", hparams, training=True)
    eval_input_fn = get_mnist("tmp/data", hparams, training=False)

    estimator = tf.estimator.Estimator(model_fn=get_autoencoder(hparams, 0.01),
                                       model_dir="tmp/run",
                                       config=run_config)

    estimator.train(train_input_fn, steps=100)
    estimator.evaluate(eval_input_fn, steps=10)
Example #9
def main():
    args = get_args()
    print('main() args=', args)
    trn, dev, tst = get_mnist()

    group = TestGroup(args,
                      trn,
                      args.d_minibatch,
                      args.d_hidden,
                      args.n_layer,
                      args.dropout,
                      args.unified,
                      dev,
                      tst,
                      file=sys.stdout)

    # results may be different at each run
    group.run(0, args.n_epoch)
    # mbsize: 32, hidden size: 512, layer: 3, dropout: 0.1, k: 0
    # 0:dev set: Average loss: 0.1202, Accuracy: 4826/5000 (96.52%)
    #     test set: Average loss: 0.1272, Accuracy: 9616/10000 (96.16%)
    # 1:dev set: Average loss: 0.1047, Accuracy: 4832/5000 (96.64%)
    #     test set: Average loss: 0.1131, Accuracy: 9658/10000 (96.58%)
    # 2:dev set: Average loss: 0.0786, Accuracy: 4889/5000 (97.78%)
    #     test set: Average loss: 0.0834, Accuracy: 9749/10000 (97.49%)
    # 3:dev set: Average loss: 0.0967, Accuracy: 4875/5000 (97.50%)
    # 4:dev set: Average loss: 0.0734, Accuracy: 4907/5000 (98.14%)
    #     test set: Average loss: 0.0818, Accuracy: 9790/10000 (97.90%)
    # 5:dev set: Average loss: 0.0848, Accuracy: 4894/5000 (97.88%)
    # 6:dev set: Average loss: 0.0750, Accuracy: 4904/5000 (98.08%)
    # 7:dev set: Average loss: 0.0882, Accuracy: 4897/5000 (97.94%)
    # 8:dev set: Average loss: 0.0978, Accuracy: 4896/5000 (97.92%)
    # 9:dev set: Average loss: 0.0828, Accuracy: 4910/5000 (98.20%)
    #     test set: Average loss: 0.0973, Accuracy: 9797/10000 (97.97%)
    # 10:dev set: Average loss: 0.1004, Accuracy: 4901/5000 (98.02%)
    # 11:dev set: Average loss: 0.0813, Accuracy: 4914/5000 (98.28%)
    #     test set: Average loss: 0.0917, Accuracy: 9795/10000 (97.95%)
    # 12:dev set: Average loss: 0.0880, Accuracy: 4912/5000 (98.24%)
    # 13:dev set: Average loss: 0.1106, Accuracy: 4910/5000 (98.20%)
    # 14:dev set: Average loss: 0.0981, Accuracy: 4921/5000 (98.42%)
    #     test set: Average loss: 0.1065, Accuracy: 9824/10000 (98.24%)
    # 15:dev set: Average loss: 0.1044, Accuracy: 4914/5000 (98.28%)
    # 16:dev set: Average loss: 0.1235, Accuracy: 4904/5000 (98.08%)
    # 17:dev set: Average loss: 0.1202, Accuracy: 4910/5000 (98.20%)
    # 18:dev set: Average loss: 0.1021, Accuracy: 4926/5000 (98.52%)
    #     test set: Average loss: 0.1167, Accuracy: 9800/10000 (98.00%)
    # 19:dev set: Average loss: 0.1490, Accuracy: 4910/5000 (98.20%)
    # $98.52|98.00 at 18
    group.run(args.k, args.n_epoch)
Example #10
def main_unified():
    args = get_args_unified()
    print('main_unified() args=', args)
    trn, dev, tst = get_mnist()

    # change sys.stdout to a file object to write the results to a file
    group = TestGroup(args,
                      trn,
                      args.d_minibatch,
                      args.d_hidden,
                      args.n_layer,
                      args.dropout,
                      args.unified,
                      dev,
                      tst,
                      file=sys.stdout)

    # results may be different at each run
    group.run(0)
    # mbsize: 50, hidden size: 500, layer: 3, dropout: 0.1, k: 0
    # 0:dev set: Average loss: 0.1043, Accuracy: 4843/5000 (96.86%)
    #     test set: Average loss: 0.1163, Accuracy: 9655/10000 (96.55%)
    # 1:dev set: Average loss: 0.0789, Accuracy: 4892/5000 (97.84%)
    #     test set: Average loss: 0.0792, Accuracy: 9766/10000 (97.66%)
    # 2:dev set: Average loss: 0.0818, Accuracy: 4875/5000 (97.50%)
    # 3:dev set: Average loss: 0.0823, Accuracy: 4880/5000 (97.60%)
    # 4:dev set: Average loss: 0.0869, Accuracy: 4888/5000 (97.76%)
    # 5:dev set: Average loss: 0.0810, Accuracy: 4904/5000 (98.08%)
    #     test set: Average loss: 0.0711, Accuracy: 9807/10000 (98.07%)
    # 6:dev set: Average loss: 0.0752, Accuracy: 4903/5000 (98.06%)
    # 7:dev set: Average loss: 0.0805, Accuracy: 4907/5000 (98.14%)
    #     test set: Average loss: 0.0833, Accuracy: 9799/10000 (97.99%)
    # 8:dev set: Average loss: 0.1105, Accuracy: 4876/5000 (97.52%)
    # 9:dev set: Average loss: 0.0913, Accuracy: 4901/5000 (98.02%)
    # 10:dev set: Average loss: 0.0800, Accuracy: 4915/5000 (98.30%)
    #     test set: Average loss: 0.0832, Accuracy: 9830/10000 (98.30%)
    # 11:dev set: Average loss: 0.0909, Accuracy: 4913/5000 (98.26%)
    # 12:dev set: Average loss: 0.0908, Accuracy: 4907/5000 (98.14%)
    # 13:dev set: Average loss: 0.0753, Accuracy: 4920/5000 (98.40%)
    #     test set: Average loss: 0.0902, Accuracy: 9803/10000 (98.03%)
    # 14:dev set: Average loss: 0.0947, Accuracy: 4918/5000 (98.36%)
    # 15:dev set: Average loss: 0.0800, Accuracy: 4918/5000 (98.36%)
    # 16:dev set: Average loss: 0.0822, Accuracy: 4915/5000 (98.30%)
    # 17:dev set: Average loss: 0.1059, Accuracy: 4916/5000 (98.32%)
    # 18:dev set: Average loss: 0.1128, Accuracy: 4914/5000 (98.28%)
    # 19:dev set: Average loss: 0.0936, Accuracy: 4924/5000 (98.48%)
    #     test set: Average loss: 0.1214, Accuracy: 9794/10000 (97.94%)
    # $98.48|97.94 at 19
    group.run()
Example #11
def choose_dataset(opt):
    """ choose dataset
    """
    data_name = opt.data_name
    if data_name == "MNIST":
        setattr(opt, "data_path",
                "/home/victorchen/workspace/Venus/torch_download/MNIST")
        setattr(opt, "in_channels", 1)
        data = get_mnist(opt.data_path, opt.batch_size, opt.num_workers,
                         opt.input_size)
    elif data_name == "cifar10":
        setattr(opt, "data_path",
                "/home/victorchen/workspace/Venus/torch_download/")
        setattr(opt, "in_channels", 3)
        data = get_cifar10(opt.data_path, opt.batch_size, opt.num_workers,
                           opt.input_size)
    elif data_name == "fashion":
        setattr(
            opt, "data_path",
            "/home/victorchen/workspace/Venus/torch_download/FashionMNIST")
        setattr(opt, "in_channels", 1)
        data = get_fashion(opt.data_path, opt.batch_size, opt.num_workers,
                           opt.input_size)
    elif data_name == "svhn":
        setattr(opt, "data_path",
                "/home/victorchen/workspace/Venus/torch_download/svhn")
        setattr(opt, "in_channels", 3)
        data = get_svhn(opt.data_path, opt.batch_size, opt.num_workers,
                        opt.input_size)
    elif data_name == "unlabeled_celeba":
        setattr(opt, "data_path",
                "/home/victorchen/workspace/Venus/celebA/images")
        setattr(opt, "in_channels", 3)
        data = get_unlabeled_celebA(opt.data_path, opt.batch_size,
                                    opt.num_workers, opt.input_size)
    elif data_name == "folder":
        data = get_folder_dataset(opt.data_path, opt.batch_size,
                                  opt.num_workers, opt.input_size)
    elif data_name == "folder_res":
        data = get_resolution(opt.data_path, opt.input_size, opt.batch_size,
                              opt.num_workers)
    else:
        raise NotImplementedError(
            "Not implemented dataset: {}".format(data_name))
    return data
Example #12
def main():
    args = get_args()
    trn, dev, tst = get_mnist()

    # change sys.stdout to a file object to write the results to a file
    group = TestGroup(args,
                      trn,
                      args.d_minibatch,
                      args.d_hidden,
                      args.n_layer,
                      args.dropout,
                      dev,
                      tst,
                      cudatensor=False,
                      file=sys.stdout)

    # results may be different at each run
    #with torch.autograd.profiler.profile(use_cuda=True) as prof:
    group.run()
Example #13
    def __init__(self, flags, type):
        self.dataset, self.len_instance = get_mnist(flags.batch_size)
        self.G = get_generator([None, flags.z_dim],
                               gf_dim=64,
                               o_size=flags.output_size,
                               o_channel=flags.c_dim)
        self.D = get_discriminator(
            [None, flags.output_size, flags.output_size, flags.c_dim],
            df_dim=64)
        self.batch_size = flags.batch_size
        self.epoch = flags.n_epoch
        self.type = type
        assert type in methods_dict.keys()
        self.get_loss = methods_dict[type]
        if type == "WGAN":
            self.d_optimizer = tf.optimizers.RMSprop(flags.lr)
            self.g_optimizer = tf.optimizers.RMSprop(flags.lr)
        else:
            self.d_optimizer = tf.optimizers.Adam(flags.lr, beta_1=flags.beta1)
            self.g_optimizer = tf.optimizers.Adam(flags.lr, beta_1=flags.beta1)
Example #14
def train_cnn():
    batch_size = 100
    train_iter, val_iter = get_mnist(batch_size)

    cnn_model = get_cnn_sym()
    plot = mx.viz.plot_network(cnn_model,
                               title="cnn",
                               save_format="pdf",
                               hide_weights=True)
    plot.render("CNN")

    model = mx.mod.Module(symbol=cnn_model, context=mx.cpu())

    model.fit(
        train_iter,
        eval_data=val_iter,
        optimizer="sgd",
        optimizer_params={"learning_rate": 0.1},
        eval_metric="acc",
        batch_end_callback=mx.callback.Speedometer(batch_size, 100),
        # output progress for each 100 data batches
        num_epoch=5)
Example #15
def train_mlp():
    # Get the data iterator
    batch_size = 100
    train_iter, val_iter = get_mnist(batch_size)

    # Get MLP symbol
    mlp_model = get_mlp_sym()
    # Viz the graph and save the plot for debugging
    plot = mx.viz.plot_network(mlp_model,
                               title="mlp",
                               save_format="pdf",
                               hide_weights=True)
    plot.render("MLP")

    model = mx.mod.Module(symbol=mlp_model, context=mx.cpu())
    model.fit(
        train_iter,  # train data
        eval_data=val_iter,  # validation data
        optimizer='sgd',  # use SGD to train
        optimizer_params={'learning_rate': 0.1},  # use fixed learning rate
        eval_metric='acc',  # report accuracy during training
        batch_end_callback=mx.callback.Speedometer(batch_size, 100),
        # output progress for each 100 data batches
        num_epoch=5)
Example #16
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--momentum', type=float, default=0.5)
    parser.add_argument('--lamb', type=float, default=0.1)
    parser.add_argument('--dropout', type=float, default=0.1)
    parser.add_argument('--disable-cuda',
                        action='store_true',
                        help='Disable CUDA')
    parser.add_argument('--soft',
                        action='store_true',
                        help='Enable soft targets')
    parser.add_argument('--log_interval', type=int, default=20)

    args = parser.parse_args()
    cuda = not args.disable_cuda and torch.cuda.is_available()

    trainset, testset, classes, shape = get_mnist(soft=args.soft)
    w, h = shape

    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              pin_memory=cuda)

    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             pin_memory=cuda)

    tree = SDTDropout(k=len(classes), in_features=w * h, args=args)

    if cuda:
Example #17
#*********************************
# Test custom simple neural network (MLP)
# Author: Manuel Serna-Aguilera
#*********************************

import data  # custom data-loading module
import mlp  # custom fully-connected network (MLP) module
import numpy as np

# Get MNIST data & one-hot encode
x_train, y_train, x_test, y_test = data.get_mnist()

# Train MLP model
s = [784, 16, 16, 10]  # element 0 is the input length; the rest are the widths of subsequent layers
n = 10  # classes
iterations = 5  # i.e. epochs
learning_rate = 0.001

model = mlp.MLP(s=s, n_classes=n)
model.train(x_train=x_train,
            y_train=y_train,
            x_val=x_test,
            y_val=y_test,
            epochs=iterations,
            lr=learning_rate)
Example #18
def main():
    # set hyperparameters here
    hiddenlayerlist = [[
        16, 32, 16
    ]]  # number of hidden layers and the number of nodes in each layer

    ss = 1e-2  # step size
    numofiter = 300  # iterations
    size = 2500  # input size
    dim = 2  # input dimension
    margin = 0  # margin; set to 0 to make the data not linearly separable

    output_unit = 1

    algorithm = input('Select algorithm: (input ebp, r+, r-, ir+ or ir-)')
    #	algorithm = 'r+'
    modeltype = input(
        'Classification or Regression? (input c, r, mnist or bc)')
    modeltype = 'mnist'  # note: hard-coded override of the prompt above

    if modeltype == 'c':

        #generate the input and output for classification
        inputdata, outputdata = generatedata(size, dim, margin)

        # plot to visualize if it is 1D
        print('Training Data Plot: ')
        plt.figure(1)
        if dim == 1:

            plt.scatter(inputdata[:size // 2, 0],
                        np.ones((size // 2, 1)),
                        color='r')
            plt.scatter(inputdata[size // 2:, 0],
                        np.ones((size // 2, 1)),
                        color='b')
            plt.legend(['Label 1', 'Label 0'], loc='upper right')
        elif dim == 2:

            plt.scatter(inputdata[:size // 2, 0],
                        inputdata[:size // 2, 1],
                        color='r')
            plt.scatter(inputdata[size // 2:, 0],
                        inputdata[size // 2:, 1],
                        color='b')
            plt.legend(['Label 1', 'Label 0'], loc='upper right')

        network = net(inputdata, outputdata, size, ss, numofiter, dim,
                      hiddenlayerlist, modeltype, algorithm, output_unit, [],
                      0)
        network.backpropagation()
        output = network.forwardewithcomputedW(inputdata)

        #plot network computed result
        output = np.append(inputdata, output, axis=1)
        print('Network computed output: ')

        plt.figure(4)
        if dim == 1:

            output1 = output[output[:, -1] == 1]
            output2 = output[output[:, -1] == 0]
            plt.scatter(output1[:, 0],
                        np.ones((np.shape(output1)[0], 1)),
                        color='r')
            plt.scatter(output2[:, 0],
                        np.ones((np.shape(output2)[0], 1)),
                        color='b')
            plt.legend(['Label 1', 'Label 0'], loc='upper right')

        if dim == 2:
            output1 = output[output[:, -1] == 1]
            output2 = output[output[:, -1] == 0]
            plt.scatter(output1[:, 0], output1[:, 1], color='r')
            plt.scatter(output2[:, 0], output2[:, 1], color='b')
            plt.legend(['Label 1', 'Label 0'], loc='upper right')

        plt.show()

    elif modeltype == 'r':
        #generate the input and output for regression
        inputdata, outputdata = generatedataForRegression(size, dim)
        network = net(inputdata, outputdata, size, ss, numofiter, dim,
                      hiddenlayerlist, modeltype, algorithm, output_unit, [],
                      0)
        network.backpropagation()
        if dim == 2:
            fig = plt.figure(figsize=(10, 10))
            ax = plt.axes(projection='3d')
            X = np.arange(-4, 4, 0.1)
            Y = np.arange(-4, 4, 0.1)
            X, Y = np.meshgrid(X, Y)
            a = X.flatten()
            b = Y.flatten()
            testx = np.append(np.reshape(a, (len(a), 1)),
                              np.reshape(b, (len(b), 1)),
                              axis=1)
            outputy = np.reshape(network.forwardewithcomputedW(testx),
                                 np.shape(X))
            ax.plot_surface(X,
                            Y,
                            outputy,
                            rstride=1,
                            cstride=1,
                            cmap=cm.coolwarm,
                            linewidth=0,
                            antialiased=False)

    elif modeltype == 'mnist':

        train_images, train_labels, test_images, test_labels = get_mnist()

        #		size = train_images.shape[0]
        size = 60000
        numofiter = 1000
        dim = 28**2
        hiddenlayerlist = [[1000]]  # 2500, 2000, 1500, 1000, 500
        output_unit = 10

        ss = 5e-2

        print('Algorithm: ' + algorithm + '\nModel type: ' + modeltype +
              '\nIterations: ' + str(numofiter) + '\nLearning rate: ' +
              str(ss))

        # get_one_hot(train_labels[: size, :], 10)
        # train_labels[: size, :].flatten()
        network = net(train_images[:size, :],
                      get_one_hot(train_labels[:size, :], 10), size, ss,
                      numofiter, dim, hiddenlayerlist, modeltype, algorithm,
                      output_unit, [], 5000)
        network.backpropagation()

        # load the saved model
        filename = 'wb_' + modeltype + '_' + algorithm + '_' + str(
            numofiter) + '.npz'
        wb_ini = np.load(filename)['arr_0'].tolist()
        network = net(train_images[:size, :],
                      get_one_hot(train_labels[:size, :], 10), size, ss,
                      numofiter, dim, hiddenlayerlist, modeltype, algorithm,
                      output_unit, wb_ini, 0)

        # test the accuracy

        tst_size = 10000

        tst_imgs = test_images[:tst_size]
        tst_lbls = test_labels[:tst_size].flatten()

        tst_out_raw = network.forwardewithcomputedW(tst_imgs)
        tst_out_cls = np.argmax(tst_out_raw, axis=1)

        accuracy = sum(tst_out_cls == tst_lbls) / tst_size
        print('test accuracy: ' + str(accuracy))


#		set_trace()

    elif modeltype == 'bc':
        data = np.genfromtxt("breastCancerData.csv", delimiter=",")
        label = np.genfromtxt("breastCancerLabels.csv", delimiter=",")
        MinMaxscaler = sklearn.preprocessing.MinMaxScaler()
        data = np.float32(MinMaxscaler.fit_transform(data))
        #Split Train and Test Data
        trainD, testD, trainT, testT = train_test_split(data,
                                                        label,
                                                        random_state=6)

        size = np.shape(trainD)[0]
        numofiter = 1000
        dim = 9
        hiddenlayerlist = [[80, 100, 50]]
        output_unit = 1

        network = net(trainD, np.reshape(trainT, (len(trainT), 1)), size, ss,
                      numofiter, dim, hiddenlayerlist, modeltype, algorithm,
                      output_unit, [], 0)
        network.backpropagation()
        output = network.forwardewithcomputedW(testD)
        accuracy = sum(
            output == np.reshape(testT, (len(testT), 1))) / len(testT)
        print('test accuracy: ' + str(accuracy[0]))
Example #19
    def eval(self, X):
        batch_size = 100
        data_iter = mx.io.NDArrayIter({'data': X}, batch_size=batch_size,
                                      shuffle=False, last_batch_handle='pad')
        Y = list(model.extract_feature(self.loss, self.args, data_iter,
                                       X.shape[0], self.xpu).values())[0]
        return np.mean(np.square(Y - X)) / 2.0


if __name__ == '__main__':
    # set to INFO to see less information during training
    logging.basicConfig(level=logging.DEBUG)
    ae_model = AutoEncoderModel(mx.gpu(0), [784, 500, 500, 2000, 10],
                                pt_dropout=0.2)

    X, _ = data.get_mnist()
    train_X = X[:60000]
    val_X = X[60000:]

    ae_model.layerwise_pretrain(train_X, 256, 50000, 'sgd', l_rate=0.1,
                                decay=0.0,
                                lr_scheduler=mx.misc.FactorScheduler(20000, 0.1))
    ae_model.finetune(train_X, 256, 100000, 'sgd', l_rate=0.1, decay=0.0,
                      lr_scheduler=mx.misc.FactorScheduler(20000, 0.1))
    ae_model.save('mnist_pt.arg')
    ae_model.load('mnist_pt.arg')
    print("Training error:", ae_model.eval(train_X))
    print("Validation error:", ae_model.eval(val_X))
Example #20
def main(args):
    print("Loading data")
    dataset = args.data.rstrip('/').split('/')[-1]
    torch.cuda.set_device(args.cuda)
    device = args.device
    if dataset == 'mnist':
        train_loader, test_loader = get_mnist(args.batch_size, 'data/mnist')
        num = 10
    elif dataset == 'fashion':
        train_loader, test_loader = get_fashion_mnist(args.batch_size,
                                                      'data/fashion')
        num = 10
    elif dataset == 'svhn':
        train_loader, test_loader, _ = get_svhn(args.batch_size, 'data/svhn')
        num = 10
    elif dataset == 'stl':
        train_loader, test_loader, _ = get_stl10(args.batch_size, 'data/stl10')
    elif dataset == 'cifar':
        train_loader, test_loader = get_cifar(args.batch_size, 'data/cifar')
        num = 10
    elif dataset == 'chair':
        train_loader, test_loader = get_chair(args.batch_size,
                                              '~/data/rendered_chairs')
        num = 1393
    elif dataset == 'yale':
        train_loader, test_loader = get_yale(args.batch_size, 'data/yale')
        num = 38
    model = VAE(28 * 28, args.code_dim, args.batch_size, num,
                dataset).to(device)
    phi = nn.Sequential(
        nn.Linear(args.code_dim, args.phi_dim),
        nn.LeakyReLU(0.2, True),
    ).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    optimizer_phi = torch.optim.Adam(phi.parameters(), lr=args.lr)
    criterion = nn.MSELoss(reduction='sum')
    for epoch in range(args.epochs):
        re_loss = 0
        kl_div = 0
        size = len(train_loader.dataset)
        for data, target in train_loader:
            data, target = data.squeeze(1).to(device), target.to(device)
            c = F.one_hot(target.long(), num_classes=num).float()
            output, q_z, p_z, z = model(data, c)
            hsic = HSIC(phi(z), target.long(), num)
            if dataset == 'mnist' or dataset == 'fashion':
                reloss = recon_loss(output, data.view(-1, 28 * 28))
            else:
                reloss = criterion(output, data)
            kld = total_kld(q_z, p_z)
            loss = reloss + kld + args.c * hsic

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            optimizer_phi.zero_grad()
            neg = -HSIC(phi(z.detach()), target.long(), num)
            neg.backward()
            optimizer_phi.step()

            re_loss += reloss.item() / size
            kl_div += kld.item() / size
        print('-' * 50)
        print(
            " Epoch {} |re loss {:5.2f} | kl div {:5.2f} | hs {:5.2f}".format(
                epoch, re_loss, kl_div, hsic))
    for data, target in test_loader:
        data, target = data.squeeze(1).to(device), target.to(device)
        c = F.one_hot(target.long(), num_classes=num).float()
        output, _, _, z = model(data, c)
        break
    if dataset == 'mnist' or dataset == 'fashion':
        img_size = [data.size(0), 1, 28, 28]
    else:
        img_size = [data.size(0), 3, 32, 32]
    images = [data.view(img_size)[:30].cpu()]
    for i in range(10):
        c = F.one_hot(torch.ones(z.size(0)).long() * i,
                      num_classes=num).float().to(device)
        output = model.decoder(torch.cat((z, c), dim=-1))
        images.append(output.view(img_size)[:30].cpu())
    images = torch.cat(images, dim=0)
    save_image(images,
               'imgs/recon_c{}_{}.png'.format(int(args.c), dataset),
               nrow=30)
    torch.save(model.state_dict(),
               'vae_c{}_{}.pt'.format(int(args.c), dataset))
    # z = p_z.sample()
    # for i in range(10):
    #     c = F.one_hot(torch.ones(z.size(0)).long()*i, num_classes=10).float().to(device)
    #     output = model.decoder(torch.cat((z, c), dim=-1))
    #     n = min(z.size(0), 8)
    #     save_image(output.view(z.size(0), 1, 28, 28)[:n].cpu(), 'imgs/recon_{}.png'.format(i), nrow=n)
    if args.tsne:
        datas, targets = [], []
        for i, (data, target) in enumerate(test_loader):
            datas.append(data), targets.append(target)
            if i >= 5:
                break
        data, target = torch.cat(datas, dim=0), torch.cat(targets, dim=0)
        c = F.one_hot(target.long(), num_classes=num).float()
        _, _, _, z = model(data.to(args.device), c.to(args.device))
        z, target = z.detach().cpu().numpy(), target.cpu().numpy()
        tsne = TSNE(n_components=2, init='pca', random_state=0)
        z_2d = tsne.fit_transform(z)
        plt.figure(figsize=(6, 5))
        plot_embedding(z_2d, target)
        plt.savefig('tsnes/tsne_c{}_{}.png'.format(int(args.c), dataset))
Example #21
from data import get_mnist
import numpy as np
import matplotlib.pyplot as plt


"""
w = weights, b = bias, i = input, h = hidden, o = output, l = label
e.g. w_i_h = weights from input layer to hidden layer
"""
images, labels = get_mnist()
w_i_h = np.random.uniform(-0.5, 0.5, (20, 784))
w_h_o = np.random.uniform(-0.5, 0.5, (10, 20))
b_i_h = np.zeros((20, 1))
b_h_o = np.zeros((10, 1))

learn_rate = 0.01
nr_correct = 0
epochs = 3
for epoch in range(epochs):
    for img, l in zip(images, labels):
        img.shape += (1,)
        l.shape += (1,)
        # Forward propagation input -> hidden
        h_pre = b_i_h + w_i_h @ img
        h = 1 / (1 + np.exp(-h_pre))
        # Forward propagation hidden -> output
        o_pre = b_h_o + w_h_o @ h
        o = 1 / (1 + np.exp(-o_pre))

        # Cost / Error calculation
        e = 1 / len(o) * np.sum((o - l) ** 2, axis=0)
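
        # The listing is cut off at the cost computation. A minimal
        # sketch of the missing gradient-descent step for this two-layer
        # sigmoid network (the standard delta rule; an assumed
        # continuation, not necessarily the original author's code):
        nr_correct += int(np.argmax(o) == np.argmax(l))

        # Backpropagation output -> hidden (cost function derivative)
        delta_o = o - l
        w_h_o += -learn_rate * delta_o @ np.transpose(h)
        b_h_o += -learn_rate * delta_o
        # Backpropagation hidden -> input (sigmoid derivative)
        delta_h = np.transpose(w_h_o) @ delta_o * (h * (1 - h))
        w_i_h += -learn_rate * delta_h @ np.transpose(img)
        b_i_h += -learn_rate * delta_h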
Example #22
import matplotlib
import matplotlib.pyplot as plt
import numpy as np

from data import get_mnist  # used below; the import is missing from the original snippet

#%% Train Three Layer (Input-Hidden-Output) Neural Network

# Nomenclature,
#
# w -> connection/link weights
# b -> bias values
# i -> input layer
# h -> hidden layer
# o -> output layer
# l -> labels

images, labels = get_mnist()  # 60000-by-784, 60000-by-1, respectively.

# Initialize input-to-hidden layer weights and set bias values to zero.
np.random.seed(0)
w_i_h = np.random.uniform(-0.5, 0.5, (20, 784))  # 20-by-784
b_i_h = np.zeros((20, 1))  # 20-by-1

# Initialize hidden-to-output layer weights and set bias values to zero.
np.random.seed(0)
w_h_o = np.random.uniform(-0.5, 0.5, (10, 20))  # 10-by-20
b_h_o = np.zeros((10, 1))  # 10-by-1

print("\n\n")

learn_rate = 0.01
epochs = 50
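
# The listing stops before the training loop. A continuation would
# follow the per-image loop shown in Example #21 above: forward pass
# h = sigmoid(b_i_h + w_i_h @ img), o = sigmoid(b_h_o + w_h_o @ h),
# then the delta-rule weight updates.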
Example #23
def main(args):
    print("Loading data")
    dataset = args.data.rstrip('/').split('/')[-1]
    if dataset in ['mnist']:
        train_loader, test_loader = get_mnist(args.batch_size, args.data)
    elif dataset in ['cifar']:
        train_loader, test_loader, classes = get_cifar(args.batch_size,
                                                       args.data)
    elif dataset in ['svhn']:
        train_loader, test_loader, extra_loader = get_svhn(
            args.batch_size, args.data)
    elif dataset in ['fashion']:
        train_loader, test_loader = get_fashion_mnist(args.batch_size,
                                                      args.data)
    elif dataset in ['stl10']:
        train_loader, test_loader, unlabeled_loader = get_stl10(
            args.batch_size, args.data)
    else:
        raise NotImplementedError
    torch.cuda.set_device(args.device_id)
    for _, (batch, _) in enumerate(train_loader):
        size = batch.size()
        break

    model = Classifier(batch.size(-1) * batch.size(-1),
                       len(rotations)).to(args.device)

    optimizer = torch.optim.Adam(model.parameters(), args.lr)

    start_epoch = 1
    print('\nStarting Training')
    try:
        for epoch in range(start_epoch, args.epochs):
            nll, re_loss, kl_divergence, d_loss = run(args,
                                                      train_loader,
                                                      model,
                                                      optimizer,
                                                      epoch,
                                                      train=True)
            print('-' * 90)
            meta = "| epoch {:2d} ".format(epoch)
            print(
                meta +
                "| Train NLL: {:5.2f} | Train loss: {:5.2f} ({:5.2f}) | D loss {:5.2f} |"
                .format(nll, re_loss, kl_divergence, d_loss))

            nll, re_loss, kl_divergence, d_loss = run(args,
                                                      test_loader,
                                                      model,
                                                      optimizer,
                                                      1,
                                                      train=False)
            print(
                len(meta) * " " +
                "| Test NLL: {:5.2f} | Test loss: {:5.2f} ({:5.2f}) | D loss {:5.2f} |"
                .format(nll, re_loss, kl_divergence, d_loss))

    except KeyboardInterrupt:
        print('-' * 50)
        print('Quit Training')

    nll, re_loss, kl_divergence, d_loss = run(args,
                                              test_loader,
                                              model,
                                              optimizer,
                                              epoch,
                                              train=False)
    print('=' * 90)
    print(
        "| Train NLL: {:5.2f} | Train loss: {:5.2f} ({:5.2f}) | D loss {:5.2f} |"
        .format(nll, re_loss, kl_divergence, d_loss))
Example #24
        data_iter = mx.io.NDArrayIter([X],
                                      batch_size=batch_size,
                                      shuffle=False,
                                      last_batch_handle='pad')
        Y = list(model.extract_feature(self.loss, self.args, ['data'], data_iter,
                                       X.shape[0], self.xpu).values())[0]
        return np.mean(np.square(Y - X)) / 2.0


if __name__ == '__main__':
    # set to INFO to see less information during training
    logging.basicConfig(level=logging.DEBUG)
    ae_model = AutoEncoderModel(mx.gpu(0), [784, 500, 500, 2000, 10],
                                pt_dropout=0.2)

    X, _ = data.get_mnist()
    train_X = X[:60000]
    val_X = X[60000:]

    ae_model.layerwise_pretrain(train_X,
                                256,
                                50000,
                                'sgd',
                                l_rate=0.1,
                                decay=0.0,
                                lr_scheduler=mx.misc.FactorScheduler(
                                    20000, 0.1))
    ae_model.finetune(train_X,
                      256,
                      100000,
                      'sgd',
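                      l_rate=0.1,
                      decay=0.0,
                      lr_scheduler=mx.misc.FactorScheduler(20000, 0.1))
    # the keyword arguments above are restored from the identical
    # finetune() call in Example #19; the original listing is cut off here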
Example #25
def main():
    n_train = 100
    n_test = 100
    model_type = 'CycleGLO'
    if model_type == 'GLO':
        train, test = data.get_cifar10(n_train, n_test, 1, True, classes=[3])
        n_vectors = len(train)
        n_pixels = len(train[0][0])
        code_dim = 64
        print(n_pixels)
        lossfun = F.mean_squared_error
        model = GLOModel(code_dim, n_vectors, 1, train, n_pixels, 50)
        #show_results(model)
        model.train(lossfun, n_epochs=10)

        show_results(model)

    elif model_type == 'CycleGAN':
        # Read train/test data
        train_data1, test_data1 = data.get_mnist(n_train,
                                                 n_test,
                                                 1,
                                                 False,
                                                 classes=[3])
        train_data2, test_data2 = data.get_mnist(n_train,
                                                 n_test,
                                                 1,
                                                 False,
                                                 classes=[5])
        train_data = data.pair_datasets(train_data1, train_data2)

        # Create model
        n_pixels = len(train_data[0][0])
        g_hidden = 50
        d_hidden = 50
        d_learning_rate = 0.01
        g_learning_rate = 0.05
        #model = GANModel(n_pixels, g_hidden, d_hidden, d_learning_rate, g_learning_rate)
        alpha = 0.01
        beta = 0.5
        lambda1 = 10.0
        lambda2 = 3.0
        learningrate_decay = 0.0
        learningrate_interval = 1000
        max_buffer_size = 25
        model = CycleGAN(alpha, beta, lambda1, lambda2, n_pixels,
                         learningrate_decay, learningrate_interval, g_hidden,
                         d_hidden, max_buffer_size)

        # Train model
        lossfun = F.mean_squared_error

        n_epochs = 1000
        d_steps = 1
        g_steps = 1
        minibatch_size = 1

        mu = 1
        sigma = 1
        noisefun = np.random.normal
        #g_sampler = NoiseSampler(fun=noisefun, loc=mu, scale=sigma, size=(n_pixels, 1))  # iterator over randomized noise
        #d_input_iter = iterators.SerialIterator(train_data, batch_size=minibatch_size, repeat=True, shuffle=True)  # iterator over real data
        # model.train(d_input_iter, g_sampler, lossfun, n_epochs, d_steps, g_steps, minibatch_size)

        batch_iter = iterators.MultiprocessIterator(train_data,
                                                    batch_size=minibatch_size,
                                                    n_processes=4)
        model.train(n_epochs, batch_iter)

        # Visualize training

        # Visualize result/test/performance
        sqrt_pixels = int(np.sqrt(n_pixels))
        #for g_index in range(0, 10):
        #    gen_input = g_sampler(1)
        #    g_fake_data = model.G(gen_input)
        #    f, axarr = plt.subplots(1, 2)
        #    axarr[0].imshow(gen_input.reshape((sqrt_pixels, sqrt_pixels)))
        #    axarr[0].set_title('noise input')
        #    axarr[1].imshow(g_fake_data.data.reshape((sqrt_pixels, sqrt_pixels)))
        #    axarr[1].set_title('generated sample')
        #    plt.show()
        print('Visualizing!')
        for input in test_data1[:5]:
            generated = model.g(input.reshape(1, n_pixels))
            #print(generated.data)
            f, axarr = plt.subplots(1, 2)
            axarr[0].imshow(input.reshape((sqrt_pixels, sqrt_pixels)))
            axarr[0].set_title('input image for G')
            axarr[1].imshow(generated.data.reshape((sqrt_pixels, sqrt_pixels)))
            axarr[1].set_title('generated sample')
            plt.show()

        for input in test_data2[:5]:
            generated = model.f(input.reshape(1, n_pixels))
            #print(generated.data)
            f, axarr = plt.subplots(1, 2)
            axarr[0].imshow(input.reshape((sqrt_pixels, sqrt_pixels)))
            axarr[0].set_title('input image for F')
            axarr[1].imshow(generated.data.reshape((sqrt_pixels, sqrt_pixels)))
            axarr[1].set_title('generated sample')
            plt.show()
    elif model_type == 'CycleGLO':
        train_data1, test_data1 = data.get_mnist(n_train,
                                                 n_test,
                                                 1,
                                                 False,
                                                 classes=[3])
        train_data2, test_data2 = data.get_mnist(n_train,
                                                 n_test,
                                                 1,
                                                 False,
                                                 classes=[5])
        train_data = data.pair_datasets(train_data1, train_data2)

        # Create model
        n_pixels = len(train_data[0][0])
        g_hidden = 50
        d_hidden = 50

        code_dim = 64
        alpha = 0.01
        beta = 0.5
        lambda1 = 10.0
        lambda2 = 3.0
        learningrate_decay = 0.0
        learningrate_interval = 1000
        max_buffer_size = 25
        model = CycleGLO(alpha, beta, lambda1, lambda2, n_pixels, code_dim,
                         train_data, learningrate_decay, learningrate_interval,
                         g_hidden, d_hidden, max_buffer_size)

        # Train model
        lossfun = F.mean_squared_error

        model.train(lossfun, n_epochs=1000)
        show_results(model, False)
Example #26
if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='SoftDecisionTree on MNIST')
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--depth', type=int, default=7)
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--disable-cuda',
                        action='store_true',
                        help='Disable CUDA')
    parser.add_argument('--log_interval', type=int, default=20)

    args = parser.parse_args()
    cuda = not args.disable_cuda and torch.cuda.is_available()

    trainset, testset, classes, shape = get_mnist(input_dimensions=2)
    w, h = shape

    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              pin_memory=cuda)

    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             pin_memory=cuda)

    model = LeNet(k=len(classes), args=args)

    if cuda:
Example #27
import numpy as np  # used below (np.float32); missing from the original snippet

from dnnet.neuralnet import NeuralNetwork  # assumed module path; the import is not shown in the snippet
from dnnet.utils.nn_utils import scale_normalization

from dnnet.training.optimizer import AdaGrad
from dnnet.training.weight_initialization import DefaultInitialization, He
from dnnet.training.loss_function import MultinomialCrossEntropy

from dnnet.layers.activation import Activation, ActivationLayer
from dnnet.layers.affine import AffineLayer
from dnnet.layers.batch_norm import BatchNormLayer
from dnnet.layers.convolution import ConvolutionLayer
from dnnet.layers.dropout import DropoutLayer
from dnnet.layers.pooling import PoolingLayer

from data import get_mnist

x, y = get_mnist('../../data')
scale_normalization(x)
x = x.reshape(-1, 1, 28, 28)

dtype = np.float32
force_cpu = {
    'activation': True,
    'batch_norm': True,
    'dropout': True,
    'pooling': False,
}

model = NeuralNetwork(input_shape=(1, 28, 28), dtype=dtype)

model.add(
    ConvolutionLayer(filter_shape=(32, 3, 3),
Example #28

import numpy as np
import keras
from data import get_mnist

from keras.models import Model
from keras.layers import Input, Dense, Activation, Flatten, Conv2D
from keras import optimizers


# In[2]:


# Get the training data, this loads the mnist dataset if not already present
X_train, X_test, Y_train, Y_test, img_rows, img_cols, num_classes = get_mnist()

# Create a data input layer
InputLayer = Input(shape=(img_rows, img_cols,1), name="input")

# First convolution layer
conv_1 = Conv2D(25, (5, 5), strides=(2, 2), activation="relu")(InputLayer)
# Second convolution layer
conv_2 = Conv2D(50, (3, 3), strides=(2, 2), activation="relu")(conv_1)

# 2 fully connected layers with RELU activations
conv_output = Flatten()(conv_2)
fc1 = Dense(500)(conv_output)
fc1 = Activation("relu")(fc1)
fc2 = Dense(num_classes)(fc1)
PredictionLayer = Activation("softmax", name="error_loss")(fc2)
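
# The snippet ends before the model object is assembled. An assumed
# continuation using the standard Keras functional API (a sketch, not
# part of the original listing):
model = Model(inputs=InputLayer, outputs=PredictionLayer)
model.compile(loss="categorical_crossentropy",
              optimizer=optimizers.Adam(),
              metrics=["accuracy"])
model.fit(X_train, Y_train, batch_size=128, epochs=5,
          validation_data=(X_test, Y_test))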
Example #29
# Learning rate schedule
lr_start = 1e-3
lr_end = 1e-4
lr_decay = (lr_end / lr_start)**(1. / epochs)  # per-epoch factor: lr_start * lr_decay**epochs == lr_end

# BatchNormalization parameters
epsilon = 1e-6
momentum = 0.9

# Dropout parameters
drop_in = 0.2
drop_hidden = 0.5

# Download the MNIST dataset and split it into training and test data
(X_train, y_train), (X_test, y_test) = get_mnist()

X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# Convert the class labels to -1 or 1 for the hinge loss
Y_train = np_utils.to_categorical(y_train, nb_classes) * 2 - 1
Y_test = np_utils.to_categorical(y_test, nb_classes) * 2 - 1

model = Sequential()
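
# The listing breaks off as the model is created. An assumed
# continuation, consistent with the hyperparameters above and the usual
# keras imports (a sketch, not the original code):
model.add(Dropout(drop_in, input_shape=(784,)))
model.add(Dense(512))
model.add(BatchNormalization(epsilon=epsilon, momentum=momentum))
model.add(Activation('relu'))
model.add(Dropout(drop_hidden))
model.add(Dense(nb_classes))
model.compile(loss='squared_hinge',  # labels above are -1 or 1 for hinge loss
              optimizer=Adam(lr=lr_start),
              metrics=['accuracy'])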
Example #30
import mxnet as mx
import numpy as np
import cv2
import logging
from data import get_mnist
from mlp_sym import get_mlp_sym, get_conv_sym
logging.getLogger().setLevel(logging.DEBUG)  # logging to stdout

if __name__ == "__main__":

    # Get the data iterator
    batch_size = 100
    train_iter, val_iter = get_mnist(batch_size)

    # Get symbol
    # model = get_mlp_sym()
    # todo: model = get_conv_sym()

    model_conv = get_conv_sym()

    # Viz the graph and save the plot for debugging
    plot = mx.viz.plot_network(model_conv,
                               title="Convolution",
                               save_format="pdf",
                               hide_weights=True)
    plot.render("Convolution")

    # Viz the graph and save the plot for debugging
    #plot = mx.viz.plot_network(model, title="mlp", save_format="pdf", hide_weights=True)
    #plot.render("MLP")
Example #31
pool_size = (2, 2)
classes = 10
use_bias = False

# Learning rate schedule
lr_start = 1e-3
lr_end = 1e-4
lr_decay = (lr_end / lr_start)**(1 / epochs)

# Batch Normalization parameters

epsilon = 1e-6
momentum = 0.9

# Download the MNIST dataset and split it into training and test data
mnist = get_mnist()

(train_data, train_label, test_data, test_label) = mnist
# print(train_data.shape, train_label.shape)
train_data = train_data.reshape(60000, 1, 28, 28)
print(train_data.shape)
test_data = test_data.reshape(10000, 1, 28, 28)
train_data = train_data.astype('float32')
test_data = test_data.astype('float32')
train_data /= 255
test_data /= 255
print(train_data.shape[0], 'train samples')
print(test_data.shape[0], 'test samples')

# Convert the class labels to -1 or 1
train_label = np_utils.to_categorical(train_label, classes) * 2 - 1
Example #32
    if opt.model_in is None:
        opt.model_out = opt.exp_name + '_model.txt'
    else:
        opt.model_out = opt.model_in
else:
    model_out = opt.model_out

# print config
if opt.verbose:
    options.print_config(opt)

# Load data
N_train = 55000
N_dev = 5000
N_test = 10000
train_x, dev_x, test_x, train_y, dev_y, test_y = data.get_mnist(N_dev, shuffle=opt.shuffle, preprocessing=data.whiten)

# Model parameters
num_classes = len(set(train_y)) # Number of classes
input_length = train_x.shape[1] # Dimension of the input
dh = opt.hidden_dim
di = 1

# Create model
model = dy.Model()                      # DyNet Model
trainer = dy.SimpleSGDTrainer(model,    # Trainer
                              opt.learning_rate,
                              opt.learning_rate_decay)
trainer.set_clip_threshold(-1)          # Disable gradient clipping

# Create the parameters
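
# The listing is truncated here. With DyNet, the parameters of a
# one-hidden-layer classifier would typically be created along these
# lines (an assumed sketch, not the original code):
W_hid = model.add_parameters((dh, input_length))
b_hid = model.add_parameters((dh,))
W_out = model.add_parameters((num_classes, dh))
b_out = model.add_parameters((num_classes,))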