Code example #1
File: main.py Project: zubayer87/vae-tf
def all_plots(model, mnist):
    if model.architecture[-1] == 2:  # only works for 2-D latent
        print("Plotting in latent space...")
        plot_all_in_latent(model, mnist)

        print("Exploring latent...")
        plot.exploreLatent(model,
                           nx=20,
                           ny=20,
                           range_=(-4, 4),
                           outdir=PLOTS_DIR)
        for n in (24, 30, 60, 100):
            plot.exploreLatent(model,
                               nx=n,
                               ny=n,
                               ppf=True,
                               outdir=PLOTS_DIR,
                               name="explore_ppf{}".format(n))

    print("Interpolating...")
    interpolate_digits(model, mnist)

    print("Plotting end-to-end reconstructions...")
    plot_all_end_to_end(model, mnist)

    print("Morphing...")
    morph_numbers(model, mnist, ns=[9, 8, 7, 6, 5, 4, 3, 2, 1, 0])

    print("Plotting 10 MNIST digits...")
    for i in range(10):
        plot.justMNIST(get_mnist(i, mnist), name=str(i), outdir=PLOTS_DIR)
Code example #2
File: main.py Project: Abdelpakey/GANs
def main():
    # get training parameters
    with open('params.json') as f:
        gan_params = json.load(f)

    print('--params--')
    pprint(gan_params)

    dataset_base_dir = './data_set'
    for param in gan_params:
        model_name = param["model-name"]
        epochs = int(param["epochs"])
        mnist_type = param["mnist-type"]
        mnist = utils.get_mnist(dataset_base_dir, mnist_type)

        print('Training {:s} with epochs: {:d}, dataset: {:s}'.format(
            model_name, epochs, mnist_type))

        # get the appropriate module and its class to start training
        module_name = import_module(model_name)
        gan_class = getattr(module_name, model_name.upper())
        net = gan_class(model_name, mnist_type, mnist, epochs)
        net.train()

    return
Code example #3
def morph_numbers(model, mnist, ns=None, n_per_morph=10):
    if not ns:
        import random
        ns = random.sample(range(10), 10) # non-in-place shuffle

    xs = np.squeeze([get_mnist(n, mnist) for n in ns])
    mus, _ = model.encode(xs)
    plot.morph(model, mus, n_per_morph=n_per_morph, outdir=PLOTS_DIR, name="morph_{}".format("".join(str(n) for n in ns)))
Code example #4
    def apply_opt(self):
        # dataset
        if self._opt.dataset == "MNIST":
            train_data, test_data = utils.get_mnist()
            self._train_set = torch.utils.data.DataLoader(
                train_data,
                batch_size=self._opt.batch_size,
                shuffle=True,
                num_workers=self._opt.num_workers)
            self._test_set = torch.utils.data.DataLoader(
                test_data,
                batch_size=self._opt.batch_size,
                shuffle=True,
                num_workers=self._opt.num_workers)
            self._initialize_model(dims=self._opt.layer_dims)
            print("MNIST experiment")

        elif self._opt.dataset == "IBNet":
            train_data = utils.CustomDataset('2017_12_21_16_51_3_275766',
                                             train=True)
            test_data = utils.CustomDataset('2017_12_21_16_51_3_275766',
                                            train=False)
            self._train_set = torch.utils.data.DataLoader(
                train_data,
                batch_size=self._opt.batch_size,
                shuffle=True,
                num_workers=self._opt.num_workers)
            self._test_set = torch.utils.data.DataLoader(
                test_data,
                batch_size=self._opt.batch_size,
                shuffle=True,
                num_workers=self._opt.num_workers)
            self._initialize_model(dims=self._opt.layer_dims)
            print("IBnet experiment")
        else:
            raise RuntimeError(
                'Unknown dataset {name}; please choose one of the supported datasets'
                .format(name=self._opt.dataset))

        # construct saving directory
        save_root_dir = self._opt.save_root_dir
        dataset = self._opt.dataset
        time = datetime.datetime.today().strftime('%m_%d_%H_%M')
        model = ''.join(
            list(map(lambda x: str(x) + '_', self._model.layer_dims)))
        folder_name = dataset + '_' + self._opt.experiment_name + '_Time_' + time + '_Model_' + model
        self._path_to_dir = save_root_dir + '/' + folder_name + '/'
        print(self._path_to_dir)
        if not os.path.exists(self._path_to_dir):
            os.makedirs(self._path_to_dir)

        self._logger = Logger(opt=self._opt, plot_name=folder_name)
        self._json = JsonParser()
Code example #5
File: gan.py Project: namratadeka/vanilla-gan
    def __init__(self):
        '''

        '''
        self.BATCH_SIZE = 32
        self.EPOCHS = 100000
        # self.image_dim = [-1, 32, 32, 3]                          # CIFAR-10
        self.image_dim = [-1, 28, 28, 1]  # MNIST
        # self.discriminator_input_dim = (None, 32, 32, 3)          # CIFAR-10
        self.discriminator_input_dim = (None, 784)  # MNIST
        self.net = Network()
        # self.data = utils.get_cifar10()                           # CIFAR-10
        self.data = utils.get_mnist()  # MNIST
        # self.logdir = "train_logs/cifar10/"                   # CIFAR-10
        self.logdir = "train_logs/mnist/"  # MNIST
Code example #6
def mnist():
    X, Y = utils.get_mnist()
    print ("y shape", Y.shape, Y[0:10])
    Y = None
    X = X.reshape(len(X), 28,28, 1)
    dim = X.shape[1]
    colors = X.shape[-1]

    # mnist
    d_sizes = {
        "conv_layers": [(2, 5, 2, False), (64, 5, 2, True)],
        "dense_layers": [(1024, True)],
    }
    g_sizes = {
        "z": 100,
        "projection": 128,
        "bn_after_project": False,
        "conv_layers": [(128, 5, 2, True), (colors, 5, 2, False)],
        "dense_layers": [(1024, True)],
        "output_activation": tf.sigmoid,
    }

    gan = DCGAN(dim, colors, d_sizes, g_sizes)
    gan.fit(X)
Code example #7
def main():
    # Load data
    train, test = get_mnist()

    # Initialize iterators
    train_iter = iterators.SerialIterator(train, batch_size=32, shuffle=True)
    val_iter = iterators.SerialIterator(test,
                                        batch_size=50,
                                        repeat=False,
                                        shuffle=False)

    # Define model
    model = MLP(10, 10)
    optimizer = optimizers.SGD()
    optimizer.setup(model)

    training_losses, validation_losses = run(train_iter, val_iter, test, model,
                                             optimizer, 200)
Code example #8
File: ComputeMI.py Project: zxh009123/DeepInfoFlow
    def __init__(self):
        self.progress_bar = 0
        self._device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # device setup
        load_config = JsonParser() # training args
        self.model_name = 'IBNet_test_save_Time_05_27_20_09_Model_12_12_10_7_5_4_3_2_2_'
        self.path = os.path.join('./results', self.model_name)  # info plane dir
        self._opt = load_config.read_json_as_argparse(self.path) # load training args

        # force the batch size to 1 for calculation convenience
        self._opt.batch_size = 1
        # dataset
        if self._opt.dataset == "MNIST":
            train_data, test_data = utils.get_mnist()

            if not self._opt.full_mi:
                # self._train_set = torch.utils.data.DataLoader(train_data, batch_size=1, shuffle=False, num_workers=0)
                self._test_set = torch.utils.data.DataLoader(test_data, batch_size=1, shuffle=False, num_workers=0)
            else:
                dataset = torch.utils.data.ConcatDataset([train_data, test_data])
                self._test_set = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=0)
            print("MNIST experiment")

        elif self._opt.dataset == "IBNet":
            train_data = utils.CustomDataset('2017_12_21_16_51_3_275766', train=True)
            test_data = utils.CustomDataset('2017_12_21_16_51_3_275766', train=False)
            # self._train_set = torch.utils.data.DataLoader(train_data, batch_size=1, shuffle=False, num_workers=0)
            if not self._opt.full_mi:
                self._test_set = torch.utils.data.DataLoader(test_data, batch_size=1, shuffle=False, num_workers=0)
            else:
                dataset = torch.utils.data.ConcatDataset([train_data, test_data])
                self._test_set = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=0)
            print("IBnet experiment")
        else:
            raise RuntimeError('Unknown dataset {name}; please choose one of the supported datasets'.format(name=self._opt.dataset))

        # get model
        self._model = Model(activation=self._opt.activation, dims=self._opt.layer_dims, train=False)
        
        # get measure
        # self.measure = measure.kde()
        self.measure = measure.EVKL() # our new measure
Code example #9
def main():
    Xtrain, Ytrain, Xtest, Ytest = get_mnist()

    pca = PCA()
    reduced = pca.fit_transform(Xtrain)
    plt.scatter(reduced[:, 0], reduced[:, 1], s=100, c=Ytrain, alpha=.5)
    plt.show()

    plt.plot(pca.explained_variance_ratio_)
    plt.show()

    #cumulative variance
    #choose k = number of dimensions that gives us 95%-99% variance
    cumulative = []
    last = 0
    for v in pca.explained_variance_ratio_:
        cumulative.append(last + v)
        last = cumulative[-1]

    plt.plot(cumulative)
    plt.show()
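The comment above describes choosing k so that 95%-99% of the variance is explained, but the example stops at plotting. A minimal sketch of that selection step, reusing the pca object from the example (numpy's cumsum/searchsorted are assumed to be available as np):

    # pick the smallest k whose cumulative explained variance reaches 95%
    cumulative_variance = np.cumsum(pca.explained_variance_ratio_)
    k = int(np.searchsorted(cumulative_variance, 0.95)) + 1
    print("dimensions needed for 95% variance:", k)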
Code example #10
File: hopfield.py Project: ahmedassal/ml-playground
                    data[j] = 1
                else:
                    data[j] = -1
        return data


def binarize(_arr):
    arr = np.float32(_arr)
    arr[arr <= 0] = -1
    arr[arr > 0] = 1
    return arr


if __name__ == '__main__':
    # prepare initial data
    data1, target1 = get_mnist(0, 1)
    data1 = data1.ravel()
    data = np.vstack([data1])
    data = binarize(data)

    net = HopfieldNet(28 * 28)
    net.store(data)

    # get noisy example
    example = binarize(data1)
    noise_idx = np.random.choice(range(28 * 28), 250)
    example[noise_idx] *= -1

    plt.imshow(example.reshape(28, 28), cmap='gray', interpolation='none')
    plt.xticks([])
    plt.yticks([])
Code example #11
File: assignment2.py Project: pieterwolfert/ccn
    """
    def __init__(self, predictor):
        super(Classifier, self).__init__()
        with self.init_scope():
            self.predictor = predictor

    def __call__(self, x, t):
        y = self.predictor(x)
        loss = F.softmax_cross_entropy(y, t)
        accuracy = F.accuracy(y, t)
        report({'loss': loss, 'accuracy': accuracy}, self)
        return loss


# Retrieve train & test data
train, test = get_mnist()
# split test inputs and labels
inputs = np.array([tup[0] for tup in test])
labels = np.array([tup[1] for tup in test])
# Set up model & classifier
model = MLP(10, 10)
classifier = Classifier(model)
optimizer = optimizers.SGD()
optimizer.setup(classifier)

# Implement the training loop
iterator = RandomIterator(train, 32)

av_loss = []
ep_loss = []
test_loss = []
Code example #12
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 27 09:04:38 2020

@author: MY PC
"""

from models import ML_MultiLogisticRegression
from utils import get_mnist, One_Hot_Encode

mnist_data = get_mnist()

X = mnist_data[:, 1:]
Y = mnist_data[:, 0]
Y = One_Hot_Encode(Y)

X_train = X[0:int(X.shape[0] * 0.9)]
Y_train = Y[0:int(X.shape[0] * 0.9)]

X_test = X[int(X.shape[0] * 0.9):]
Y_test = Y[int(X.shape[0] * 0.9):]

X_train /= 255.0

model = ML_MultiLogisticRegression()

model.fit(X_train, Y_train, epochs=800, showfig=True, learning_rate=0.1)

X_test /= 255.0
test_acc = model.evaluate(X_test, Y_test)
Code example #13
    plt.xlabel("Epoch")
    plt.show()
    for i in range(4):
        gen_input = np.float32(np.random.uniform(size=[1, 1]))
        generation = generative_net(
            gen_input
        )  # we need to keep the variable type around, to compute stuff

        plt.imshow(np.reshape(generation.data, newshape=[28, 28]).transpose())
        plt.show()


if __name__ == "__main__":
    n_iter = 500
    batch_size = 50
    train_data, test_data = get_mnist(n_train=1000,
                                      n_test=100,
                                      with_label=False,
                                      classes=[0],
                                      n_dim=3)
    train_iter = RandomIterator(train_data, batch_size)
    test_iter = RandomIterator(test_data, batch_size)

    discriminative_net = networks.DiscriminativeMLP(n_hidden=20)
    generative_net = networks.GenerativeMLP(n_hidden=200)

    discriminative_net = networks.DiscriminativeConvolutional().to_gpu()
    generative_net = networks.GenerativeDeconvolutional(256).to_gpu()

    train()
Code example #14
File: main.py Project: ahmedassal/ml-playground
    # data = (np.float32(data) / 255.).reshape(64, 28, 28)
    # data -= np.mean(data)
    # for i in xrange(64):
    #     print i
    #     coder = SpatialCoder(data[i])
    #     # result = coder.gabor_encode()
    #     result = coder.gabor_convolve()
    #     # result = coder.hog_encode()
    #     result = coder.cluster_lvl1(result)
    #     plt.subplot(8, 8, i)
    #     plt.xticks(())
    #     plt.yticks(())
    #     plt.imshow(result, interpolation='none')
    # plt.show()

    data, target = get_mnist(20000, 20001)
    data = (np.float32(data) / 255.).reshape(28, 28)
    data -= np.mean(data)
    coder = SpatialCoder(data)
    omap = coder.gabor_convolve()
    # result = coder.cluster_lvl1(omap)

    # plt.subplot(2, 1, 0)
    # plt.xticks(())
    # plt.yticks(())
    plt.imshow(omap, interpolation='none')
    # plt.subplot(2, 1, 1)
    # plt.xticks(())
    # plt.yticks(())
    # plt.imshow(result, interpolation='none')
    plt.show()
Code example #15
File: bm.py Project: ahmedassal/ml-playground
def get_preprocessed_mnist(start, end):
    data, target = get_mnist(start, end)
    return binarize(np.vstack([
        resize(item.reshape(28, 28), (dim, dim)).ravel()
        for item in data
    ]))
Code example #16
			y[i] = max_value_count

		return y


	# Score function gives accuracy of prediction
	def score(self, X, y):
		pred = self.predict(X)
		return np.mean( y == pred)


if __name__ == '__main__':
	
	# getting the data (NOTE: I have used only 2000 data points.)
	# You can change this 
	X, y = get_mnist(2000)

	Ntrain = int(0.7 * len(X))
	# Train and Test splits 
	Xtrain, ytrain = X[:Ntrain], y[:Ntrain]
	Xtest, ytest = X[Ntrain:], y[Ntrain:]
	train_score = []
	test_score = []


	# choosing optimal K is difficult
	# I have used K = 1 to 5
	# You can change K as per your requirement 
	for k in (1, 2, 3, 4, 5):

		t0 = datetime.now()
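The loop above is cut off right after t0. A plausible completion, under the assumption that the classifier defined in this file is constructed as KNN(k) (hypothetical name) and exposes the fit/score methods shown above:

	for k in (1, 2, 3, 4, 5):
		t0 = datetime.now()
		knn = KNN(k)  # hypothetical constructor for the classifier defined above
		knn.fit(Xtrain, ytrain)
		train_score.append(knn.score(Xtrain, ytrain))
		test_score.append(knn.score(Xtest, ytest))
		print("k =", k, "train:", train_score[-1], "test:", test_score[-1],
		      "time:", datetime.now() - t0)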
Code example #17
# ROFFO GIORGIO

from __future__ import print_function, division
from builtins import range, input

import matplotlib.pyplot as plt

import warnings
warnings.filterwarnings("ignore", category=FutureWarning)

from Classifier import BayesGMM
from utils import get_mnist

# Load a dataset: in this case I'm working on MNIST
X, Y = get_mnist()

# Initialize the Bayesian Classifier with Gaussian Mixture Models
clf = BayesGMM()

# Train the classifier
clf.fit(X, Y)

# show one sample for each class
# also show the mean image learned
for k in range(clf.K):

    sample, mean = clf.sample_given_y(k)

    plt.subplot(1, 2, 1)
    plt.imshow(sample, cmap='gray')
    plt.title("Sample")
Code example #18
def main():

    train_x, train_y, valid_x, valid_y, test_x, test_y = get_mnist()

    num_epochs = args.epochs
    eta = args.lr
    batch_size = args.batch_size

    # input
    x = T.matrix("x")
    y = T.ivector("y")

    #x.tag.test_value = np.random.randn(3, 784).astype("float32")
    #y.tag.test_value = np.array([1,2,3])
    #drop_switch.tag.test_value = 0
    #import ipdb; ipdb.set_trace()
    hidden_1 = BinaryDense(input=x, n_in=784, n_out=2048, name="hidden_1")
    act_1 = Activation(input=hidden_1.output, activation="relu", name="act_1")
    hidden_2 = BinaryDense(input=act_1.output,
                           n_in=2048,
                           n_out=2048,
                           name="hidden_2")
    act_2 = Activation(input=hidden_2.output, activation="relu", name="act_2")
    hidden_3 = BinaryDense(input=act_2.output,
                           n_in=2048,
                           n_out=2048,
                           name="hidden_3")
    act_3 = Activation(input=hidden_3.output, activation="relu", name="act_3")
    output = BinaryDense(input=act_3.output,
                         n_in=2048,
                         n_out=10,
                         name="output")
    softmax = Activation(input=output.output,
                         activation="softmax",
                         name="softmax")

    # loss
    xent = T.nnet.nnet.categorical_crossentropy(softmax.output, y)
    cost = xent.mean()

    # errors
    y_pred = T.argmax(softmax.output, axis=1)
    errors = T.mean(T.neq(y, y_pred))

    # updates + clipping (+-1)
    params_bin = hidden_1.params_bin + hidden_2.params_bin + hidden_3.params_bin
    params = hidden_1.params + hidden_2.params + hidden_3.params
    grads = [T.grad(cost, param)
             for param in params_bin]  # calculate grad w.r.t binary parameters
    updates = []
    for p, g in zip(
            params, grads
    ):  # gradient update on full precision weights (NOT binarized wts)
        updates.append((p, clip_weights(p - eta * g)))  # SGD step + weight clipping

    # compiling train, predict and test fxns
    train = theano.function(inputs=[x, y], outputs=cost, updates=updates)
    predict = theano.function(inputs=[x], outputs=y_pred)
    test = theano.function(inputs=[x, y], outputs=errors)

    # train
    checkpoint = ModelCheckpoint(folder="snapshots")
    logger = Logger("logs/{}".format(time()))
    for epoch in range(num_epochs):

        print "Epoch: ", epoch
        print "LR: ", eta
        epoch_hist = {"loss": []}

        t = tqdm(range(0, len(train_x), batch_size))
        for lower in t:
            upper = min(len(train_x), lower + batch_size)
            loss = train(train_x[lower:upper],
                         train_y[lower:upper].astype(np.int32))
            t.set_postfix(loss="{:.2f}".format(float(loss)))
            epoch_hist["loss"].append(loss.astype(np.float32))

        # epoch loss
        average_loss = sum(epoch_hist["loss"]) / len(epoch_hist["loss"])
        t.set_postfix(loss="{:.2f}".format(float(average_loss)))
        logger.log_scalar(tag="Training Loss", value=average_loss, step=epoch)

        # validation accuracy
        val_acc = 1.0 - test(valid_x, valid_y.astype(np.int32))
        print "Validation Accuracy: ", val_acc
        logger.log_scalar(tag="Validation Accuracy", value=val_acc, step=epoch)
        checkpoint.check(val_acc, params)

    # Report Results on test set
    best_val_acc_filename = checkpoint.best_val_acc_filename
    print "Using ", best_val_acc_filename, " to calculate best test acc."
    load_model(path=best_val_acc_filename, params=params)
    test_acc = 1.0 - test(test_x, test_y.astype(np.int32))
    print "Test accuracy: ", test_acc
Code example #19
                    help="initialize data augmentation",
                    action='store_true')
parser.add_argument('-s', "--dataset", help="choose 'mnist' or 'cifar10' ")
args = parser.parse_args()

with open('config/model.json') as f:
    model_config = json.load(f)
with open('config/training.json') as f:
    train_config = json.load(f)

if __name__ == "__main__":

    model = get_SimpleNet(model_config['0'])

if args.dataset == 'mnist':
    X_train, Y_train, X_test, Y_test = get_mnist(model_config['0'])
else:
    X_train, Y_train, X_test, Y_test = get_cifar10(model_config['0'])

train_batch_size = train_config['train_batch_size']
train_validation_split = train_config['train_validation_split']
epochs = train_config['epochs']

if args.data_aug:
    #Image Data Generator
    datagen = ImageDataGenerator(width_shift_range=0.2,
                                 horizontal_flip=True,
                                 height_shift_range=0.2,
                                 rotation_range=20)
    datagen.fit(X_train)
    checkpointer = ModelCheckpoint(filepath='saved_models/' + model.name +
Code example #20
import pandas
import datetime as dt
from nn.layer import *
from nn.criterion import *
from optim.optimizer import *
from util.common import *
from dataset.transformer import *
from dataset import mnist
from utils import get_mnist
init_engine()

# Get and store MNIST as an RDD of Sample; edit "mnist_path" below accordingly.
mnist_path = "datasets/mnist"

(train_data, test_data) = get_mnist(sc, mnist_path)
print train_data.count()
print test_data.count()
# Parameters
learning_rate = 0.2

training_epochs = 15
batch_size = 2048
display_step = 1
# Network Parameters
n_hidden_1 = 256 # 1st layer number of features

n_hidden_2 = 256 # 2nd layer number of features
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)

def multilayer_perceptron(n_hidden_1, n_hidden_2, n_input, n_classes):
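The builder above is truncated right after its def line. A minimal sketch of how it might continue, assuming the standard BigDL layers exported by nn.layer (Sequential, Reshape, Linear, ReLU, LogSoftMax):

def multilayer_perceptron(n_hidden_1, n_hidden_2, n_input, n_classes):
    # two fully connected hidden layers followed by a log-softmax output
    model = Sequential()
    model.add(Reshape([n_input]))  # flatten 28x28 images to 784-dim vectors
    model.add(Linear(n_input, n_hidden_1))
    model.add(ReLU())
    model.add(Linear(n_hidden_1, n_hidden_2))
    model.add(ReLU())
    model.add(Linear(n_hidden_2, n_classes))
    model.add(LogSoftMax())
    return model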
Code example #21
import Encoder as e
import math
from sklearn import neighbors, datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
n_epoch = 20
n_train_per_class = 5000
batchsize = 128
weight_decay = 0.000025
initial_alpha = 0.0002
final_alpha = 0.000002
beta_1 = 0.5
beta_2 = 0.999
latent_dim = 50
train_data, test_data = u.get_mnist(n_train=n_train_per_class,
                                    n_test=100,
                                    with_label=True,
                                    classes=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
#train_data, test_data  = chsets.get_mnist()
# get the correct total length
n_train = train_data._length

# the discriminator is tasked with classifying examples as real or fake
Disc = CustomClassifier(predictor=d.Discriminator(latent_dim),
                        lossfun=f.sigmoid_cross_entropy)
Disc.compute_accuracy = False
Gen = g.Generator()
Enc = e.Encoder(latent_dim)

# Use Adam optimizer
# learning rate, beta1, beta2
disc_optimizer = optimizers.Adam(initial_alpha, beta1=beta_1, beta2=beta_2)
Code example #22
File: run.py Project: rocknrollnerd/vae
from utils import get_mnist, sample_reconstructions, plot_samples
from vae import SampleLayer, vae_log_likelihood

import theano
import theano.tensor as T
import lasagne
from lasagne.nonlinearities import rectify, identity

import numpy as np
from sklearn.cross_validation import train_test_split


if __name__ == '__main__':
    data = get_mnist()

    train, test = train_test_split(data, test_size=0.1)

    _train = theano.shared(train, borrow=True)
    _test = theano.shared(test, borrow=True)

    batch_size = 500
    latent_size = 2

    target = T.matrix()

    encoder = lasagne.layers.InputLayer((None, train.shape[1]), target)
    encoder = lasagne.layers.DenseLayer(encoder, num_units=100, nonlinearity=rectify)
    mean = lasagne.layers.DenseLayer(encoder, num_units=latent_size, nonlinearity=identity)
    log_sigma = lasagne.layers.DenseLayer(encoder, num_units=latent_size, nonlinearity=identity)
    z = SampleLayer(mean=mean, log_sigma=log_sigma)
    decoder1 = lasagne.layers.DenseLayer(z, num_units=100, nonlinearity=rectify)
Code example #23
    os.makedirs(
        figsdir, exist_ok=True
    )  # exist_ok makes function do nothing if directory already exists

    input_type = 'image'
    representation_type = 'image'
    output_type = ['image']
    output_dim = [(3, 28, 28)]
    problem = 'privacy'
    input_dim = (3, 28, 28)
    eval_rate = 100  # was 500; evaluate on last epoch
    gamma = 1
    epochs = 501
    batch_size = 2048

    trainset, testset = utils.get_mnist()

    network = variational_privacy_fairness.VPAF(
        input_type=input_type,
        representation_type=representation_type,
        output_type=output_type,
        problem=problem,
        gamma=gamma,
        input_dim=input_dim,
        output_dim=output_dim).to(device)
    network.apply(utils.weight_init
                  )  #apply calls the function recursively to each submodule
    network.train()
    network.fit(trainset,
                testset,
                epochs=epochs,
Code example #24
cfg = {}
cfg['SGD_BATCHSIZE']    = 256
cfg['SGD_LEARNINGRATE'] = 0.001
cfg['NUM_EPOCHS']       = 10000
# changed from 10000

#cfg['ACTIVATION'] = 'relu'
cfg['ACTIVATION'] = 'tanh'
# How many hidden neurons to put into each of the layers
#cfg['LAYER_DIMS'] = [1024, 20, 20, 20]
#cfg['LAYER_DIMS'] = [32, 28, 24, 20, 16, 12, 8, 8]
cfg['LAYER_DIMS'] = [128, 64, 32, 16, 16] # 0.967 w. 128
#cfg['LAYER_DIMS'] = [20, 20, 20, 20, 20, 20] # 0.967 w. 128
ARCH_NAME =  '-'.join(map(str,cfg['LAYER_DIMS']))
trn, tst = utils.get_mnist()

# Where to save activation and weights data
cfg['SAVE_DIR'] = 'rawdata/' + cfg['ACTIVATION'] + '_' + ARCH_NAME


# In[ ]:


input_layer  = keras.layers.Input((trn.X.shape[1],))
clayer = input_layer
for n in cfg['LAYER_DIMS']:
    clayer = keras.layers.Dense(n, activation=cfg['ACTIVATION'])(clayer)
output_layer = keras.layers.Dense(trn.nb_classes, activation='softmax')(clayer)

model = keras.models.Model(inputs=input_layer, outputs=output_layer)
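The snippet stops after building the model. A sketch of the training step using the cfg values defined above, assuming the trn/tst dataset objects also expose one-hot labels as .Y (only .X and .nb_classes appear in the snippet):

model.compile(optimizer=keras.optimizers.SGD(lr=cfg['SGD_LEARNINGRATE']),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(trn.X, trn.Y,
          batch_size=cfg['SGD_BATCHSIZE'],
          epochs=cfg['NUM_EPOCHS'],
          validation_data=(tst.X, tst.Y))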
Code example #25
File: bi-rnn.py Project: mcomans/qpe-experiments
    redire_spark_logs()
    show_bigdl_info_logs()
    init_engine()
    # Parameters
    batch_size = int(options.batchSize)
    learning_rate = float(options.learningRate)
    learning_rate_decay = float(options.learningrateDecay)
    if options.action == "train":

        def get_end_trigger():
            if options.endTriggerType.lower() == "epoch":
                return MaxEpoch(options.endTriggerNum)
            else:
                return MaxIteration(options.endTriggerNum)

        train_data = get_mnist(sc, "train", options.dataPath) \
            .map(lambda rec_tuple: (normalizer(rec_tuple[0], mnist.TRAIN_MEAN, mnist.TRAIN_STD),
                                    rec_tuple[1])) \
            .map(lambda t: Sample.from_ndarray(t[0], t[1]))
        test_data = get_mnist(sc, "test", options.dataPath) \
            .map(lambda rec_tuple: (normalizer(rec_tuple[0], mnist.TEST_MEAN, mnist.TEST_STD),
                                    rec_tuple[1])) \
            .map(lambda t: Sample.from_ndarray(t[0], t[1]))

        train_data = train_data.sample(False, float(options.trainingSetSize))

        optimizer = Optimizer(model=build_model(n_input, n_hidden, n_classes),
                              training_rdd=train_data,
                              criterion=CrossEntropyCriterion(),
                              optim_method=SGD(
                                  learningrate=learning_rate,
                                  learningrate_decay=learning_rate_decay),
                              end_trigger=get_end_trigger(),
                              batch_size=options.batchSize)
        optimizer.set_validation(batch_size=options.batchSize,
                                 val_rdd=test_data,
Code example #26
File: dec.py Project: arseninaanna/dec
            if acc is not None:
                log += ', accuracy %.5f' % (acc)
            print(log)

            if batch_number != 0 and delta <= threshold:
                return

        idx = current_slice()
        p = target_prediction(dec_model, x)
        loss = dec_model.train_on_batch(x[idx], p[idx])

        batch_number += 1


if __name__ == '__main__':
    X, Y = utils.get_mnist()
    print("MNIST loaded")

    encoder = utils.load_model('encoder')
    print("Encoder loaded")

    fl = False
    try:
        dec_model = utils.load_model('dec')
        print('DEC loaded')
    except EnvironmentError:
        dec_model = get_model(encoder, X)

        print('Training DEC')
        train_dec(dec_model, X, Y)
Code example #27
        ])

        model.compile(loss='mse', optimizer=SGD(lr=lr, decay=0, momentum=momentum))
        model.fit(outputs[i], outputs[i], batch_size=batch_size, epochs=pt_epochs, callbacks=[lr_schedule], verbose=1)

        enc_model = Sequential([enc_i])
        enc_model.compile(loss='mse', optimizer=SGD(lr=lr, decay=0, momentum=momentum))
        out = enc_model.predict(outputs[i])

        outputs.append(out)

    autoencoder = Sequential(encoders + decoders[::-1])
    autoencoder.compile(loss='mse', optimizer=SGD(lr=lr, decay=0, momentum=momentum))
    autoencoder.fit(x, x, batch_size=batch_size, epochs=ae_epochs)

    encoder = Sequential(encoders)
    encoder.compile(loss='mse', optimizer=SGD(lr=lr, decay=0, momentum=momentum))

    return encoder


if __name__ == '__main__':
    X, Y = utils.get_mnist()
    if TESTING:
        X, Y = utils.get_mnist(35000)

    encoder = train_autoencoder(X)
    utils.save_model('encoder', encoder)

    print("Encoder trained and stored")
Code example #28
File: bernoulli.py Project: ahmedassal/ml-playground
    def sample(self, arr):
        return np.random.binomial(n=1, p=arr)

    def sample_h_given_v(self, v):
        probs = self.propup(v)
        states = self.sample(probs)
        return probs, states

    def sample_v_given_h(self, h):
        probs = self.propdown(h)
        states = self.sample(probs)
        return probs, states

if __name__ == '__main__':
    data, target = get_mnist(random=True, num=1000)
    data = binarize(data, 0., 1.)

    dim = 28
    h = 100
    x = 10
    y = 10

    rbm = BernoulliRBM(
        dim * dim, h, learning_rate=0.1,
        sparsity_target=0.05,
        regulatization_param=0.01
        )
    rbm.train(data, epochs=1000)

    for i in xrange(h):
Code example #29
File: train3.py Project: eladb3/DTN
def train(batch_size, Epochs = float("Inf"), 
          L_times = {},
          weights = {"L_TID":15, "L_CONST":15},
          save = True, cont = False, plt_ = True , hours = float("Inf"), lr = 0.0003, show = False):
    base = f"./models/trainings/{utils.get_local_time()}"
    params = locals()
    os.mkdir(base)
    with open(f"{base}/params.txt", 'wt') as f: f.write(str(params))
    with open(f"{base}/params_dict", 'wb') as f: pickle.dump(params, f)
    hours = hours * (60 * 60) # sec in hour
    
    # convert constant (int) entries of L_times into constant functions;
    # bind the value via a default argument so the lambda returns the original int
    for k in L_times.keys():
        if isinstance(L_times[k], int):
            L_times[k] = lambda x, v=L_times[k]: v
            
    
    if cont:
        g = torch.load(f"{cont}/g_net")
        D = torch.load(f"{cont}/D_net")
    else:
        g = Nets.g_net
        D = Nets.D_net
    g, D = g.to(device), D.to(device)
    f = Nets.f_net_features.to(device)
    opt_g = optim.Adam(g.parameters(), lr=  lr, betas=(0.5, 0.999))
    opt_D = optim.Adam(D.parameters(), lr=  lr, betas=(0.5, 0.999))
    
    opts = {'g':opt_g, 'D':opt_D}
    dl = {'s':utils.get_svhn(batch_size) , 't':utils.get_mnist(batch_size)}
    dl_test = utils.get_svhn(32, "test")
    dl_test_mnist = utils.get_mnist(32, "test")
    
    names = ['L_GANG', 'L_CONST', 'L_D', 'L_TID']
    prob_types = ['trans', 'real_trans', 'real']
    cum_norm, cum_loss, n = {c:0 for c in names}, {c:[] for c in names}, {c:0 for c in names}
    cum_prob = {typ:[] for typ in prob_types}
    
    def myplt(path = None):
            g.eval() ; D.eval()
            print(f" >>>> Epoch {e} - Svhn")
            
            plt.figure(figsize = (15, 10))
            x ,_  = next(iter(dl_test))
            x = x[:4, :, :, :]
            utils.plt_row_images(x.cpu())
            if path: plt.savefig(f"{path}/svhn.jpg")
            if show: plt.show()
            plt.figure(figsize = (15, 10))
            x = x.to(device)
            y = g(f(x))
            utils.plt_row_images(y.cpu())
            if path: plt.savefig(f"{path}/svhn_G.jpg")
            if show: plt.show()

            print(f" >>>> Epoch {e} - Mnist")
            plt.figure(figsize = (15, 10))
            x ,_  = next(iter(dl_test_mnist))
            x = x[:4, :, :, :]
            utils.plt_row_images(x.cpu())
            if path: plt.savefig(f"{path}/mnist.jpg")
            if show: plt.show()
            plt.figure(figsize = (15, 10))
            x = x.to(device)
            with torch.no_grad():
                y = g(f(x))
            utils.plt_row_images(y.cpu())
            if path: plt.savefig(f"{path}/mnist_G.jpg")
            if show: plt.show()
            
            plt.figure(figsize = (15, 10))
            for i, name in enumerate(names):
                plt.subplot(2,2,i+1)
                plt.title(name)
                cp = cum_loss[name]
                if len(cp) > 200: cp = cp[50:]
                plt.plot(cp)
            if path: plt.savefig(f"{path}/loss.jpg")
            if show: plt.show()
            
            plt.figure(figsize = (15, 10))
            for typ in prob_types:
                cp = cum_prob[typ]
                plt.plot(cp, label = typ)
            plt.legend()
            plt.title("Disc Probs")
            if path: plt.savefig(f"{path}/probs.jpg")
            if show: plt.show()
            g.train() ; D.train()

    
    
    
    def tr_step(name, x, times = 1):
        c = 'D' if name == 'L_D' else 'g'  # the L_D loss steps the discriminator optimizer, everything else the generator
        if times == 0:
            cum_loss[name].append(cum_loss[name][-1] if len(cum_loss[name]) else 0)
            return
        closs = 0
        cp = {typ:0 for typ in prob_types}
        for i in range(times):
            loss = get_loss(name, x, f, g, D, weight = weights.get(name, 1))
            
            if name == "L_D":
                loss, p = loss
                for typ in p:
                    cp[typ] += p[typ].cpu().item()
    
            closs += loss
            opts[c].zero_grad() ; loss.backward() ; opts[c].step()
        cum_loss[name].append(closs.cpu().item() / times)
        if name == "L_D":
            for typ in prob_types:
                cum_prob[typ].append(cp[typ] / times)
        n[name] += times
    
    start_time = time.time()
    e = 0
    while True:
    
        x = {}
        iters = {k:iter(dl[k]) for k in dl}
        i = 0
        while get_next_batch(x, iters):
            for k in x: x[k] = x[k].to(device)
            
            tr_step('L_D', x, times = L_times.get("L_D", lambda x: 1)(i))
            tr_step('L_GANG', x, times = L_times.get("L_GANG", lambda x: 1)(i))
            tr_step('L_CONST', x, times = L_times.get("L_CONST", lambda x: 0 if x % 10 else 1)(i))
            tr_step('L_TID', x, times = L_times.get("L_TID", lambda x: 1)(i))

            i += 1
            if i % 500 == 0 and i > 1:
                myplt()


        print(f">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> EPOCH {e} END")
        if save and e > 0 and e % 5 == 0:
            os.mkdir(f"{base}/{e}")
            myplt(path = f"{base}/{e}")
            for name, model in [("g_net", g), ("D_net", D)]:
                torch.save(model.cpu(), f"{base}/{e}/{name}")
                model.to(device)
            print(f"CP -- models saved to {base}/{e}/{name}")
        
        e += 1
        if e > Epochs: break
        if (time.time() - start_time) > hours: break
        
    if save:
        for name, model in [("g_net", g), ("D_net", D)]:
            torch.save(model.cpu(), f"{base}/{name}")
    
    utils.rmdir_if_empty(base)