Example 1
    def get_data(self):
        with tf.name_scope('data'):
            # 1. Construct the datasets for train/validate/test.
            train_data, eval_data, test_data = utils.get_mnist_dataset(
                self.batch_size)
            iterator = tf.data.Iterator.from_structure(
                train_data.output_types, train_data.output_shapes)
            #print(train_data.output_shapes)    #((None, 28, 28), (None, 10))
            self.train_init = iterator.make_initializer(
                train_data)  # initializer for train_data
            self.eval_init = iterator.make_initializer(eval_data)
            self.test_init = iterator.make_initializer(test_data)

            # 2. Construct a dataset used only for dumping the intermediate layers.
            test = utils.parse_data('data/mnist', 't10k',
                                    False)  # see the parse_data function
            test_set = tf.data.Dataset.from_tensor_slices(
                test)  #construct the dataset
            iterator1 = test_set.make_one_shot_iterator()

            if not self.dump:
                img, self.label = iterator.get_next(
                )  # fetch a batch; the last batch may be smaller than batch_size
                #print(img.shape, self.label.shape)             #(?, 28, 28)  (?, 10)
            else:
                img, self.label = iterator1.get_next()
                #print(img.shape, self.label.shape)             #(28, 28)  (10, )
                length = self.label.shape[0]  # equal to label.size
                self.label = tf.reshape(
                    self.label,
                    shape=[1, length])  # force the shape to (1, 10)

            self.img = tf.reshape(
                img, shape=[-1, 28, 28,
                            1])  # -1 is inferred; it resolves to 1 in the dump case
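Every example on this page leans on a shared utils.get_mnist_dataset helper whose source is not shown (its signature even varies: Example 1 receives three splits, most others two). As a rough orientation only, here is a minimal sketch of what such a helper might look like, assuming parse_data returns (images, labels) NumPy array pairs:

import tensorflow as tf
import utils

def get_mnist_dataset(batch_size, mnist_folder='data/mnist'):
    # Assumed internals: build batched tf.data pipelines from the parsed arrays.
    train = utils.parse_data(mnist_folder, 'train', False)
    test = utils.parse_data(mnist_folder, 't10k', False)
    train_data = tf.data.Dataset.from_tensor_slices(train)
    train_data = train_data.shuffle(10000).batch(batch_size)
    test_data = tf.data.Dataset.from_tensor_slices(test).batch(batch_size)
    return train_data, test_data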
Example 2
    def get_data(self):
        with tf.name_scope('data'):
            train_data, test_data = utils.get_mnist_dataset(self.batch_size)
            iterator = tf.data.Iterator.from_structure(
                train_data.output_types, train_data.output_shapes)
            img, self.label = iterator.get_next()
            self.img = tf.reshape(img, shape=[-1, 28, 28, 1])
            self.train_init = iterator.make_initializer(train_data)
            self.test_init = iterator.make_initializer(test_data)
Example 3
    def get_data(self):
        with tf.name_scope('data'):
            train_data, test_data = utils.get_mnist_dataset(self.batch_size)
            iterator = tf.data.Iterator.from_structure(train_data.output_types,
                                                       train_data.output_shapes)
            img, self.label = iterator.get_next()
            self.img = tf.reshape(img, shape=[-1, 28, 28, 1])
            # reshape the image to make it work with tf.nn.conv2d

            self.train_init = iterator.make_initializer(train_data)  # initializer for train_data
            self.test_init = iterator.make_initializer(test_data)    # initializer for test_data
Example 4
    def get_data(self):
        with tf.name_scope('data'):
            train_data, test_data = utils.get_mnist_dataset(self.batch_size) # without val_data
            iterator = tf.data.Iterator.from_structure(train_data.output_types,
                                                       train_data.output_shapes)
            img, self.label = iterator.get_next()               # fetch one batch of samples
            self.img = tf.reshape(img, shape=[-1, 28, 28, 1])   # -1 infers the batch dimension
            # reshape the image to make it work with tf.nn.conv2d

            self.train_init = iterator.make_initializer(train_data)  # initializer for train_data
            self.test_init = iterator.make_initializer(test_data)    # initializer for test_data
Example 5
    def get_data(self, isCheck=False):
        with tf.name_scope('data'):
            train_data, test_data = utils.get_mnist_dataset(self.batch_size)
            iterator = tf.data.Iterator.from_structure(
                train_data.output_types, train_data.output_shapes)
            img, self.label = iterator.get_next()
            self.img = tf.reshape(img, shape=[-1, 28, 28, 1])
            # reshape the image to make it work with tf.nn.conv2d

            self.train_init = iterator.make_initializer(
                train_data)  # initializer for train_data
            self.test_init = iterator.make_initializer(
                test_data)  # initializer for test_data
Example 6
    def get_data(self):
        with tf.name_scope('data'):
            mnist_folder = 'data/mnist'
            train_data, test_data = utils.get_mnist_dataset(
                self.batch_size, mnist_folder=mnist_folder)

            iterator = tf.data.Iterator.from_structure(
                train_data.output_types, train_data.output_shapes)
            img, self.label = iterator.get_next()
            self.img = tf.reshape(img, shape=[-1, 28, 28, 1])
            # The iterator built above is re-pointed by the initializers below; this is
            # the key to running epochs: re-initializing with train_data or test_data
            # makes self.img yield the next batch from whichever dataset is active.
            self.train_init = iterator.make_initializer(
                train_data)  # initializer for train_data
            self.test_init = iterator.make_initializer(
                test_data)  # initializer for test_data
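The comment above describes the heart of the reinitializable-iterator pattern. As a sketch of the session loop it enables (assumed, not part of the snippet; model.opt stands in for whatever training op the model defines, and n_epochs is a placeholder):

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(n_epochs):
        sess.run(model.train_init)       # point the shared iterator at train_data
        try:
            while True:
                sess.run(model.opt)      # each run consumes one batch
        except tf.errors.OutOfRangeError:
            pass                         # one full epoch over train_data
    sess.run(model.test_init)            # switch the same iterator to test_data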
Example 7
    def get_data(self):
        with tf.name_scope('data'):
            # Fetch data
            train_data, test_data = utils.get_mnist_dataset(
                self.data_path, self.batch_size)

            # Create an iterator to draw samples from the two datasets
            iterator = tf.data.Iterator.from_structure(train_data.output_types,
                                                       train_data.output_shapes)
            img, self.label = iterator.get_next()

            # Reshape the image to make it work with tf.nn.conv2d
            self.img = tf.reshape(img, shape=[-1, 28, 28, 1])

            # Initializer for train and test data
            self.train_init = iterator.make_initializer(train_data)
            self.test_init = iterator.make_initializer(test_data)
Example 8
import torch
import torch.nn as nn
import matplotlib.pyplot as plt

import sys
sys.path.append('..')
from utils import get_mnist_dataset

# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Hyperparameters
num_classes = 10
num_epochs = 10
batch_size = 100
learning_rate = 1e-3

train_loader, test_loader = get_mnist_dataset(batch_size)

class ConvNet(nn.Module):
    def __init__(self, input_size, output_size):
        super(ConvNet, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        self.fc = nn.Linear(7*7*32, output_size)  # use the ctor argument rather than the global
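The snippet breaks off before the forward pass. A minimal forward() matching the layers above would plausibly look like this (an assumption, not from the original source; the spatial sizes follow from two 2x2 max-pools on 28x28 inputs):

    def forward(self, x):
        out = self.layer1(x)                # (N, 16, 14, 14)
        out = self.layer2(out)              # (N, 32, 7, 7)
        out = out.reshape(out.size(0), -1)  # flatten to (N, 7*7*32)
        return self.fc(out)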
Example 9
import utils

# Define parameters for the model
learning_rate = 0.01
batch_size = 128
n_epochs = 20
n_train = 60000
n_test = 10000
MNIST = True

# convolution layer parameters
n_filters = 5
filter_size = 5

# Step 1: Get data
train_data, test_data = utils.get_mnist_dataset(batch_size, MNIST)

iterator = tf.data.Iterator.from_structure(train_data.output_types,
                                           train_data.output_shapes)
img, label = iterator.get_next()
img = tf.cast(img, tf.float32)
img = tf.reshape(img, [-1, 28, 28, 1])

train_init = iterator.make_initializer(
    train_data)  # initializer for train_data
test_init = iterator.make_initializer(test_data)  # initializer for test_data

# Step 2: Set layers structure

H1 = utils.conv_layer(img, n_filters, filter_size)
H1_relu = tf.nn.relu(H1)
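utils.conv_layer is likewise not shown; since the ReLU is applied separately via H1_relu, it presumably returns a pre-activation convolution. A hypothetical sketch (the variable names, initializers, and SAME padding are all guesses):

def conv_layer(img, n_filters, filter_size):
    in_channels = int(img.shape[-1])  # 1 for the reshaped MNIST input
    kernel = tf.get_variable(
        'kernel', [filter_size, filter_size, in_channels, n_filters],
        initializer=tf.truncated_normal_initializer())
    bias = tf.get_variable('bias', [n_filters],
                           initializer=tf.zeros_initializer())
    conv = tf.nn.conv2d(img, kernel, strides=[1, 1, 1, 1], padding='SAME')
    return conv + bias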
Example 10
    argparser.add_argument(
        '--subset_validation',
        type=int,
        default=1000 * 1000 * 1000,
        help='Number of validation samples to compute marginal '
        'log-likelihood on.')

    args = argparser.parse_args()

    if args.model not in available_models:
        raise ValueError("Unknown model name: {}".format(args.model))

    evaluate_every = dict(zip(args.multisamples, args.evaluate_every))

    model_class = getattr(models, args.model)
    dataset = utils.get_mnist_dataset()

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        train_mean = dataset.train.images.mean(axis=0)
        output_bias = -np.log(1. / np.clip(train_mean, 0.001, 0.999) - 1.)

        dvae = model_class(
            code_size=args.code_size,
            input_size=28 * 28,
            prior_p=args.prior_proba,
            lam=args.lam,
            tau=args.tau,
            relaxation_distribution=args.relaxation_distribution,
            output_bias=output_bias,
            batch_size=args.batch_size,
            multisample_ks=args.multisamples,
Example 11
        fig, ax = plt.subplots()
        for i in range(images.shape[0]):
            _ = fig.add_subplot(1, images.shape[0] + 1, i + 1)
            _ = plt.imshow(images[i], cmap='gray')
        for f in fig.axes:
            f.set_axis_off()
        fig.set_size_inches([images.shape[0], 1])
        plt.savefig(name)
    model1.close_session()
    model2.close_session()
    print("Recombined Accuracies:", accuracies, "Mean:", np.mean(accuracies),
          "\n\n")
    return np.mean(accuracies), accuracies


if __name__ == "__main__":
    from utils import get_mnist_dataset
    from MNIST import MNIST_model
    mnist = get_mnist_dataset()
    hyperparams = json.load(open('hyperparameters.json'))
    evaluate_pair(mnist,
                  MNIST_model,
                  rand_pen=hyperparams['rand_pen'],
                  reg_strength=hyperparams['reg_strength'],
                  learning_rate=hyperparams['learning_rate'],
                  sparse_training=hyperparams['sparse_update'],
                  update_percent=hyperparams['update_percent'],
                  random_slope=hyperparams['random_slope'],
                  n_iterations=hyperparams['n_iterations'],
                  n_recombinations=hyperparams['n_recombinations'],
                  print_every=hyperparams['print_every'])
Example 12
import utils
mnist_folder = '../../data/mnist'
train_data, test_data = utils.get_mnist_dataset(1, mnist_folder=mnist_folder)
print(train_data)
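In TF1 graph mode, printing a Dataset only shows its structure (shapes and dtypes), not any data. To inspect an actual element you still need an iterator and a session; a minimal sketch under the same assumptions as above:

import tensorflow as tf

iterator = train_data.make_one_shot_iterator()
img, label = iterator.get_next()
with tf.Session() as sess:
    first_img, first_label = sess.run([img, label])
    print(first_img.shape, first_label.shape)  # e.g. (1, 28, 28) (1, 10)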