Code Example #1
def load_data():
    """
    1. Load the CIFAR-10 data set.
    It consists of 32x32 RGB images in 10 classes, with 6000 images per class.
    There are 50000 training images and 10000 test images.

    2. Converts first row to int8 as they represent different classes
        airplane : 0, automobile : 1, bird : 2, cat : 3, deer : 4, dog : 5, frog : 6, horse : 7, ship : 8, truck : 9

    3. Creates a dataset with a separate element for each row of
        the input tensor,
        repeating dataset indefinitely and shuffling it,
        then forming batches of 64 images

    4. Creating an iterator to iterate over the inputs and labels

    Returns:
        tuples of train, test, handler, labels

    """
    train_data, test_data = observations.cifar10('data/cifar',)
    test_data = test_data[0], test_data[1].astype(
        np.uint8)  # Fix test_data dtype

    train = tf.data.Dataset.from_tensor_slices(
        train_data).repeat().shuffle(10000).batch(64)
    test = tf.data.Dataset.from_tensors(test_data).repeat()

    handle = tf.placeholder(tf.string, [])
    itr = tf.data.Iterator.from_string_handle(
        handle, train.output_types, train.output_shapes)
    inputs, labels = itr.get_next()

    return train, test, handle, inputs, labels
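A minimal usage sketch for the feedable iterator returned above (not part of the original project; it assumes TensorFlow 1.x with `tensorflow as tf` imported):

train, test, handle, inputs, labels = load_data()

with tf.Session() as sess:
    # String handles let the same (inputs, labels) tensors read from either pipeline.
    train_handle = sess.run(train.make_one_shot_iterator().string_handle())
    test_handle = sess.run(test.make_one_shot_iterator().string_handle())

    batch_x, batch_y = sess.run([inputs, labels], feed_dict={handle: train_handle})
    print(batch_x.shape)  # e.g. (64, 3, 32, 32) for raw CIFAR-10 arrays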
Code Example #2
File: cnn.py Project: kekeblom/bayesian_optimization
    def __call__(self, flags):
        (train_X, train_Y), (test_X, test_Y) = observations.cifar10('data/')

        train = TensorDataset(torch.Tensor(train_X), torch.Tensor(train_Y))
        test = TensorDataset(torch.Tensor(test_X), torch.Tensor(test_Y))

        train_loader = DataLoader(train,
                                  batch_size=flags.batch_size,
                                  shuffle=True,
                                  pin_memory=self.use_cuda)

        model = ResNet(flags, np.unique(train_Y).size)

        if self.use_cuda:
            model = model.cuda()

        optimizer = SGD(model.parameters(),
                        momentum=float(flags.momentum),
                        lr=float(flags.lr))
        for epoch in range(50):
            print("Epoch {}".format(epoch))

            running_avg_loss = 0.0
            running_avg_weight = 0.01

            for X, Y in train_loader:
                X = Variable(X)
                Y = Variable(Y.long())
                if self.use_cuda:
                    X = X.cuda()
                    Y = Y.cuda()

                prediction = model(X)

                assert Y.size(0) == prediction.size(0)

                loss = nn.functional.cross_entropy(prediction, Y)

                loss.backward()
                optimizer.step()
                optimizer.zero_grad()

                running_avg_loss = running_avg_loss * (
                    1 - running_avg_weight) + (running_avg_weight *
                                               loss.data.cpu().numpy())

            print("Loss: ", running_avg_loss)

        return self._test_accuracy(model, test)
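The `_test_accuracy` method called on the last line is not included in this excerpt. A minimal sketch of such a helper, assuming standard PyTorch evaluation (hypothetical, not the project's implementation):

    def _test_accuracy(self, model, test):
        # Hypothetical helper: batch the held-out set and compute top-1 accuracy.
        loader = DataLoader(test, batch_size=256, pin_memory=self.use_cuda)
        model.eval()
        correct, total = 0, 0
        with torch.no_grad():
            for X, Y in loader:
                if self.use_cuda:
                    X, Y = X.cuda(), Y.cuda()
                predicted = model(X).argmax(dim=1)
                correct += (predicted == Y.long()).sum().item()
                total += Y.size(0)
        return correct / total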
Code Example #3
def load_data(dataset,
              train_pct=1.0,
              root_dir: str = '/home/mirgahney/Projects/datasets'):
    # Ensure the validation split is always defined, even when train_pct == 1.0.
    Xvalid, Yvalid = None, None
    if dataset == "cifar":
        (Xtrain, Ytrain), (Xtest,
                           Ytest) = observations.cifar10(f'{root_dir}/cifar')
        Xtrain = np.transpose(Xtrain, [0, 2, 3, 1])
        Xtest = np.transpose(Xtest, [0, 2, 3, 1])
        mean = Xtrain.mean((0, 1, 2))
        std = Xtrain.std((0, 1, 2))
        Xtrain = (Xtrain - mean) / std
        Xtest = (Xtest - mean) / std
        if train_pct < 1.0:
            Xvalid, Xtrain, Yvalid, Ytrain = train_test_split(
                Xtrain, Ytrain, stratify=Ytrain, test_size=train_pct)
        print(Xtrain.shape)

    elif dataset == "fashion_mnist":
        (Xtrain, Ytrain), (
            Xtest,
            Ytest) = observations.fashion_mnist(f'{root_dir}/fashion_mnist')
        mean = Xtrain.mean(axis=0)
        std = Xtrain.std()
        Xtrain = (Xtrain - mean) / std
        Xtest = (Xtest - mean) / std
        Xtrain = Xtrain.reshape(-1, 28, 28, 1)
        Xtest = Xtest.reshape(-1, 28, 28, 1)
        if train_pct < 1.0:
            Xvalid, Xtrain, Yvalid, Ytrain = train_test_split(
                Xtrain, Ytrain, stratify=Ytrain, test_size=train_pct)
        print(Xtrain.shape)

    else:
        (Xtrain, Ytrain), (Xtest,
                           Ytest) = observations.mnist(f'{root_dir}/mnist')
        mean = Xtrain.mean(axis=0)
        std = Xtrain.std()
        Xtrain = (Xtrain - mean) / std
        Xtest = (Xtest - mean) / std
        Xtrain = Xtrain.reshape(-1, 28, 28, 1)
        Xtest = Xtest.reshape(-1, 28, 28, 1)
        Xvalid, Xtrain, Yvalid, Ytrain = train_test_split(
            Xtrain, Ytrain, stratify=Ytrain, test_size=train_pct)
        print(Xtrain.shape)

    return (Xtrain, Ytrain), (Xvalid, Yvalid), (Xtest, Ytest)
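A hedged usage sketch for the loader above (the dataset directory is a placeholder; note that, as written, `train_pct` is passed to `train_test_split` as `test_size`, so the returned training split contains roughly that fraction of the data):

# Illustrative call only; the root directory is a placeholder.
(Xtrain, Ytrain), (Xvalid, Yvalid), (Xtest, Ytest) = load_data(
    "cifar", train_pct=0.8, root_dir="/tmp/datasets")
print(Xtrain.shape, Xvalid.shape, Xtest.shape)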
Code Example #4
File: main.py Project: Mirgahney/DeepResCGP_SGHMC
def load_data():
    if flags.dataset == "cifar":
        (Xtrain, Ytrain), (Xtest, Ytest) = observations.cifar10(
            '/home/mirgahney/Projects/datasets/cifar')
        Xtrain = np.transpose(Xtrain, [0, 2, 3, 1])
        Xtest = np.transpose(Xtest, [0, 2, 3, 1])
        mean = Xtrain.mean((0, 1, 2))
        std = Xtrain.std((0, 1, 2))
        Xtrain = (Xtrain - mean) / std
        Xtest = (Xtest - mean) / std
        Xtrain_2, Xtrain, Ytrain_2, Ytrain = train_test_split(
            Xtrain, Ytrain, stratify=Ytrain, test_size=flags.train_pct)
        print(Xtrain.shape)

    elif flags.dataset == "fashion_mnist":
        (Xtrain, Ytrain), (Xtest, Ytest) = observations.fashion_mnist(
            '/home/mirgahney/Projects/datasets/fashion_mnist')
        mean = Xtrain.mean(axis=0)
        std = Xtrain.std()
        Xtrain = (Xtrain - mean) / std
        Xtest = (Xtest - mean) / std
        Xtrain = Xtrain.reshape(-1, 28, 28, 1)
        Xtest = Xtest.reshape(-1, 28, 28, 1)
        if flags.train_pct < 1.0:
            Xtrain_2, Xtrain, Ytrain_2, Ytrain = train_test_split(
                Xtrain, Ytrain, stratify=Ytrain, test_size=flags.train_pct)
        print(Xtrain.shape)

    else:
        (Xtrain, Ytrain), (Xtest, Ytest) = observations.mnist(
            '/home/mirgahney/Projects/datasets/mnist')
        mean = Xtrain.mean(axis=0)
        std = Xtrain.std()
        Xtrain = (Xtrain - mean) / std
        Xtest = (Xtest - mean) / std
        Xtrain = Xtrain.reshape(-1, 28, 28, 1)
        Xtest = Xtest.reshape(-1, 28, 28, 1)
        Xtrain_2, Xtrain, Ytrain_2, Ytrain = train_test_split(
            Xtrain, Ytrain, stratify=Ytrain, test_size=flags.train_pct)
        print(Xtrain.shape)

    return (Xtrain, Ytrain), (Xtest, Ytest)
Code Example #5
    def _load_data(self):
        (X_train, Y_train), (X_test, Y_test) = observations.cifar10('/tmp/cifar10')
        X_train = np.transpose(X_train, [0, 2, 3, 1]).astype(settings.float_type)
        X_test = np.transpose(X_test, [0, 2, 3, 1]).astype(settings.float_type)
        Y_train = Y_train.reshape(-1, 1)
        Y_test = Y_test.reshape(-1, 1)

        X_test = np.concatenate([X_train[self.flags.N:], X_test], axis=0)
        Y_test = np.concatenate([Y_train[self.flags.N:], Y_test], axis=0)
        X_train = X_train[0:self.flags.N]
        Y_train = Y_train[0:self.flags.N]

        assert(Y_train.shape[0] == self.flags.N)
        assert(X_train.shape[0] == self.flags.N)

        self.X_train = X_train
        self.Y_train = Y_train
        self.X_test = X_test
        self.Y_test = Y_test

        self._preprocess_data()
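The `_preprocess_data` method is not shown in this excerpt. A plausible sketch, assuming per-channel standardization with training-set statistics as in the other examples on this page (hypothetical, not the project's code):

    def _preprocess_data(self):
        # Hypothetical: standardize both splits with training-set statistics.
        mean = self.X_train.mean(axis=(0, 1, 2))
        std = self.X_train.std(axis=(0, 1, 2))
        self.X_train = (self.X_train - mean) / std
        self.X_test = (self.X_test - mean) / std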
Code Example #6
File: baseline.py Project: Anish144/libmodular
def run():
    # Load dataset
    (x_train_1, y_train), (x_test_1, y_test_1) = observations.cifar10(
        '~/data/cifar10')
    y_test = y_test_1.astype(np.uint8)  # Fix test_data dtype

    # (x_train_2, y_train_2), (x_test_2, y_test_2) = observations.svhn(
    #     '~/data/svhn')
    # y_test_2 = y_test_2.astype(np.uint8)  # Fix test_data dtype

    # Preprocessing
    x_train = np.transpose(x_train_1, [0, 2, 3, 1])
    x_test = np.transpose(x_test_1, [0, 2, 3, 1])

    # x_train = np.concatenate([x_train_1, x_train_2])
    # y_train = np.concatenate([y_train_1, y_train_2])
    # x_test = np.concatenate([x_test_1, x_test_2])
    # y_test = np.concatenate([y_test_1, y_test_2])

    dataset_size = x_train.shape[0]

    batch_size = 50
    num_batches = dataset_size / batch_size

    # Train dataset
    train = get_dataset(x_train, y_train, batch_size)

    # Test dataset
    test_batch_size = 100
    test = get_dataset(x_test, y_test, test_batch_size)

    # Handle to switch between datasets
    handle = tf.placeholder(tf.string, [])
    itr = tf.data.Iterator.from_string_handle(
        handle, train.output_types, train.output_shapes)
    data_indices, (inputs, labels) = itr.get_next()

    inputs_tr = tf.cast(inputs, tf.float32) / 255.0
    labels_cast = tf.cast(labels, tf.int32)

    def network():
        # 4 modular CNN layers
        activation = inputs_tr

        modules_list = [64, 64, 128, 128]
        for j in range(len(modules_list)):
            input_channels = activation.shape[-1]
            module_count = modules_list[j]
            filter_shape = [3, 3, input_channels, modules_list[j]]
            activation = modular.conv_layer(activation,
                                            filter_shape,
                                            strides=[1, 2, 2, 1],
                                            pool=False)

        flattened = tf.layers.flatten(activation)

        modules_list = [2, 1]
        units = 192
        for i in range(len(modules_list)):
            flattened = tf.layers.dense(flattened, modules_list[i] * units,
                                        activation=tf.nn.relu,
                                        kernel_initializer=tf.contrib.layers.xavier_initializer())
            flattened = modular.batch_norm(flattened)

        logits = tf.layers.dense(flattened, units=10)

        target = labels_cast
        loglikelihood = tf.reduce_mean(tf.distributions.Categorical(logits).log_prob(target))

        predicted = tf.argmax(logits, axis=-1, output_type=tf.int32)
        accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, target), tf.float32))

        return (loglikelihood, accuracy)

    template = tf.make_template('network', network)

    ll, accuracy = template()

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        opt = tf.train.AdamOptimizer(learning_rate=0.001).minimize(-ll)

    create_summary(tf.reduce_mean(ll), 'loglikelihood', 'scalar')
    create_summary(accuracy, 'accuracy', 'scalar')

    with tf.Session() as sess:
        time = '{:%Y-%m-%d_%H:%M:%S}'.format(datetime.datetime.now())

        if REALRUN == 'True':
            test_writer = tf.summary.FileWriter(
                f'logs/test:Baseline_Advanced_CNN_tutorial_no_pool_{time}',
                sess.graph)
            writer = tf.summary.FileWriter(
                f'logs/train:Baseline_Advanced_CNN_tutorial_no_pool_{time}',
                sess.graph)

        general_summaries = tf.summary.merge_all()
        sess.run(tf.global_variables_initializer())
        train_dict = {handle: make_handle(sess, train)}
        test_dict = {handle: make_handle(sess, test)}


        for i in tqdm(range(100000)):

            # Sometimes generate summaries
            if i % 50 == 0:
                summaries = general_summaries
                _, summary_data = sess.run(
                    [opt, summaries],
                    train_dict)

                if REALRUN == 'True':
                    writer.add_summary(summary_data, global_step=i)

                    summary_data = sess.run(summaries, test_dict)
                    test_writer.add_summary(summary_data, global_step=i)

                    accuracy_log = []
                    for _ in range(x_test.shape[0] // test_batch_size):
                        test_accuracy = sess.run(accuracy, test_dict)
                        accuracy_log.append(test_accuracy)
                    final_accuracy = np.mean(accuracy_log)
                    summary = tf.Summary()
                    summary.value.add(tag='Test Accuracy',
                                      simple_value=final_accuracy)
                    test_writer.add_summary(summary, global_step=i)

            else:
                sess.run(opt, train_dict)

        if REALRUN == 'True':
            writer.close()
            test_writer.close()
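The `get_dataset` and `make_handle` helpers used above are not included in the excerpt. Minimal sketches of what they might look like, assuming the TF 1.x feedable-iterator pattern and the `(index, (image, label))` structure unpacked above (illustrative, not the project's code):

def get_dataset(x, y, batch_size):
    # Hypothetical: pair each example with its index so the iterator yields
    # (index, (image, label)), matching `data_indices, (inputs, labels)` above.
    indices = tf.data.Dataset.range(x.shape[0])
    examples = tf.data.Dataset.from_tensor_slices((x, y))
    ds = tf.data.Dataset.zip((indices, examples))
    return ds.repeat().shuffle(10000).batch(batch_size)


def make_handle(sess, dataset):
    # Hypothetical: create an iterator for the dataset and return its string handle.
    iterator = dataset.make_one_shot_iterator()
    return sess.run(iterator.string_handle())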
Code Example #7
def images(hps):
    cifar10 = lambda: map(itemgetter(0), observations.cifar10(hps.path))
    imagenet32 = lambda: map(lambda x: np.transpose(x, (0, 3, 1, 2)),
                             observations.small32_imagenet(hps.path))
    imagenet64 = lambda: map(lambda x: np.transpose(x, (0, 3, 1, 2)),
                             observations.small64_imagenet(hps.path))

    def quartered(dataset, times=1):
        return lambda: map(lambda x: quarter_images_repeatedly(x, times=times),
                           dataset())

    def quarter_images_repeatedly(x, times):
        for _ in range(times):
            x = quarter_images(x)

        return x

    def quarter_images(x):
        size = x.shape[2]

        quarters = [
            x[:, :, :size // 2, :size // 2], x[:, :, size // 2:, :size // 2],
            x[:, :, :size // 2, size // 2:], x[:, :, size // 2:, size // 2:]
        ]

        return np.concatenate(quarters)

    def tiled(images, tile_size=32):
        num_tiles_y, num_tiles_x = images.shape[2] // tile_size, images.shape[
            3] // tile_size

        images = images[..., :num_tiles_y * tile_size, :num_tiles_x *
                        tile_size]

        images = np.concatenate(np.split(images, num_tiles_y, axis=2), axis=0)
        images = np.concatenate(np.split(images, num_tiles_x, axis=3), axis=0)

        return images

    def tiled_full(images, tile_size=32):
        num_tiles_y, num_tiles_x = images.shape[2] // tile_size, images.shape[
            3] // tile_size
        split_indices_y = [i * tile_size for i in range(1, num_tiles_y)]
        split_indices_x = [i * tile_size for i in range(1, num_tiles_x)]
        images = np.split(images, split_indices_y, axis=2)
        images = [np.split(im, split_indices_x, axis=3) for im in images]
        images = [tile for tiles in images for tile in tiles]

        return images

    datasets = {
        "cifar10": cifar10,
        "cifar10to16": quartered(cifar10),
        "cifar10to8": quartered(cifar10, times=2),
        "small32_imagenet": imagenet32,
        "small32to16_imagenet": quartered(imagenet32),
        "small32to8_imagenet": quartered(imagenet32, times=2),
        "small64_imagenet": imagenet64,
        "small64to32_imagenet": quartered(imagenet64),
        "small64to16_imagenet": quartered(imagenet64, times=2),
        "small64to8_imagenet": quartered(imagenet64, times=3),
    }

    full_imagenet_name = 'full_imagenet'
    if hps.dataset.startswith(full_imagenet_name):
        hps.eval_batch_size = 1
        hps.batch_size = 1
        if 'split' in hps.dataset:
            n, split = re.findall(
                'split_([0-9]*)_([0-9]*)',
                hps.dataset)[0]  # split_<n_per_split>_<split_num>
            return None, full_imagenet(hps.path, int(n), split=int(split))

        n = 50000 if hps.dataset == full_imagenet_name else int(
            hps.dataset[len(full_imagenet_name):])
        return None, full_imagenet(hps.path,
                                   n,
                                   rng=np.random.RandomState(int(hps.seed)))

    tiled_imagenet_name = "tiled_imagenet"
    if hps.dataset.startswith(tiled_imagenet_name):
        n = 50000 if hps.dataset == tiled_imagenet_name else int(
            hps.dataset[len(tiled_imagenet_name):])
        return None, (np.concatenate(
            [tiled(im) for im in full_imagenet(hps.path, n)], 0))

    hybrid_imagenet_name = "hybrid_imagenet"
    if hps.dataset.startswith(hybrid_imagenet_name):
        n = 50000 if hps.dataset == hybrid_imagenet_name else int(
            hps.dataset[len(hybrid_imagenet_name):])

        hps.eval_batch_size = 1
        hps.batch_size = 1
        n_flif = hps.n_flif
        tile_sizes = [32, 64, 128]
        n_ims_per_size = [4, 16, 300]
        ims = full_imagenet(hps.path,
                            n,
                            rng=np.random.RandomState(int(hps.seed)))
        flif_ims = ims[:n_flif]
        im_locations = list(range(n_flif))  # mark the actual image boundaries
        ims = ims[n_flif:]
        out = []
        for n_ims, tile_size in zip(n_ims_per_size, tile_sizes):
            raw_ims = [
                im for im in ims[:n_ims]
                if im.shape[2] > tile_size and im.shape[3] > tile_size
            ]
            tiled_ims = [tiled_full(im, tile_size) for im in raw_ims]
            n_tiles_per_im = [len(tiled_im) for tiled_im in tiled_ims]
            for n in n_tiles_per_im:
                im_locations.append(im_locations[-1] + n)
            out += [tile for tiled_im in tiled_ims
                    for tile in tiled_im]  # unroll
            ims = ims[n_ims:]
        if len(ims):
            out += ims
            last_el = im_locations[-1]
            im_locations += [i + 1 + last_el for i in range(len(ims))]
        print('Image locations:')
        print(im_locations)
        return None, flif_ims + sorted(out, key=lambda x: x.size, reverse=True)

    test_images_name = "test_images"
    if hps.dataset.startswith(test_images_name):
        indices = range(14) if hps.dataset == test_images_name else \
            [int(i) for i in hps.dataset[len(test_images_name):].split(';')]

        hps.eval_batch_size = 1
        hps.batch_size = 1
        return None, [test_image(index) for index in indices]

    if hps.dataset.startswith("test_image"):
        index = int(hps.dataset[len("test_image"):])

        hps.eval_batch_size = 1
        hps.batch_size = 1
        return None, test_image(index)

    tiled_test_images_name = "tiled_test_images"
    if hps.dataset.startswith(tiled_test_images_name):
        indices = range(14) if hps.dataset == tiled_test_images_name else \
            [int(i) for i in hps.dataset[len(tiled_test_images_name):].split(';')]

        return None, (np.concatenate(
            [tiled(test_image(index)) for index in indices], 0))

    if hps.dataset.startswith("tiled_test_image"):
        index = int(hps.dataset[len("tiled_test_image"):])

        return None, (np.concatenate(tiled(test_image(index)), 0))

    if hps.dataset.startswith("sampling_test_images"):
        resolution = int(hps.dataset[len("sampling_test_images"):])

        hps.eval_batch_size = 1
        hps.batch_size = 1
        return None, sampling_testimages(resolution=resolution)

    if hps.dataset.startswith("sampling_test_image"):
        index = int(hps.dataset[len("sampling_test_image"):])

        hps.eval_batch_size = 1
        hps.batch_size = 1
        return None, sampling_testimage(index)

    if hps.dataset.startswith("half_sampling_test_image"):
        index = int(hps.dataset[len("half_sampling_test_image"):])

        hps.eval_batch_size = 1
        hps.batch_size = 1
        return None, sampling_testimage(index, resolution=1200)

    return datasets[hps.dataset]()
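A hedged usage sketch for the plain `cifar10` branch of `images` (the hyper-parameter object here is a stand-in built with `argparse.Namespace`; in the project it comes from the flag parser):

from argparse import Namespace

# Illustrative only: just enough fields for the "cifar10" entry of `datasets`.
hps = Namespace(path='data', dataset='cifar10', seed=0,
                batch_size=64, eval_batch_size=64, n_flif=0)
train_images, test_images = images(hps)
print(train_images.shape)  # e.g. (50000, 3, 32, 32) for CIFAR-10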
Code Example #8
File: squeezenet.py Project: cvtower/basedMl
def main():
    parser = argparse.ArgumentParser(description="SqueezeNet example.")
    parser.add_argument("--batch-size",
                        type=int,
                        default=32,
                        dest='batchsize',
                        help="Size of the mini batch. Default: 32.")
    parser.add_argument("--action",
                        type=str,
                        default='train',
                        help="Action to be performed, train/predict")
    parser.add_argument("--epochs",
                        type=int,
                        default=20,
                        help="Number of epochs, default 20.")
    parser.add_argument("--lr",
                        type=float,
                        default=0.001,
                        help="Learning rate of SGD, default 0.001.")
    parser.add_argument("--epsilon",
                        type=float,
                        default=1e-8,
                        help="Epsilon of Adam epsilon, default 1e-8.")
    parser.add_argument("-p",
                        "--path",
                        type=str,
                        default='.',
                        required=True,
                        help="Path where the images are. Default: $PWD.")
    parser.add_argument("-v",
                        "--val-path",
                        type=str,
                        default='.',
                        dest='valpath',
                        help="Path where the val images are. Default: $PWD.")
    parser.add_argument("--img-width",
                        type=int,
                        default=224,
                        dest='width',
                        help="Rows of the images, default: 224.")
    parser.add_argument("--img-height",
                        type=int,
                        default=224,
                        dest='height',
                        help="Columns of the images, default: 224.")
    parser.add_argument("--channels",
                        type=int,
                        default=3,
                        help="Channels of the images, default: 3.")

    args = parser.parse_args()
    sgd = SGD(lr=args.lr, decay=0.0002, momentum=0.9)
    batch_size = 25
    nb_class = 10
    t0 = time.time()
    if args.action == 'train':
        (x_train, y_train), (x_test, y_test) = cifar10(args.path)
        y_train = oneShot(y_train, 10)
        y_test = oneShot(y_test, 10)
        print(y_train[0])
        train_generator = generator([x_train, y_train], batch_size)
        validation_generator = generator([x_test, y_test], batch_size)
        #train_generator = dp.train_data_generator(
        #args.path, args.width, args.height)
        #validation_generator = dp.val_data_generator(
        #args.valpath, args.width, args.height)

        #classes = train_generator.class_indices
        #nb_train_samples = train_generator.samples
        #nb_val_samples = validation_generator.samples
        #print("[squeezenet_demo] N training samples: %i " % nb_train_samples)
        #print("[squeezenet_demo] N validation samples: %i " % nb_val_samples)
        #nb_class = train_generator.num_class
        #print('[squeezenet_demo] Total classes are %i' % nb_class)

        t0 = print_time(t0, 'initialize data')
        model = km.SqueezeNet(nb_class,
                              inputs=(args.channels, args.height, args.width))
        # dp.visualize_model(model)
        t0 = print_time(t0, 'build the model')

        model.compile(optimizer=sgd,
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
        t0 = print_time(t0, 'compile model')

        model.fit_generator(
            train_generator,
            samples_per_epoch=len(y_train) // batch_size * batch_size,
            nb_epoch=args.epochs,
            validation_data=validation_generator,
            nb_val_samples=(len(y_test) // batch_size * batch_size))

        t0 = print_time(t0, 'train model')
        model.save_weights('./weights.h5', overwrite=True)
        """
        model_parms = {'nb_class': nb_class,
                       'nb_train_samples': nb_train_samples,
                       'nb_val_samples': nb_val_samples,
                       'classes': classes,
                       'channels': args.channels,
                       'height': args.height,
                       'width': args.width}
        write_json(model_parms, fname='./model_parms.json')
        """
        t0 = print_time(t0, 'save model')

    elif args.action == 'predict':
        _parms = parse_json('./model_parms.json')
        model = km.SqueezeNet(_parms['nb_class'],
                              inputs=(_parms['channels'], _parms['height'],
                                      _parms['width']),
                              weights_path='./weights.h5')
        #dp.visualize_model(model)
        model.compile(optimizer=sgd,
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

        X_test, Y_test, classes, F = dp.prepare_data_test(
            args.path, args.width, args.height)
        t0 = print_time(t0, 'prepare data')

        outputs = []
        results = model.predict(X_test, batch_size=args.batchsize, verbose=1)
        classes = _parms['classes']
        for i in range(0, len(F)):
            _cls = results[i].argmax()
            max_prob = results[i][_cls]
            outputs.append({'input': F[i], 'max_probability': max_prob})
            cls = [key for key in classes if classes[key] == _cls][0]
            outputs[-1]['class'] = cls
            print('[squeezenet_demo] %s: %s (%.2f)' % (F[i], cls, max_prob))
        t0 = print_time(t0, 'predict')
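The `oneShot` and `generator` helpers are not shown in this excerpt. Minimal sketches consistent with how they are called above (assumptions, not the project's code):

import numpy as np


def oneShot(labels, nb_class):
    # Hypothetical: one-hot encode integer labels into shape (N, nb_class).
    encoded = np.zeros((len(labels), nb_class), dtype=np.float32)
    encoded[np.arange(len(labels)), labels] = 1.0
    return encoded


def generator(data, batch_size):
    # Hypothetical: yield (x, y) mini-batches indefinitely for fit_generator.
    x, y = data
    while True:
        for start in range(0, len(y) - batch_size + 1, batch_size):
            yield x[start:start + batch_size], y[start:start + batch_size]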
Code Example #9
File: utils_cifar.py Project: dago0/BayesByHypernet
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import scipy
import time
from observations import cifar10

from sklearn.calibration import calibration_curve

try:
    import cPickle as pickle
except Exception as e:
    import pickle

(x_train, y_train), (x_test, y_test) = cifar10('/data/np716/cifar_10/')

x_train = x_train.transpose(0, 2, 3, 1)
x_test = x_test.transpose(0, 2, 3, 1)

x_train_first = x_train[y_train < 5]
y_train_first = y_train[y_train < 5]

x_test_first = x_test[y_test < 5]
y_test_first = y_test[y_test < 5]

x_test_outlier = x_test[y_test >= 5]


# helper for rotation
def rotate(img, angle):
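    # (The excerpt ends here. A hypothetical body, assuming scipy.ndimage and an
    # HWC image array, could rotate by `angle` degrees while keeping the shape:)
    from scipy import ndimage
    return ndimage.rotate(img, angle, axes=(0, 1), reshape=False)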
Code Example #10
File: cifar10.py Project: louiskirsch/libmodular
def run():
    # Load dataset
    (x_train, y_train), (x_test,
                         y_test) = observations.cifar10('~/data/cifar10')
    y_test = y_test.astype(np.uint8)  # Fix test_data dtype
    dataset_size = x_train.shape[0]

    # Train dataset
    train = tf.data.Dataset.from_tensor_slices(
        (x_train, y_train))._enumerate().repeat().shuffle(50000).batch(128)
    # Test dataset
    dummy_data_indices = tf.zeros([x_test.shape[0]], dtype=tf.int64)
    test = tf.data.Dataset.from_tensors(
        (dummy_data_indices, (x_test, y_test))).repeat()

    # Handle to switch between datasets
    handle = tf.placeholder(tf.string, [])
    itr = tf.data.Iterator.from_string_handle(handle, train.output_types,
                                              train.output_shapes)
    data_indices, (inputs, labels) = itr.get_next()

    # Preprocessing
    inputs = tf.cast(inputs, tf.float32) / 255.0
    inputs = tf.transpose(inputs, perm=(0, 2, 3, 1))
    labels = tf.cast(labels, tf.int32)

    def network(context: modular.ModularContext):
        # 4 modular CNN layers
        activation = inputs
        for _ in range(4):
            input_channels = activation.shape[-1]
            filter_shape = [3, 3, input_channels, 8]
            modules = modular.create_conv_modules(filter_shape,
                                                  module_count=5,
                                                  strides=[1, 1, 1, 1])
            hidden = modular.modular_layer(activation,
                                           modules,
                                           parallel_count=1,
                                           context=context)
            pooled = tf.nn.max_pool(hidden,
                                    ksize=[1, 2, 2, 1],
                                    strides=[1, 2, 2, 1],
                                    padding='SAME')
            activation = tf.nn.relu(pooled)

        flattened = tf.layers.flatten(activation)
        logits = tf.layers.dense(flattened, units=10)

        target = modular.modularize_target(labels, context)
        loglikelihood = tf.distributions.Categorical(logits).log_prob(target)

        predicted = tf.argmax(logits, axis=-1, output_type=tf.int32)
        accuracy = tf.reduce_mean(
            tf.cast(tf.equal(predicted, target), tf.float32))

        selection_entropy = context.selection_entropy()
        batch_selection_entropy = context.batch_selection_entropy()

        return loglikelihood, logits, accuracy, selection_entropy, batch_selection_entropy

    template = tf.make_template('network', network)
    optimizer = tf.train.AdamOptimizer()
    e_step, m_step, eval = modular.modularize(template,
                                              optimizer,
                                              dataset_size,
                                              data_indices,
                                              sample_size=10)
    ll, logits, accuracy, s_entropy, bs_entropy = eval

    tf.summary.scalar('loglikelihood', tf.reduce_mean(ll))
    tf.summary.scalar('accuracy', accuracy)
    tf.summary.scalar('entropy/exp_selection', tf.exp(s_entropy))
    tf.summary.scalar('entropy/exp_batch_selection', tf.exp(bs_entropy))

    with tf.Session() as sess:
        time = '{:%Y-%m-%d_%H:%M:%S}'.format(datetime.datetime.now())
        writer = tf.summary.FileWriter('logs/train_{}'.format(time))
        test_writer = tf.summary.FileWriter('logs/test_{}'.format(time))
        general_summaries = tf.summary.merge_all()
        m_step_summaries = tf.summary.merge(
            [create_m_step_summaries(), general_summaries])
        sess.run(tf.global_variables_initializer())
        train_dict = {handle: make_handle(sess, train)}
        test_dict = {handle: make_handle(sess, test)}

        for i in range(10000):
            # Switch between E-step and M-step
            step = e_step if i % 10 == 0 else m_step

            # Sometimes generate summaries
            if i % 99 == 0:
                summaries = m_step_summaries if step == m_step else general_summaries
                _, summary_data = sess.run([step, summaries], train_dict)
                writer.add_summary(summary_data, global_step=i)
                summary_data = sess.run(general_summaries, test_dict)
                test_writer.add_summary(summary_data, global_step=i)
            else:
                sess.run(step, train_dict)

        writer.close()
        test_writer.close()
Code Example #11
import seaborn as sns
import pandas as pd
import scipy
import time
from observations import cifar10

from sklearn.calibration import calibration_curve

try:
    import cPickle as pickle
except Exception as e:
    import pickle

(x_train,
 y_train), (x_test,
            y_test) = cifar10('/vol/biomedic/users/np716/data/cifar_10/')

x_train = x_train.transpose(0, 2, 3, 1)
x_test = x_test.transpose(0, 2, 3, 1)

x_train_first = x_train[y_train < 5]
y_train_first = y_train[y_train < 5]

x_test_first = x_test[y_test < 5]
y_test_first = y_test[y_test < 5]

x_test_outlier = x_test[y_test >= 5]


# helper for rotation
def rotate(img, angle):
Code Example #12
File: load_data.py Project: biggs/viterbi-em-images
    def _load(self, datadir):
        return obs.cifar10(join(datadir, "cifar10"))
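A hedged usage sketch showing the imports this two-line method relies on (`obs` as `observations` and `join` as `os.path.join` are assumptions; the wrapper class name is hypothetical):

import observations as obs
from os.path import join


class Cifar10Source:
    # Hypothetical wrapper class around the method shown above.
    def _load(self, datadir):
        return obs.cifar10(join(datadir, "cifar10"))


(x_train, y_train), (x_test, y_test) = Cifar10Source()._load("/tmp/data")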
Code Example #13
File: tf_train.py Project: c1a1o1/hilloc
def images(hps):
    cifar10 = lambda: map(itemgetter(0), observations.cifar10(hps.path))
    imagenet32 = lambda: map(lambda x: np.transpose(x, (0, 3, 1, 2)),
                             observations.small32_imagenet(hps.path))
    imagenet64 = lambda: map(lambda x: np.transpose(x, (0, 3, 1, 2)),
                             observations.small64_imagenet(hps.path))

    def quartered(dataset, times=1):
        return lambda: map(lambda x: quarter_images_repeatedly(x, times=times),
                           dataset())

    def quarter_images_repeatedly(x, times):
        for _ in range(times):
            x = quarter_images(x)

        return x

    def quarter_images(x):
        size = x.shape[2]

        quarters = [
            x[:, :, :size // 2, :size // 2], x[:, :, size // 2:, :size // 2],
            x[:, :, :size // 2, size // 2:], x[:, :, size // 2:, size // 2:]
        ]

        return np.concatenate(quarters)

    datasets = {
        "cifar10": cifar10,
        "cifar10to16": quartered(cifar10),
        "cifar10to8": quartered(cifar10, times=2),
        "small32_imagenet": imagenet32,
        "small32to16_imagenet": quartered(imagenet32),
        "small32to8_imagenet": quartered(imagenet32, times=2),
        "small64_imagenet": imagenet64,
        "small64to32_imagenet": quartered(imagenet64),
        "small64to16_imagenet": quartered(imagenet64, times=2),
        "small64to8_imagenet": quartered(imagenet64, times=3),
    }

    test_images_name = "test_images"
    if hps.dataset.startswith(test_images_name):
        indices = range(14) if hps.dataset == test_images_name else \
            [int(i) for i in hps.dataset[len(test_images_name):].split(';')]

        hps.eval_batch_size = 1
        hps.batch_size = 1
        return None, [test_image(index) for index in indices]

    if hps.dataset.startswith("test_image"):
        index = int(hps.dataset[len("test_image"):])

        hps.eval_batch_size = 1
        hps.batch_size = 1
        return None, test_image(index)

    if hps.dataset.startswith("sampling_test_images"):
        resolution = int(hps.dataset[len("sampling_test_images"):])

        hps.eval_batch_size = 1
        hps.batch_size = 1
        return None, sampling_testimages(resolution=resolution)

    if hps.dataset.startswith("sampling_test_image"):
        index = int(hps.dataset[len("sampling_test_image"):])

        hps.eval_batch_size = 1
        hps.batch_size = 1
        return None, sampling_testimage(index)

    if hps.dataset.startswith("half_sampling_test_image"):
        index = int(hps.dataset[len("half_sampling_test_image"):])

        hps.eval_batch_size = 1
        hps.batch_size = 1
        return None, sampling_testimage(index, resolution=1200)

    return datasets[hps.dataset]()