Example no. 1
def main():
    # Load 6,000 training and 1,000 test examples covering digits 0-9.
    train_data, train_label, test_data, test_label = mnist(ntrain=6000, ntest=1000, digit_range=[0, 10])
    print("X shape is ", train_data.shape)

    # Train the autoencoder once for each value of p.
    p_vals = [0.01, 0.1, 0.5, 0.8]
    for p in p_vals:
        se_autoencoder(train_data, p)
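
A minimal way to run the sweep above when the file is executed as a script (assuming main lives at module level):

if __name__ == '__main__':
    main()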
Example no. 2
def sload(dataset, **kwargs):
    """Load a single-perspective dataset and return its attributes in a dictionary."""

    data = {}
    keys = [
        'distances', 'features', 'sample_labels', 'sample_classes',
        'sample_colors', 'colors', 'edges', 'labels'
    ]

    for key in keys:
        data[key] = None

    if dataset == 'equidistant':
        from clusters import equidistant
        data['distances'] = equidistant(**kwargs)
    elif dataset == 'clusters':
        from clusters import clusters
        data['distances'], data['sample_classes'] = clusters(**kwargs)
        data['sample_colors'] = data['sample_classes']
    elif dataset == 'clusters2':
        from clusters import clusters2
        data['features'], data['sample_classes'] = clusters2(**kwargs)
        data['distances'] = data['features']
        data['sample_colors'] = data['sample_classes']
    elif dataset == 'mnist':
        data['features'], data['sample_classes'] = mnist(**kwargs)
        data['distances'] = data['features']
        data['sample_colors'] = data['sample_classes']
    else:
        print('***dataset not found***')

    return data
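
A minimal usage sketch for sload, assuming the clusters helper accepts n_samples and n_clusters keyword arguments (those parameter names are not shown above):

data = sload('clusters', n_samples=200, n_clusters=3)
D = data['distances']            # pairwise distances produced by clusters()
labels = data['sample_classes']  # cluster assignment per sample
print(type(D), labels[:10])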
Example no. 3
import numpy as np


def mnist_2_class():
    """ Read the MNIST dataset, keeping only the digits 0 and 1 """
    X_train, X_test, Y_train, Y_test, fn_train, fn_test = mnist()

    # np.where returns a tuple of index arrays; take its first element so the
    # boolean filter yields plain 1-D index arrays.
    index_train = np.where((from_one_hot(Y_train).flatten() == 0)
                           | (from_one_hot(Y_train).flatten() == 1))[0]
    index_test = np.where((from_one_hot(Y_test).flatten() == 0)
                          | (from_one_hot(Y_test).flatten() == 1))[0]

    return X_train[index_train, :], X_test[index_test, :], \
           Y_train[index_train, :], Y_test[index_test, :], \
           fn_train[index_train], fn_test[index_test]
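
A short usage sketch for the two-class loader above; the expected label values rely on from_one_hot behaving as in the snippet:

X_tr, X_te, Y_tr, Y_te, fn_tr, fn_te = mnist_2_class()
print(X_tr.shape, X_te.shape)                   # only digit-0 and digit-1 samples remain
print(np.unique(from_one_hot(Y_tr).flatten()))  # expected: [0 1]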
Example no. 4
from keras.callbacks import TensorBoard, CSVLogger, EarlyStopping, ReduceLROnPlateau


def train(model,
          name,
          epoch=2 ** 7,
          computational_effort_factor=8,
          noise=False):
    batch_size = epoch * computational_effort_factor
    print("epoch: {0}, batch: {1}".format(epoch, batch_size))
    x_train, _, x_test, _ = mnist()
    # Optionally corrupt the inputs so the model trains as a denoising autoencoder.
    if noise:
        x_input = add_noise(x_train)
    else:
        x_input = x_train

    hf5 = "{0}/model.h5".format(name)
    model.fit(x_input,
              x_train,
              nb_epoch=epoch,
              batch_size=batch_size,
              shuffle=True,
              validation_data=(x_test, x_test),
              callbacks=[
                  TensorBoard(log_dir="{0}".format(name)),
                  CSVLogger("{0}/log.csv".format(name), append=True),
                  EarlyStopping(monitor='loss',
                                patience=6,
                                verbose=1,
                                mode='min',
                                min_delta=0.0001),
                  ReduceLROnPlateau(monitor='loss',
                                    factor=0.7,
                                    patience=3,
                                    verbose=1,
                                    mode='min',
                                    epsilon=0.0001)
              ])
    model.save(hf5)
    plot_examples(name, model, x_test)
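
A minimal usage sketch, assuming a compiled Keras autoencoder is available from a project helper (build_autoencoder is a hypothetical name) and that name points at a writable directory for the logs and saved model:

import os

name = 'runs/denoising_ae'          # hypothetical output directory
os.makedirs(name, exist_ok=True)    # the CSV log and model.h5 are written under this path
autoencoder = build_autoencoder()   # hypothetical helper returning a compiled Keras model
train(autoencoder, name, noise=True)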
Example no. 5
import sys, os

from PIL import Image

import numpy as np
from keras.datasets import mnist

sys.path.append(os.pardir)  # make files in the parent directory importable


def img_show(img):
    pil_img = Image.fromarray(np.uint8(img))
    pil_img.show()


(x_train, t_train), (x_test, t_test) = mnist(flatten=True, normalize=False)

# Print the shape of each array
print(x_train.shape)
print(t_train.shape)
print(x_test.shape)
print(t_test.shape)

# (training images, training labels), (test images, test labels)
(x_train, t_train), (x_test, t_test) = mnist(flatten=True, normalize=False)

img = x_train[0]
label = t_train[0]
print(label)

print(img.shape)
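
Because the data was loaded with flatten=True, each image is a flat 784-element vector and must be reshaped back to 28x28 before img_show can display it; a minimal continuation of the snippet above:

img = img.reshape(28, 28)  # restore the original 28x28 layout
print(img.shape)           # (28, 28)
img_show(img)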
Example no. 6
import numpy as np


def mload(dataset, n_samples=100, n_perspectives=2, **kwargs):
    "returns the per-perspective distances/features and a dictionary of dataset attributes"

    distances = []
    data = {}
    if dataset == 'equidistant':
        length = n_samples * (n_samples - 1) // 2
        for persp in range(n_perspectives):
            distances.append(np.random.normal(1, 0.1, length))
        data['image_colors'] = n_samples - 1
    elif dataset == 'disk':
        import misc, projections
        X = misc.disk(n_samples, dim=3)
        proj = projections.PROJ()
        Q = proj.generate(number=n_perspectives, method='random')
        Y = proj.project(Q, X)
        data['true_images'] = Y
        data['true_embedding'] = X
        data['true_projections'] = Q
        distances = Y
        data['image_colors'] = 0
    elif dataset == 'clusters2a':
        from clusters import createClusters
        distances, data['image_colors'] = \
            createClusters(n_samples, n_perspectives)
    elif dataset == 'clusters':
        from clusters import clusters
        distances = []
        data['image_classes'] = []
        data['image_colors'] = []
        if 'n_clusters' in kwargs:
            n_clusters = kwargs.pop('n_clusters')
        if isinstance(n_clusters, int):
            n_clusters = [n_clusters] * n_perspectives
        else:
            n_perspectives = len(n_clusters)
        for i in range(n_perspectives):
            d, c = clusters(n_samples, n_clusters=n_clusters[i], **kwargs)
            distances.append(d)
            data['image_classes'].append(c)
            data['image_colors'].append(c)
    elif dataset == 'clusters2':
        from clusters import clusters2
        distances = []
        data['image_colors'] = []
        if 'n_clusters' in kwargs:
            n_clusters = kwargs['n_clusters']
        if isinstance(n_clusters, int):
            n_clusters = [n_clusters] * n_perspectives
        for persp in range(n_perspectives):
            d, c = clusters2(n_samples, n_clusters[persp])
            distances.append(d)
            data['image_colors'].append(c)
    elif dataset == '123':
        import projections
        X = np.genfromtxt(directory + '/123/123.csv', delimiter=',')
        X1 = np.genfromtxt(directory + '/123/1.csv', delimiter=',')
        X2 = np.genfromtxt(directory + '/123/2.csv', delimiter=',')
        X3 = np.genfromtxt(directory + '/123/3.csv', delimiter=',')
        proj = projections.PROJ()
        Q = proj.generate(number=3, method='cylinder')
        distances = [X1, X2, X3]
        data['true_embedding'] = X
        data['true_projections'] = Q
        data['true_images'] = [X1, X2, X3]
        data['colors'] = True
    elif dataset == 'florence':
        import florence
        distances, dictf = florence.setup()
        for key, value in dictf.items():
            data[key] = value
    elif dataset == 'credit':
        import csv
        path = directory + '/credit/'
        Y = []
        for ind in ['1', '2', '3']:
            # Read each matrix, then jitter the entries slightly to avoid exact ties.
            with open(path + 'discredit3_tsne_cluster_1000_' + ind + '.csv') as filec:
                array = np.array(list(csv.reader(filec)), dtype='float')
            array += np.random.randn(len(array), len(array)) * 1e-4
            Y.append(array)
        distances = Y
    elif dataset == 'phishing':
        import phishing
        features = phishing.features
        labels = phishing.group_names
        if n_samples is None:
            n_samples = len(features[0])
        Y, perspective_labels = [], []
        for group in [0, 1, 2, 3]:
            assert group in [0, 1, 2, 3]
            Y.append(features[group][0:n_samples])
            perspective_labels.append(labels[group])
        sample_colors = phishing.results[0:n_samples]
        distances = Y
        data['sample_colors'] = sample_colors
        data['perspective_labels'] = perspective_labels
    elif dataset == 'mnist':
        X, data['sample_colors'] = mnist(**kwargs)
        data['features'] = X
        # Split each flattened 28x28 image into its top and bottom halves,
        # giving two perspectives of the same samples.
        distances = [X[:, 0:28 * 14], X[:, 28 * 14:]]
        data['sample_classes'] = data['sample_colors']
    else:
        print('***dataset not found***')
    return distances, data
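
A minimal usage sketch for mload with the 'clusters' dataset; the keyword names follow the branch above, and the clusters helper is assumed to accept them:

distances, data = mload('clusters', n_samples=200, n_perspectives=3, n_clusters=2)
# One distance structure and one class/colour labelling per perspective.
print(len(distances), len(data['image_classes']), len(data['image_colors']))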
Example no. 7
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.datasets import mnist
from keras.callbacks import ModelCheckpoint
import numpy as np
import os
from source import DataNormalizer, get_activations, mnist, FileRecord, CvIterator, CvTestPerformance
from model_repo import plain_vgg


# model_save_dir = '../saved_models/VGG_MNIST/'
# model_save_filename = 'weights.{epoch:02d}-{val_acc:.2f}.hdf5'
k = 10
nb_epoch = 100

(all_x_train, all_y_train), (real_x_test, real_y_test) = mnist(for_conv=True)
test = CvIterator(all_x_train[1:100], all_y_train[1:100], k=k, validation_split=0.2)
# cv_recorder = CvTestPerformance('plainMnist')

i = 0
for ((x_train, y_train), (x_valid, y_valid)), (x_test, y_test) in test:
    # model = plain_vgg(num_layers=6)
    model = plain_vgg()
    # model_status_save = '../data/assess_plainMnist{0}.txt'.format(i)
    # input = open(model_status_save, 'a')
    # input.write('loss,val_loss,acc,val_acc,itr\n')
    # file_recorder = FileRecord(input)
    callbacks = []
    # callbacks.append(file_recorder)
    model.fit(x_train, y_train, batch_size=256, nb_epoch=nb_epoch,
              callbacks=callbacks, validation_data=(x_valid, y_valid),
              show_accuracy=True)
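    # Hypothetical per-fold evaluation on the held-out CV test split; assumes the
    # model returned by plain_vgg() is compiled, so evaluate() is available.
    score = model.evaluate(x_test, y_test, batch_size=256, verbose=0)
    print('fold {0} test score: {1}'.format(i, score))
    i += 1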
Example no. 8
def mobilenetv2():
    from keras.applications import MobileNetV2
    model = MobileNetV2(weights='imagenet')
    keras2lwnn(model, 'mobilenetv2')

if __name__ == '__main__':
    transpose()
    conv2d('conv2d_1', shape=[5, 5, 3], filters=1, kernel_size=(2, 2), strides=(1, 1), padding="same")
    conv2d('conv2d_2')
    conv2d('conv2d_3', shape=[45, 17, 23], filters=13, kernel_size=(2, 3), strides=(3, 2), padding="valid")
    relu('relu_1')
    maxpool('maxpool_1')
    maxpool('maxpool_2', shape=[30, 20, 5], pool_size=(3, 2), strides=(3, 2))
    dense('dense_1')
    dense('dense_2', 13, 1578)
    softmax('softmax_1')
    mnist()
    pad('pad_1')
    pad('pad_2', shape=[52, 12, 7], padding=(2, 5))
    conv2d_bn('conv2dbn_1')
    conv1d('conv1d_1')
    dwconv2d('dwconv2d_1')
    dwconv2d('dwconv2d_2', shape=[57, 15, 3], kernel_size=(2, 2), strides=(1, 1), padding="same")
    dwconv2d('dwconv2d_3', shape=[45, 17, 23], kernel_size=(2, 3), strides=(3, 2), padding="valid")
    maxpool1d('maxpool1d_1')
    maxpool1d('maxpool1d_2', shape=[34, 29], pool_size=3, strides=3)
    concat('concat_1')
    concat('concat_2', axis=1)
    concat('concat_3', axis=2)
    concat('concat_4', axis=0)
    uci_inception()
    avgpool('avgpool_1')