Example #1
    def get_ndarray(self):

        (x_train, y_train), (x_test, y_test) = load_cifar10(True)
        # y is still integer-encoded at this point

        #y_train = np.identity(10)[y_train]
        #y_test  = np.identity(10)[y_test ]

        print(x_train[:2])
        print(x_train.shape)
        print(y_train.shape)
        print(x_test.shape)
        print(y_test.shape)
        return (x_train, y_train), (x_test, y_test)
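
# The commented-out lines in get_ndarray use a common NumPy trick for one-hot
# encoding integer labels by indexing an identity matrix; a tiny illustration
# (the _demo names below are not part of the original snippet):
import numpy as np
y_demo = np.array([3, 0, 9])              # integer class labels
one_hot_demo = np.identity(10)[y_demo]    # rows 3, 0, 9 of the 10x10 identity
print(one_hot_demo.shape)                 # (3, 10)
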
def main():
    cifar10_data = cifar10.load_cifar10()

    learning_rate = 0.001
    training_epochs = 100
    batch_size = 100
    n_input = 3072
    n_train = cifar10_data.train.num_examples

    boundary = 6.0 / (32  + 64)
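    # Note: this bound is computed from the channel counts alone; the standard
    # Glorot/Xavier uniform limit would be sqrt(6.0 / (fan_in + fan_out)).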
    weights = {
        'encode_conv_weight_1':tf.Variable(tf.random_uniform((3, 3, 3, 32), minval = -boundary, maxval = boundary)),
        'encode_conv_weight_2':tf.Variable(tf.random_uniform((3, 3, 32, 64), minval = -boundary, maxval = boundary)),
        'encode_conv_weight_3':tf.Variable(tf.random_uniform((3, 3, 64, 64), minval = -boundary, maxval = boundary)),
        'encode_dense_weight_1':tf.Variable(xavier_init(1024, 500)),
        'decode_dense_weight_1':tf.Variable(xavier_init(500, 1024)),
        'decode_conv_weight_3': tf.Variable(tf.random_uniform((3, 3, 64, 64), minval = -boundary, maxval = boundary)),
        'decode_conv_weight_2': tf.Variable(tf.random_uniform((3, 3, 64, 32), minval = -boundary, maxval = boundary)),
        'decode_conv_weight_1': tf.Variable(tf.random_uniform((3, 3, 32, 3), minval = -boundary, maxval = boundary))
    }

    bias = {
        'encode_conv_bias_1':tf.Variable(tf.zeros([32])),
        'encode_conv_bias_2':tf.Variable(tf.zeros([64])),
        'encode_conv_bias_3':tf.Variable(tf.zeros([64])),
        'encode_dense_bias_1':tf.Variable(tf.zeros([500])),
        'decode_dense_bias_1':tf.Variable(tf.zeros([1024])),
        'decode_conv_bias_3': tf.Variable(tf.zeros([64])),
        'decode_conv_bias_2': tf.Variable(tf.zeros([32])),
        'decode_conv_bias_1': tf.Variable(tf.zeros([3]))
    }
    
    weights_key = ['encode_conv_weight_1',
                    'encode_conv_weight_2',
                    'encode_conv_weight_3',
                    'encode_dense_weight_1',
                    'decode_dense_weight_1',
                    'decode_conv_weight_3',
                    'decode_conv_weight_2', 
                    'decode_conv_weight_1']
    
    bias_key = ['encode_conv_bias_1',
                    'encode_conv_bias_2',
                    'encode_conv_bias_3',
                    'encode_dense_bias_1',
                    'decode_dense_bias_1',
                    'decode_conv_bias_3',
                    'decode_conv_bias_2', 
                    'decode_conv_bias_1']

    x = tf.placeholder(tf.float32, [batch_size, n_input])

    reconstruction = convAutoencoder(x, weights, bias, weights_key, bias_key)
    #loss = tf.sqrt(tf.reduce_mean(tf.square(reconstruction - x)))
    loss = loss_x_entropy(reconstruction, x) 
    #train_step = tf.train.MomentumOptimizer(learning_rate = learning_rate, momentum = 0.9).minimize(loss)
    train_step = tf.train.AdamOptimizer(learning_rate = learning_rate, beta1 = 0.9, beta2 = 0.999).minimize(loss)

    init = tf.initialize_all_variables()
   

    sess = tf.Session()
 
    summary_dir = pjoin(FLAGS.summary_dir, 'conv_auto_encoder_training_cifar10')
    summary_writer = tf.train.SummaryWriter(summary_dir,
                                            graph_def=sess.graph_def,
                                            flush_secs=FLAGS.flush_secs)
    
    print("\n\n")
    print("| Training Step | Cross Entropy |   Epoch  |")
    print("|---------------|---------------|----------|")

    sess.run(init)
    step = 1
    while step * batch_size < training_epochs * n_train:
        batch_x, batch_y = cifar10_data.train.next_batch(batch_size)
        #print(np.mean(batch_x))
        feed_dict = {x:batch_x}
        sess.run(train_step, feed_dict = feed_dict) 
        if step % 450 == 0:
            loss_summary = sess.run(loss, feed_dict = feed_dict)
            #loss_summary_op = tf.scalar_summary("reconstruction_error", loss_summary)
            
            #summary_scalar_str = sess.run(loss_summary_op, feed_dict = {x: batch_x})
            #summary_writer.add_summary(summary_scalar_str, step)
                
            output = "| {0:>13} | {1:13.4f} | Epoch {2}  |"\
                 .format(step, loss_summary, step * batch_size // n_train + 1)

            print(output)
        if step % 900 == 0:
            image_summary_op = \
                tf.image_summary("training_images",
                             tf.reshape(x,
                                        (FLAGS.batch_size,
                                         32,
                                         32, 3)),
                             max_images=FLAGS.batch_size)
            reconstruction_summary_op = \
                tf.image_summary("reconstruction_image",
                            tf.reshape(reconstruction, 
                                        (FLAGS.batch_size,
                                         32,
                                         32, 3)),
                             max_images=FLAGS.batch_size)

            summary_img_str = sess.run(image_summary_op,
                                   feed_dict=feed_dict)
            summary_writer.add_summary(summary_img_str, step)

            summary_recon_image_str = sess.run(reconstruction_summary_op,
                                    feed_dict = feed_dict)
            summary_writer.add_summary(summary_recon_image_str, step)
        step+=1
    batch_x, batch_y = cifar10_data.test.next_batch(FLAGS.batch_size)
    feed_dict = {x: np.array(batch_x)}

    recon_target = sess.run(reconstruction, feed_dict=feed_dict)
    np.save("convolutional_autoencoder_cifar10.npy", recon_target.reshape(100, 32, 32, 3))

    print("Optimization Finished!")
Example #3
def _generate_cifar10_dataset():
    (Xtr, Ytr), (Xts, Yts) = load_cifar10()
    print(Xtr.shape, Xts.shape)
    dataset = CIFAR10Dataset(Xtr, Ytr, Xts, Yts)
    dataset.save()
Example #4
import sys
import numpy as np
import pickle
from skimage import color
from skimage.transform import resize

#Classification
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.ensemble import RandomForestClassifier

#Add filter library path
sys.path.append('python')

#Download and open the database
from cifar10 import load_cifar10
Train = load_cifar10(meta='cifar-10-batches-py', mode=5)
Test = load_cifar10(meta='cifar-10-batches-py', mode='test')

TrainImages = Train["data"]
TrainLabels = Train["labels"]
TestImages = Test["data"]
TestLabels = Test["labels"]

#Transform the images to grayscale
Train = []
for i in range(0, len(TrainImages)):
    img = color.rgb2gray(resize(np.asanyarray(TrainImages[i]), (32, 32)))
    Train.append(img)

Test = []
for i in range(0, len(TestImages)):
    img = color.rgb2gray(resize(np.asanyarray(TestImages[i]), (32, 32)))
    Test.append(img)
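
# The snippet above stops before the classifier is fit. A minimal (assumed)
# continuation that flattens the greyscale images and trains the imported
# RandomForestClassifier could look like this:
X_train = np.asarray(Train).reshape(len(Train), -1)
X_test = np.asarray(Test).reshape(len(Test), -1)

clf = RandomForestClassifier(n_estimators=100, random_state=0)
clf.fit(X_train, TrainLabels)
pred = clf.predict(X_test)
print("Accuracy:", accuracy_score(TestLabels, pred))
print(confusion_matrix(TestLabels, pred))
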
def main_unsupervised():
  with tf.Graph().as_default() as g:
    sess = tf.Session()

    num_hidden = FLAGS.num_hidden_layers

    # The shape of the hidden layers
    ae_hidden_shapes = [getattr(FLAGS, "hidden{0}_units".format(j + 1))
                        for j in xrange(num_hidden)]

    # The shapes of all the layers, including the input layer and the final
    # reconstruction layer (same size as the input)
    ae_shape = [FLAGS.image_pixels] + ae_hidden_shapes + [FLAGS.image_pixels]

    ae = AutoEncoder(ae_shape, sess)


    cifar10_data = cifar10.load_cifar10()
    num_train = cifar10_data.train.num_examples


    n = 4
    with tf.variable_scope("net_training"):

        input_ = tf.placeholder(dtype=tf.float32,
                                shape=(FLAGS.batch_size, ae_shape[0]),
                                name='ae_input_pl')
        
        target_for_loss = ae.net(input_, n)


        loss = loss_x_entropy(target_for_loss, input_)
        #loss = tf.sqrt(tf.reduce_mean(tf.square(input_ - target_for_loss)))


    


        summary_dir = pjoin(FLAGS.summary_dir, 'auto_encoder_training')
        summary_writer = tf.train.SummaryWriter(summary_dir,
                                                graph_def=sess.graph_def,
                                                flush_secs=FLAGS.flush_secs)
        summary_vars = [ae[key] for key in ae._variables.keys()]

        hist_summaries = [tf.histogram_summary(v.op.name, v)
                          for v in summary_vars]
        
        
        summary_op = tf.merge_summary(hist_summaries)

        loss_summary_op_train = tf.scalar_summary("reconstruction error", loss)


        #train_step = tf.train.MomentumOptimizer(learning_rate = 0.01, momentum = 0.9).minimize(loss)
        train_step = tf.train.AdamOptimizer(learning_rate = 0.001, beta1 = 0.9, beta2 = 0.999).minimize(loss)
        sess.run(tf.initialize_all_variables())

        print("\n\n")
        print("| Training Step | Cross Entropy Train | Cross Entropy Test | Layer  |   Epoch  |")
        print("|---------------|------------------------|-----------------------|--------|----------|")

        #for step in xrange(FLAGS.pretraining_epochs * num_train):
        for step in xrange(45000):
            input_data, input_label = cifar10_data.train.next_batch(FLAGS.batch_size)
            feed_dict = {input_: np.array(input_data)}

            _, loss_value = sess.run([train_step, loss],
                                     feed_dict=feed_dict)

            if step % 450 == 0:
                summary_str = sess.run(summary_op, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)

                total_test_loss = 0
                for test_step in xrange(cifar10_data.test.num_examples // FLAGS.batch_size):
                    input_data, input_label = cifar10_data.test.next_batch(FLAGS.batch_size)
                    feed_dict = {input_: np.array(input_data)}
                    test_loss = sess.run(loss, feed_dict = feed_dict)
                    total_test_loss += test_loss * FLAGS.batch_size

                loss_summary_op_test = tf.scalar_summary("reconstruction_error_test", total_test_loss)
                
                loss_summary_op = tf.merge_summary([loss_summary_op_train, loss_summary_op_test])
                summary_scalar_str = sess.run(loss_summary_op, feed_dict = feed_dict)
                summary_writer.add_summary(summary_scalar_str, step)
                    
                output = "| {0:>13} | {1:19.4f} | {2:19.4f} |Layer {3} | Epoch {4}  |"\
                     .format(step, loss_value, total_test_loss / cifar10_data.test.num_examples, n, step * FLAGS.batch_size//num_train + 1)

                print(output)
            if step % 900 == 0:
                image_summary_op = \
                    tf.image_summary("training_images",
                                 tf.reshape(input_,
                                            (FLAGS.batch_size,
                                             FLAGS.image_size,
                                             FLAGS.image_size, 3)),
                                 max_images=FLAGS.batch_size)
                reconstruction_summary_op = \
                    tf.image_summary("reconstruction_image",
                                tf.reshape(target_for_loss, 
                                            (FLAGS.batch_size,
                                             FLAGS.image_size,
                                             FLAGS.image_size, 3)),
                                 max_images=FLAGS.batch_size)

                summary_img_str = sess.run(image_summary_op,
                                       feed_dict=feed_dict)
                summary_writer.add_summary(summary_img_str, step)

                summary_recon_image_str = sess.run(reconstruction_summary_op,
                                        feed_dict = feed_dict)
                summary_writer.add_summary(summary_recon_image_str, step)
        
        input_data, input_label = cifar10_data.test.next_batch(FLAGS.batch_size)
        feed_dict = {input_: np.array(input_data)}

        recon_target = sess.run(target_for_loss, feed_dict=feed_dict)
        np.save("autoencoder_cifar10.npy", recon_target.reshape(100, 32, 32, 3))

  return ae
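
# The FLAGS object used throughout these snippets is never defined here. With
# the old tf.app.flags API it might be set up roughly as below; every name and
# default value is an assumption inferred from how FLAGS is used above.
import tensorflow as tf

flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_integer('batch_size', 100, 'Mini-batch size.')
flags.DEFINE_integer('image_size', 32, 'CIFAR-10 image height/width.')
flags.DEFINE_integer('image_pixels', 32 * 32 * 3, 'Flattened image size.')
flags.DEFINE_integer('num_hidden_layers', 3, 'Number of autoencoder hidden layers.')
flags.DEFINE_integer('hidden1_units', 2048, 'Units in hidden layer 1.')
flags.DEFINE_integer('hidden2_units', 1024, 'Units in hidden layer 2.')
flags.DEFINE_integer('hidden3_units', 500, 'Units in hidden layer 3.')
flags.DEFINE_string('summary_dir', 'summaries', 'Directory for TensorBoard summaries.')
flags.DEFINE_integer('flush_secs', 120, 'Seconds between summary flushes.')
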
def main():
    
    cifar10_data = cifar10.load_cifar10()

    learning_rate = 0.00001
    training_epochs = 200
    batch_size = 250
    n_input = 3072
    n_train = cifar10_data.train.num_examples

    boundary = 6.0 / (32 + 32)
    
    weights = {
        'encode_conv_weight_1':tf.Variable(tf.random_uniform((5, 5, 3, 32), minval = -boundary, maxval = boundary)),
        'encode_conv_weight_2':tf.Variable(tf.random_uniform((5, 5, 32, 64), minval = -boundary, maxval = boundary)),
        'encode_conv_weight_3':tf.Variable(tf.random_uniform((5, 5, 64, 128), minval = -boundary, maxval = boundary)),
        'encode_fully_connected': tf.Variable(xavier_init(4 * 4 * 128, 1024)),
        'encode_output_mean':tf.Variable(xavier_init(1024, 500)),
        'encode_output_log_sigma': tf.Variable(xavier_init(1024, 500)),
        'decode_dense_weight_1':tf.Variable(xavier_init(500, 1024)),
        'decode_dense_weight_2':tf.Variable(xavier_init(1024, 2048)),
        'decode_conv_weight_3': tf.Variable(tf.random_uniform((5, 5, 128, 64), minval = -boundary, maxval = boundary)),
        'decode_conv_weight_2': tf.Variable(tf.random_uniform((5, 5, 64, 32), minval = -boundary, maxval = boundary)),
        'decode_conv_weight_1': tf.Variable(tf.random_uniform((5, 5, 32, 3), minval = -boundary, maxval = boundary))
    }

    bias = {
        'encode_conv_bias_1':tf.Variable(tf.zeros([32])),
        'encode_conv_bias_2':tf.Variable(tf.zeros([64])),
        'encode_conv_bias_3':tf.Variable(tf.zeros([128])),
        'encode_fully_connected': tf.Variable(tf.zeros([1024])),
        'encode_output_mean':tf.Variable(tf.zeros([500])),
        'encode_output_log_sigma': tf.Variable(tf.zeros([500])),
        'decode_dense_bias_1':tf.Variable(tf.zeros([1024])),
        'decode_dense_bias_2': tf.Variable(tf.zeros([2048])),
        'decode_conv_bias_3': tf.Variable(tf.zeros([64])),
        'decode_conv_bias_2': tf.Variable(tf.zeros([32])),
        'decode_conv_bias_1': tf.Variable(tf.zeros([3]))
    }
    
    weights_key = ['encode_conv_weight_1',
                    'encode_conv_weight_2',
                    'encode_conv_weight_3',
                    'encode_fully_connected',
                    'encode_output_mean',
                    'encode_output_log_sigma',
                    'decode_dense_weight_1',
                    'decode_dense_weight_2',
                    'decode_conv_weight_3',
                    'decode_conv_weight_2', 
                    'decode_conv_weight_1']
    
    bias_key = ['encode_conv_bias_1',
                    'encode_conv_bias_2',
                    'encode_conv_bias_3',
                    'encode_fully_connected',
                    'encode_output_mean',
                    'encode_output_log_sigma',
                    'decode_dense_bias_1',
                    'decode_dense_bias_2',
                    'decode_conv_bias_3',
                    'decode_conv_bias_2', 
                    'decode_conv_bias_1']

    x = tf.placeholder(tf.float32, [batch_size, n_input])
    reconstruction, cost, recon_loss = createVAE(x, weights, bias, weights_key, bias_key)
    
    optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)

    init = tf.initialize_all_variables()

    sess = tf.Session()
    summary_dir = pjoin(FLAGS.summary_dir, 'conv_vae_cifar10_sbatch')
    summary_writer = tf.train.SummaryWriter(summary_dir, graph_def = sess.graph_def, flush_secs = FLAGS.flush_secs)
    sess.run(init)

    step = 1
    
    print("\n\n")
    print("| Training Step | Cross Entropy |   Epoch  |")
    print("|---------------|---------------|----------|")
    
    while step * batch_size < training_epochs * n_train:
        batch_x, batch_y = cifar10_data.train.next_batch(batch_size)
        feed_dict = {x:batch_x}
        sess.run(optimizer, feed_dict = feed_dict)
        
        if step % 100 == 0:
            loss_summary = sess.run(recon_loss, feed_dict = feed_dict)
                
            output = "| {0:>13} | {1:13.4f} | Epoch {2}  |"\
                 .format(step, loss_summary, step * batch_size // n_train + 1)

            print(output)
            

        if step % 900 == 0:
            image_summary_op = \
                tf.image_summary("training_images",
                             tf.reshape(x,
                                        (batch_size,
                                         32,
                                         32, 3)),
                             max_images=FLAGS.batch_size)
            reconstruction_summary_op = \
                tf.image_summary("reconstruction_image",
                            tf.reshape(reconstruction, 
                                        (batch_size,
                                         32,
                                         32, 3)),
                             max_images=FLAGS.batch_size)

            summary_img_str = sess.run(image_summary_op,
                                   feed_dict=feed_dict)
            summary_writer.add_summary(summary_img_str, step)

            summary_recon_image_str = sess.run(reconstruction_summary_op,
                                    feed_dict = feed_dict)
            summary_writer.add_summary(summary_recon_image_str, step)
        step+=1

    print("Optimization Finished!")
Example #7
from assignTextons import assignTextons
from fbRun import fbRun
from fbCreate import fbCreate
fb = fbCreate(support=2, startSigma=0.6)  # fbCreate(**kwargs, vis=True) for visualization
from skimage import color
from cifar10 import load_cifar10
import pickle
import random

file = open("textons.pckl",'rb')
textons = pickle.load(file)
file.close()

file = open("model.pckl",'rb')
model = pickle.load(file)
file.close()

test_cf=load_cifar10(meta='cifar-10-batches-py', mode='test')
test_images=test_cf["data"]
test_labels=test_cf["labels"]

N=random.randrange(0,len(test_labels),1)
l=random.sample(range(0,len(test_labels)-1),N)
list_test=range(0,N-1)
from assignTextons import assignTextons

def histc(X, bins):
    import numpy as np
    map_to_bins = np.digitize(X,bins)
    r = np.zeros(bins.shape)
    for i in map_to_bins:
        r[i-1] += 1
    return np.array(r)
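
# For non-negative integer texton labels, histc(X, np.arange(k)) produces the
# same counts as np.bincount; a quick sanity check (the _demo values below are
# illustrative, not from the original snippet):
import numpy as np
labels_demo = np.array([0, 2, 2, 5, 1])
k_demo = 8
assert np.array_equal(histc(labels_demo, np.arange(k_demo)),
                      np.bincount(labels_demo, minlength=k_demo))
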
Example #8
import pickle

model = open('RF', 'rb')
RF = pickle.load(model)
model.close()
text = open("text", "rb")
textons = pickle.load(text)
text.close()

import sys

sys.path.append('python')

import numpy as np
import cifar10

data = cifar10.load_cifar10(mode="test")
images_test, labels_test = cifar10.get_data(data)

from fbCreate import fbCreate

fb = fbCreate(support=2, startSigma=0.6)
k = 16 * 32

from assignTextons import assignTextons
from fbRun import fbRun
import matplotlib.pyplot as plt
import random

plt.figure()
for i in range(1, 8, 2):
Example #9
                 i,
                 format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()


import sys
sys.path.append('python')

import numpy as np
import cifar10
data = cifar10.load_cifar10(mode=5)
images_train, labels_train = cifar10.get_data(data)

from fbCreate import fbCreate
fb = fbCreate(support=2, startSigma=0.6)

k = 16 * 32

from fbRun import fbRun
num = 200
sample = np.hstack(images_train[0:num, :, :])
filterResponses = fbRun(fb, sample)

from computeTextons import computeTextons
map, textons = computeTextons(filterResponses, k)
def main():
    cifar10_data = cifar10.load_cifar10()
    learning_rate = 0.001
    training_epochs = 1000
    batch_size = 100
    n_input = 3072
    n_train = cifar10_data.train.num_examples


    weightsKey = ['encode_h1', 'encode_h2','encode_out_mean', 'encode_out_log_sigma', 'decode_h1', 'decode_h2', 'decode_out_mean', 'decode_out_log_sigma']
    biasKey = ['encode_h1', 'encode_h2','encode_out_mean', 'encode_out_log_sigma', 'decode_h1', 'decode_h2', 'decode_out_mean', 'decode_out_log_sigma']
    
    weights = {

        #Encoder procedure
        weightsKey[0]: tf.Variable(xavier_init(3072, 1024)),
        #weightsKey[1]: tf.Variable(xavier_init(1024, 1024)),
        # Latent space dimension is 500
        weightsKey[1]: tf.Variable(xavier_init(1024, 500)),
        weightsKey[2]: tf.Variable(xavier_init(1024, 500)),

        #Decode procedure
        weightsKey[3]: tf.Variable(xavier_init(500, 1024)),
        #weightsKey[5]: tf.Variable(xavier_init(1024, 1024)),
        weightsKey[4]: tf.Variable(xavier_init(1024, 3072)),
        #weightsKey[7]: tf.Variable(xavier_init(500, 784))
    }
    
    bias = {

        #Encoder procedure
        biasKey[0]: tf.Variable(tf.zeros([1024], dtype = tf.float32)),
        #biasKey[1]: tf.Variable(tf.zeros([1024], dtype = tf.float32)),
        # Latent space dimension is 500
        biasKey[1]: tf.Variable(tf.zeros([500], dtype = tf.float32)),
        biasKey[2]: tf.Variable(tf.zeros([500], dtype = tf.float32)),

        #Decode procedure
        #biasKey[4]: tf.Variable(tf.zeros([1024], dtype = tf.float32)),
        biasKey[3]: tf.Variable(tf.zeros([1024], dtype = tf.float32)),
        biasKey[4]: tf.Variable(tf.zeros([3072], dtype = tf.float32)),
        #biasKey[7]: tf.Variable(xavier_init(500, 784))
    }

    x = tf.placeholder(tf.float32, [batch_size, n_input])
    reconstruction, cost, recon_loss, recon_full_loss, latent_loss = createVAE(x, weights, bias, weightsKey, biasKey)
    
    optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)

    init = tf.initialize_all_variables()

    sess = tf.Session()
    summary_dir = pjoin(FLAGS.summary_dir, 'vae_cifar10')
    summary_writer = tf.train.SummaryWriter(summary_dir, graph_def = sess.graph_def, flush_secs = FLAGS.flush_secs)
    sess.run(init)

    step = 1
    
    print("\n\n")
    print("| Training Step | Cross Entropy |   Epoch  |")
    print("|---------------|---------------|----------|")
    
    while step * batch_size < training_epochs * n_train:
        batch_x, batch_y = cifar10_data.train.next_batch(batch_size)
        feed_dict = {x:batch_x}
        sess.run(optimizer, feed_dict = feed_dict)
        reconstruction_, cost_, recon_, recon_full_, latent_full_ = sess.run([reconstruction, cost, recon_loss, recon_full_loss, latent_loss], feed_dict = feed_dict)
        if (math.isnan(float(cost_))):
            print(reconstruction_, np.mean(reconstruction_))
            print("==================================")
            print(cost_)
            print("==================================")
            print(recon_)
            print("==================================")
            print(recon_full_, np.mean(recon_full_))
            print("==================================")
            print(latent_full_, np.mean(latent_full_))
            print("==================================")
    
        if step % 450 == 0:
            loss_summary = sess.run(recon_loss, feed_dict = feed_dict)
            #loss_summary_op = tf.scalar_summary("reconstruction_error", loss_summary)
            
            #summary_scalar_str = sess.run(loss_summary_op, feed_dict = {x: batch_x})
            #summary_writer.add_summary(summary_scalar_str, step)
                
            output = "| {0:>13} | {1:13.4f} | Epoch {2}  |"\
                 .format(step, loss_summary, step * batch_size // n_train + 1)

            print(output)
            

        if step % 1100 == 0:
            image_summary_op = \
                tf.image_summary("training_images",
                             tf.reshape(x,
                                        (FLAGS.batch_size,
                                         32,
                                         32, 3)),
                             max_images=FLAGS.batch_size)
            reconstruction_summary_op = \
                tf.image_summary("reconstruction_image",
                            tf.reshape(reconstruction, 
                                        (FLAGS.batch_size,
                                         32,
                                         32, 3)),
                             max_images=FLAGS.batch_size)

            summary_img_str = sess.run(image_summary_op,
                                   feed_dict=feed_dict)
            summary_writer.add_summary(summary_img_str, step)

            summary_recon_image_str = sess.run(reconstruction_summary_op,
                                    feed_dict = feed_dict)
            summary_writer.add_summary(summary_recon_image_str, step)
        step+=1
    
    batch_x, batch_y = cifar10_data.test.next_batch(FLAGS.batch_size)
    feed_dict = {x: np.array(batch_x)}

    recon_target = sess.run(reconstruction, feed_dict=feed_dict)
    np.save("vae_cifar10.npy", recon_target.reshape(100, 32, 32, 3))

    print("Optimization Finished!")
Example #11
testTextonMapPath = './data/testTextonMap.pkl'
if not fileExists(testTextonMapPath):
    print('Loading test images')
    testImgs = loadPickle('./data/testFilterResponses.pkl')
    print('Loading textons')
    textonPath = './data/mapAndTexton' + str(k) + '.pkl'
    textons = loadPickle(textonPath)['textons']
    print('Assigning textons to test images')
    textonMap = assignTextons(testImgs, textons.transpose())
    toPickle(textonMap, './data/testTextonMap')
else:
    textonMap = loadPickle(testTextonMapPath)

print('Loading test labels')
testLabels = cf.load_cifar10('./cifar-10-batches-py/', mode='test')['labels']

nTest = len(testLabels)
rfPred = []
print('Evaluating on test set')
for t in range(nTest):
    print('\r {:.2f}%'.format((t + 1) * 100 / nTest), end='')
    img = textonMap[:, t * 32:(t + 1) * 32]
    histo = histc(img.flatten(), np.arange(k))
    rfPred.append(clf.predict([histo])[0])

testCM = confusion_matrix(testLabels, rfPred)

toPickle(testCM, './data/testConfusionMatrix')
print()
print('Test confusion matrix:')
Example #12
sinit= datetime.now()
if not os.path.isdir(os.path.join(os.getcwd(),'cifar-10-batches-py')):
    url='https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
    r=requests.get(url,allow_redirects=True)
    open('cifar-10-python.tar.gz','wb').write(r.content)
    tar=tarfile.open("cifar-10-python.tar.gz","r")
    tar.extractall()
    tar.close()
    
endt=datetime.now()-sinit
File.write('Time downloading database = '+str(endt.total_seconds())+'\n')

start = datetime.now()

dictTrain = cifar10.load_cifar10(meta='cifar-10-batches-py', mode=1)
dictTest = cifar10.load_cifar10(meta='cifar-10-batches-py',mode='test')
numTrain = 0.1  # fraction of the images to take for training

imagesTrain,labelsTrain = cifar10.get_data(dictTrain,sliced=numTrain)
imagesTest,labelTest = cifar10.get_data(dictTest,sliced=1)
print('Label  and Data already loaded')

# Obtain concatenated matrices of random pixels



n = 100  # number of train and test images
sz = 32  # size of the square images
numel = sz * sz  # number of selected pixels per image
train=np.array([]).reshape(sz,0)
Example #13
def get_origin_train_test_data():
    trainX, trainY, testX, testY = load_cifar10(
        './cifar10_data/cifar-10-batches-py')
    return trainX, trainY, testX, testY
Example #14
    toPickle(greys, './data/' + name)
    print()
    print(name, "turned and saved to greyscale.")
    return greys


if fileExists('./data/trainImages.pkl') and fileExists(
        './data/testImages.pkl'):
    print('Loading pickled greyscale images...')
    trainGreys = loadPickle('./data/trainImages.pkl')
    testGreys = loadPickle('./data/testImages.pkl')
    print('Loaded greyscale images.')

else:
    print("Loading CIFAR10 data...")
    trainData = cf.load_cifar10(meta='./cifar-10-batches-py/', mode=1)

    labels = trainData['labels']

    labelIds = np.unique(labels)

    imgs = []
    outLabels = []
    np.random.seed(0)

    for imgId in labelIds:
        objIDs = np.where(labels == imgId)[0]
        rand100 = np.random.choice(objIDs, 100, replace=False)
        outLabels += list(labels[rand100])
        imgs += list(trainData['data'][rand100])
Example #15
def train():
    # for MNIST
    # trX,_,vlX,_,teX,_ = load_mnist_dataset()
    # testdata = trX
    #for cifar10
    testdata = load_cifar10('./cifar-10-batches-py')

    #For 64*64 dataset
    # ds = DataSet(Use_Real_Data)
    # testdata = ds.images

    g = tf.Graph()

    with g.as_default():
        sess = tf.Session()

        z, image = create_placeholders()
        d_total_loss, g_total_loss, d_optim, g_optim, precision = loss_function(
            z, image)

        saver = tf.train.Saver()
        sess.run(tf.initialize_all_variables())

        module_file = tf.train.latest_checkpoint('./Model')

        if module_file:
            saver.restore(sess, module_file)
            logging.debug("load module file:%s, model restored" % module_file)

        for epoch in range(Train_Epochs):
            for step, real_images in enumerate(
                    gen_batches(testdata, batch_size=Batch_size)):
                batch_images = real_images.reshape(
                    [Batch_size, Image_Size, Image_Size, Image_Channel])
                batch_z = np.random.uniform(-1, 1, [Batch_size, 100]).astype(
                    np.float32)
                #optimize d once, optimize g twice
                _, dloss, prec = sess.run([d_optim, d_total_loss, precision],
                                          feed_dict=create_feed_dict(
                                              z, image, batch_z, batch_images))
                for _ in range(3):
                    _, gloss = sess.run([g_optim, g_total_loss],
                                        feed_dict={z: batch_z})

                logging.debug(
                    "Epoch:%s step:%s dloss:%s gloss:%s d_precision:%s" %
                    (epoch, step, dloss, gloss, prec))
                if step % 1000 == 0:
                    with tf.variable_scope("learn_param", reuse=True):
                        lr = tf.get_variable(
                            "learning_rate",
                            shape=[],
                            dtype=tf.float32,
                            initializer=tf.constant_initializer(Learning_rate),
                            trainable=False)
                        lr = tf.assign(lr, lr * Learning_rate_Decay)
                        logging.debug("learning_rate decay:%6f" % sess.run(lr))
                    batch_z = np.random.uniform(
                        -1, 1, [Batch_size, 100]).astype(np.float32)
                    # samples = sess.run([sample_image],feed_dict={z:batch_z})
                    # plot_sample(samples, str(epoch)+"_"+str(step))
                    logging.debug("Sample saved")
                    saver.save(sess, "./Model/GANModel", global_step=epoch)
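
# create_placeholders, loss_function and gen_batches are defined elsewhere in
# this project. A minimal batching generator consistent with how gen_batches is
# called above (an assumption, not the original code) might be:
def gen_batches(data, batch_size):
    """Yield successive full mini-batches from data (shape [N, ...])."""
    for i in range(len(data) // batch_size):
        yield data[i * batch_size:(i + 1) * batch_size]
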
Example #16
# coding: utf-8
import matplotlib.pyplot as plt
# from two_layer_net import TwoLayerNet
from multi_layer_net import MultiLayerNet
from cifar10 import load_cifar10
from optimizer import *

(x_train, t_train), (x_test, t_test) = load_cifar10(normalize=True,
                                                    flatten=True,
                                                    one_hot_label=True,
                                                    data_batch_number='1')

# network = TwoLayerNet(input_size=3072, hidden_size=200, output_size=10)
network = MultiLayerNet(input_size=3072,
                        hidden_size_list=[100, 100, 100],
                        output_size=10,
                        activation='relu',
                        weight_init_std='relu',
                        weight_decay_lambda=0.1,
                        use_dropout=True,
                        dropout_ration=0.5,
                        use_batchnorm=True)

iters_num = 10000
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.1

train_loss_list = []
train_acc_list = []
test_acc_list = []
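
# The snippet stops after setting up the network and the bookkeeping lists. A
# typical training loop in this "Deep Learning from Scratch" style is sketched
# below; it assumes the optimizer module provides SGD and that MultiLayerNet
# exposes gradient/loss/accuracy/params, which is not shown in the snippet.
import numpy as np

optimizer = SGD(lr=learning_rate)
iter_per_epoch = max(train_size // batch_size, 1)

for i in range(iters_num):
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]

    grads = network.gradient(x_batch, t_batch)   # backprop gradients
    optimizer.update(network.params, grads)      # parameter update

    train_loss_list.append(network.loss(x_batch, t_batch))
    if i % iter_per_epoch == 0:
        train_acc_list.append(network.accuracy(x_train, t_train))
        test_acc_list.append(network.accuracy(x_test, t_test))
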