import os

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

# Project-local imports (module names assumed; adjust to the actual layout):
# from network import Network
# from layers import (ImageInputLayer, FlatInputLayer, ConvolutionLayer,
#                     FullyConnectedLayer, ImageToVectorLayer)
# from weightinit import WeightInit
# from data import MITOS12Data, MNISTData


def mitos12_cae_model_test():
    model = {
        'inputLayer' : ImageInputLayer(width=64,height=64,channels=3),
        'hiddenLayers' : [
            ConvolutionLayer(kernelsize=5, channels=3, features=12, stride=2, weightInitFunc=WeightInit.truncatedNormal, biasInitFunc=WeightInit.positive, activationFunc=tf.nn.relu, inputshape=[64,64,3]),
            ConvolutionLayer(kernelsize=5, channels=12, features=48, stride=2, weightInitFunc=WeightInit.truncatedNormal, biasInitFunc=WeightInit.positive, activationFunc=tf.nn.relu, inputshape=[32,32,12]),
            ConvolutionLayer(kernelsize=5, channels=48, features=192, stride=2, weightInitFunc=WeightInit.truncatedNormal, biasInitFunc=WeightInit.positive, activationFunc=tf.nn.relu, inputshape=[16,16,48])
        ]
    }

    batch_size = 50
    net = Network(model, objective='reconstruction', batch_size=batch_size)
    net.setupTraining("squared-diff", "Adam")

    init = tf.initialize_all_variables()
    mitos12 = MITOS12Data(train_dirs=["/media/sf_VirtualDropbox"])

    saver = tf.train.Saver()
    sess = tf.Session()

    with sess.as_default():
        assert tf.get_default_session() is sess
        sess.run(init)
        saver.restore(sess, "/home/adrien/workspace/DeepNet/mitos12ConvAutoEncoder3.ckpt")

        xs = mitos12.next_batch(batch_size)
        rs = net.predict(xs)
        rs[0][rs[0]>1.] = 1.  # clamp the displayed reconstruction to [0,1]
        
        plt.figure(1)
        plt.subplot(2,3,1)
        plt.title('Original')
        plt.imshow(xs[0], interpolation='nearest')
        plt.axis('off')
        plt.subplot(2,3,2)
        plt.title('Reconstruction')
        plt.imshow(rs[0], interpolation='nearest')
        plt.axis('off')
        plt.gray()
        plt.subplot(2,3,4)
        plt.title('Diff - R')
        plt.imshow(np.abs(rs[0]-xs[0])[:,:,0], interpolation='nearest')
        plt.axis('off')
        plt.subplot(2,3,5)
        plt.title('Diff - G')
        plt.imshow(np.abs(rs[0]-xs[0])[:,:,1], interpolation='nearest')
        plt.axis('off')
        plt.subplot(2,3,6)
        plt.title('Diff - B')
        plt.imshow(np.abs(rs[0]-xs[0])[:,:,2], interpolation='nearest')
        plt.axis('off')
        plt.show()
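
# The original/reconstruction/per-channel-diff figure above reappears almost
# verbatim in mitos12ConvAutoEncoder4 below. A minimal sketch of a shared
# helper, assuming float images of shape (H, W, 3) in [0, 1]; the name is
# hypothetical and nothing in this file calls it:
def show_reconstruction_sketch(x, r):
    r = np.clip(r, 0., 1.)  # clamp the reconstruction for display
    plt.figure()
    plt.subplot(2, 3, 1)
    plt.title('Original')
    plt.imshow(x, interpolation='nearest')
    plt.axis('off')
    plt.subplot(2, 3, 2)
    plt.title('Reconstruction')
    plt.imshow(r, interpolation='nearest')
    plt.axis('off')
    plt.gray()  # grayscale colormap for the per-channel diff panels
    for c, name in enumerate('RGB'):
        plt.subplot(2, 3, 4 + c)
        plt.title('Diff - ' + name)
        plt.imshow(np.abs(r - x)[:, :, c], interpolation='nearest')
        plt.axis('off')
    plt.show()
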
def mnistFullyConnectedClassif(train=False):
    model = {
        'inputLayer' : FlatInputLayer(inputsize=784),
        'hiddenLayers' : [
            FullyConnectedLayer(inputsize=784,outputsize=1000,weightInitFunc=WeightInit.truncatedNormal,biasInitFunc=WeightInit.positive,activationFunc=tf.nn.tanh),
            FullyConnectedLayer(inputsize=1000,outputsize=200,weightInitFunc=WeightInit.truncatedNormal,biasInitFunc=WeightInit.positive,activationFunc=tf.nn.tanh)
            ],
        'outputLayer' : FullyConnectedLayer(inputsize=200,outputsize=10,weightInitFunc=WeightInit.truncatedNormal,biasInitFunc=WeightInit.positive,activationFunc=tf.nn.softmax)
    }

    batch_size = 50
    net = Network(model, objective='classification', batch_size=batch_size)
    net.setupTraining("cross-entropy", "Adam")

    init = tf.initialize_all_variables()
    mnist = MNISTData(train_dir='MNIST_data', one_hot=True)

    saver = tf.train.Saver()
    sess = tf.Session()

    with sess.as_default():
        assert tf.get_default_session() is sess
        sess.run(init)
        if train==False:
            saver.restore(sess, "/home/adrien/workspace/DeepNet/mnistFullyConnectedClassif.ckpt")
            print "Test accuracy %g"%net.evaluate(mnist.test['images'], mnist.test['labels'])
        else:
            for i in range(20000):
                batch_xs, batch_ys = mnist.next_batch(batch_size, set=mnist.train)
                
                if i%1000 == 0:
                    cost = net.cost.eval(feed_dict={net.x: batch_xs, net.target: batch_ys})
                    print "step %d, training cost %g"%(i,cost)
            
                net.train(batch_xs, batch_ys)

            print "Test accuracy %g"%net.evaluate(mnist.test['images'], mnist.test['labels'])
            save_path = saver.save(sess, "/home/adrien/workspace/DeepNet/mnistFullyConnectedClassif.ckpt")

        plt.figure(1)
        plt.gray()
        plt.imshow(mnist.test['images'][0].reshape((28,28)), interpolation='nearest')
        plt.title(str(np.argmax(net.predict([mnist.test['images'][0]]))))
        plt.show()
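
# net.evaluate is project code; for reference, a sketch of the accuracy it
# presumably computes on one-hot targets (the formula is an assumption about
# the Network class, not its actual code):
def accuracy_sketch(probs, one_hot_targets):
    # fraction of samples whose argmax prediction matches the target class
    return np.mean(np.argmax(probs, 1) == np.argmax(one_hot_targets, 1))
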
def mitos12ClassifierFromConvAutoEncoder3(train=False, resumeTraining=False, iterations=20000):
    autoEncoderModel = {
        'inputLayer' : ImageInputLayer(width=64,height=64,channels=3),
        'hiddenLayers' : [
            ConvolutionLayer(kernelsize=5, channels=3, features=12, stride=2, weightInitFunc=WeightInit.truncatedNormal, biasInitFunc=WeightInit.positive, activationFunc=tf.nn.relu, inputshape=[64,64,3]),
            ConvolutionLayer(kernelsize=5, channels=12, features=48, stride=2, weightInitFunc=WeightInit.truncatedNormal, biasInitFunc=WeightInit.positive, activationFunc=tf.nn.relu, inputshape=[32,32,12]),
            ConvolutionLayer(kernelsize=5, channels=48, features=192, stride=2, weightInitFunc=WeightInit.truncatedNormal, biasInitFunc=WeightInit.positive, activationFunc=tf.nn.relu, inputshape=[16,16,48])
        ]
    }

    classifierModel = {
        'inputLayer': ImageInputLayer(width=8,height=8,channels=192),
        'hiddenLayers': [
            ImageToVectorLayer(imagesize=(8,8,192)),
            FullyConnectedLayer(inputsize=8*8*192,outputsize=200,weightInitFunc=WeightInit.truncatedNormal,biasInitFunc=WeightInit.positive,activationFunc=tf.nn.tanh),
            FullyConnectedLayer(inputsize=200,outputsize=100,weightInitFunc=WeightInit.truncatedNormal,biasInitFunc=WeightInit.positive,activationFunc=tf.nn.tanh)
            ],
        'outputLayer' : FullyConnectedLayer(inputsize=100,outputsize=2,weightInitFunc=WeightInit.truncatedNormal,biasInitFunc=WeightInit.positive,activationFunc=tf.nn.softmax)
    }

    batch_size = 50
    autoEncoder = Network(autoEncoderModel, objective='reconstruction', batch_size=batch_size)

    clf = Network(classifierModel, objective='classification', batch_size=batch_size)
    clf.setupTraining("cross-entropy", "Adam", a=0.995)

    init = tf.initialize_all_variables()
    basedir = "/media/sf_E_DRIVE/Dropbox/ULB/Doctorat/ImageSet/MITOS12/"
    mitos12 = MITOS12Data(train_dirs=[os.path.join(basedir,d) for d in ["A00_v2", "A01_v2", "A02_v2", "A03_v2", "A04_v2"]])

    aesaver = tf.train.Saver(autoEncoder.getVariables())
    clfsaver = tf.train.Saver(clf.getVariables())
    sess = tf.Session()

    with sess.as_default():
        assert tf.get_default_session() is sess
        sess.run(init)
        aesaver.restore(sess, "/home/adrien/workspace/DeepNet/mitos12ConvAutoEncoder3.ckpt")

        if train==False or resumeTraining==True:
            clfsaver.restore(sess, "/home/adrien/workspace/DeepNet/mitos12ClfFromConvAutoEncoder3.ckpt")
        if train==True:
            for i in range(iterations):
                batch = mitos12.next_supervised_batch(batch_size)
                input_images = [b[0] for b in batch]
                
                batch_xs = autoEncoder.encode(input_images)
                batch_ys = [b[1] for b in batch]

                if i%1000==0:
                    cost = clf.cost.eval(feed_dict={clf.x: batch_xs, clf.target: batch_ys})
                    loss = clf.loss.eval(feed_dict={clf.x: batch_xs, clf.target: batch_ys})
                    l2loss = clf.l2loss.eval()
                    print "step %d, training cost %g, loss %g, l2loss %g"%(i,cost,loss,l2loss)
                    save_path = clfsaver.save(sess, "/home/adrien/workspace/DeepNet/mitos12ClfFromConvAutoEncoder3.ckpt")
            
                clf.train(batch_xs, batch_ys)

            save_path = clfsaver.save(sess, "/home/adrien/workspace/DeepNet/mitos12ClfFromConvAutoEncoder3.ckpt")

        # Eval: accumulate a 2x2 confusion matrix over 50 random batches
        Cmat = np.zeros((2,2))
        for i in range(50):
            batch = mitos12.next_supervised_batch(batch_size)
            input_images = [b[0] for b in batch]
            
            batch_xs = autoEncoder.encode(input_images)
            batch_ys = [b[1] for b in batch]

            pred = clf.predict(batch_xs)
            Cmat += C(pred,batch_ys)

        print Cmat

        im_, path, basename = mitos12.images[18]
        im = np.array(im_)
        stride = 15
        # Slide a 64x64 window across the image with a 15px stride; ts lists
        # the (tx, ty) grid indices of every window position.
        rangex = np.arange(0,im.shape[0]-64,stride)
        rangey = np.arange(0,im.shape[1]-64,stride)
        ts = [(t/len(rangey), t%len(rangey)) for t in range(len(rangex)*len(rangey))]
        chunks = [im[tx*stride:tx*stride+64,ty*stride:ty*stride+64,:] for tx,ty in ts]
        chunksPos = [(tx*stride,ty*stride) for tx,ty in ts]
        # pMitosis accumulates, per pixel: class-0 probability, class-1
        # probability, and the number of windows covering that pixel.
        pMitosis = np.zeros((im.shape[0], im.shape[1], 3))

        print len(chunks)
        # Process whole batches of 50; leftover chunks (< one batch) are dropped.
        for t in range(len(chunks)/50):
            batch = chunks[t*50:t*50+50]
            batch_xs = autoEncoder.encode(batch)
            is_mitosis = clf.predict(batch_xs)
            for i,p in enumerate(is_mitosis):
                cp = chunksPos[t*50+i]
                pMitosis[cp[0]:cp[0]+64, cp[1]:cp[1]+64, 0] += p[0]
                pMitosis[cp[0]:cp[0]+64, cp[1]:cp[1]+64, 1] += p[1]
                pMitosis[cp[0]:cp[0]+64, cp[1]:cp[1]+64, 2] += 1

        plt.figure()
        plt.gray()
        plt.imshow(pMitosis[:,:,0], interpolation='nearest')  # accumulated class-0 probability
        plt.figure()
        plt.imshow(pMitosis[:,:,1], interpolation='nearest')  # accumulated class-1 probability
        plt.figure()
        plt.imshow(pMitosis[:,:,2], interpolation='nearest')  # window coverage count
        plt.figure()
        plt.imshow(pMitosis[:,:,0]/pMitosis[:,:,2], interpolation='nearest')  # mean class-0 probability
        plt.figure()
        plt.imshow(plt.imread(basename+".jpg"))
        plt.show()
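
# C() above comes from project code; a sketch of a compatible 2x2 confusion
# matrix for one-hot targets and softmax predictions (rows = true class,
# columns = predicted class; the layout is an assumption):
def confusion_matrix_sketch(pred, targets):
    M = np.zeros((2, 2))
    for p, t in zip(pred, targets):
        M[np.argmax(t), np.argmax(p)] += 1
    return M
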
def mitos12ConvAutoEncoder3(train=False, resumeTraining=False, iterations=20000):
    model = {
        'inputLayer' : ImageInputLayer(width=64,height=64,channels=3),
        'hiddenLayers' : [
            ConvolutionLayer(kernelsize=5, channels=3, features=12, stride=2, weightInitFunc=WeightInit.truncatedNormal, biasInitFunc=WeightInit.positive, activationFunc=tf.nn.relu, inputshape=[64,64,3]),
            ConvolutionLayer(kernelsize=5, channels=12, features=48, stride=2, weightInitFunc=WeightInit.truncatedNormal, biasInitFunc=WeightInit.positive, activationFunc=tf.nn.relu, inputshape=[32,32,12]),
            ConvolutionLayer(kernelsize=5, channels=48, features=192, stride=2, weightInitFunc=WeightInit.truncatedNormal, biasInitFunc=WeightInit.positive, activationFunc=tf.nn.relu, inputshape=[16,16,48])
        ]
    }

    batch_size = 50
    net = Network(model, objective='reconstruction', batch_size=batch_size)
    net.setupTraining("squared-diff", "Adam")

    init = tf.initialize_all_variables()
    basedir = "/media/sf_E_DRIVE/Dropbox/ULB/Doctorat/ImageSet/MITOS12/"
    mitos12 = MITOS12Data(train_dirs=[os.path.join(basedir,d) for d in ["A00_v2", "A01_v2", "A02_v2", "A03_v2", "A04_v2"]])

    saver = tf.train.Saver()
    sess = tf.Session()

    with sess.as_default():
        assert tf.get_default_session() is sess
        sess.run(init)
        if train==False or resumeTraining==True:
            saver.restore(sess, "/home/adrien/workspace/DeepNet/mitos12ConvAutoEncoder3.ckpt")
        if train==True:
            for i in range(iterations):
                batch_xs = mitos12.next_batch(batch_size)
                batch_ys = batch_xs
                
                if i%1000 == 0:
                    cost = net.cost.eval(feed_dict={net.x: batch_xs, net.target: batch_ys})
                    loss = net.loss.eval(feed_dict={net.x: batch_xs, net.target: batch_ys})
                    l2loss = net.l2loss.eval()
                    print "step %d, training cost %g, loss %g, l2loss %g"%(i,cost,loss,l2loss)
                    save_path = saver.save(sess, "/home/adrien/workspace/DeepNet/mitos12ConvAutoEncoder3.ckpt")
            
                net.train(batch_xs, batch_ys)

            save_path = saver.save(sess, "/home/adrien/workspace/DeepNet/mitos12ConvAutoEncoder3.ckpt")

        xs = mitos12.next_batch(batch_size)

        #es = net.encode(xs)
        
        # for idx in range(len(es)):
        #     max_act = 0
        #     arg_max_act = 0
        #     es0 = es[idx]
        #     for t in np.arange(0,192):
        #         if( es0[:,:,t].sum() > max_act ):
        #             max_act = es0[:,:,t].sum()
        #             arg_max_act = t
            
        #     print arg_max_act
        # return
        # featt = np.zeros(es0.shape)
        # featt[:,:,arg_max_act] = es0[:,:,arg_max_act]
        # rs = net.decode([featt])
        # print es0[:,:,arg_max_act]
        # plt.figure()
        # plt.gray()
        # plt.imshow(es0[:,:,arg_max_act], interpolation='nearest')
        # plt.figure()
        # plt.imshow(rs[0]/rs[0].max(), interpolation='nearest')
        # plt.show()
        # return

        # Visualize the 12 first-layer filters, rescaled globally to [0,1].
        W_1 = net.layers[1].W.eval()
        W_1n = (W_1-W_1.min())/(W_1.max()-W_1.min())
        plt.figure()
        for i in range(W_1.shape[3]):
            plt.subplot(4,3,i+1)
            plt.imshow(W_1n[:,:,:,i], interpolation='none')
        plt.show()
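
# W_1n above rescales all filters with one global min/max, which can wash out
# low-contrast filters. A per-filter alternative, sketched for comparison:
def normalize_filters_sketch(W):
    # W has shape (kernel, kernel, channels, features); rescale each filter
    # independently to [0, 1]
    Wn = np.empty_like(W)
    for i in range(W.shape[3]):
        f = W[:, :, :, i]
        Wn[:, :, :, i] = (f - f.min()) / (f.max() - f.min() + 1e-8)
    return Wn
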
def mitos12ConvAutoEncoder4(train=False, resumeTraining=False, iterations=20000):
    model = {
        'inputLayer' : ImageInputLayer(width=128,height=128,channels=3),
        'hiddenLayers' : [
            ConvolutionLayer(kernelsize=15, channels=3, features=12, stride=4, weightInitFunc=WeightInit.truncatedNormal, biasInitFunc=WeightInit.positive, activationFunc=tf.nn.relu, inputshape=[128,128,3]),
            ConvolutionLayer(kernelsize=7, channels=12, features=40, stride=2, weightInitFunc=WeightInit.truncatedNormal, biasInitFunc=WeightInit.positive, activationFunc=tf.nn.relu, inputshape=[32,32,12]),
            ConvolutionLayer(kernelsize=5, channels=40, features=80, stride=2, weightInitFunc=WeightInit.truncatedNormal, biasInitFunc=WeightInit.positive, activationFunc=tf.nn.relu, inputshape=[16,16,40])
        ]
    }

    batch_size = 50
    net = Network(model, objective='reconstruction', batch_size=batch_size)
    net.setupTraining("squared-diff", "Adam", a=0.998)
    autoEncoderName = "mitos12ConvAutoEncoder4WithDistortion"

    init = tf.initialize_all_variables()
    basedir = "/media/sf_E_DRIVE/Dropbox/ULB/Doctorat/ImageSet/MITOS12/"
    mitos12 = MITOS12Data(train_dirs=[os.path.join(basedir,d) for d in ["A00_v2", "A01_v2", "A02_v2", "A03_v2", "A04_v2"]],chunksize=(128,128))

    saver = tf.train.Saver()
    sess = tf.Session()

    with sess.as_default():
        assert tf.get_default_session() is sess
        sess.run(init)
        if train==False or resumeTraining==True:
            saver.restore(sess, "/home/adrien/workspace/DeepNet/%s.ckpt"%autoEncoderName)
        if train==True:
            for i in range(iterations):
                batch_xs = mitos12.next_batch(batch_size, noise=True, nc=0.02)
                batch_ys = batch_xs
                
                if i%1000 == 0:
                    cost = net.cost.eval(feed_dict={net.x: batch_xs, net.target: batch_ys})
                    loss = net.loss.eval(feed_dict={net.x: batch_xs, net.target: batch_ys})
                    l2loss = net.l2loss.eval()
                    print "step %d, training cost %g, loss %g, l2loss %g"%(i,cost,loss,l2loss)
                    save_path = saver.save(sess, "/home/adrien/workspace/DeepNet/%s.ckpt"%autoEncoderName)
                    with open("/home/adrien/workspace/DeepNet/%s_results.txt"%autoEncoderName, "a") as resFile:
                        resFile.write("step %d, training cost %g, loss %g, l2loss %g\n"%(i,cost,loss,l2loss))
            
                net.train(batch_xs, batch_ys)

            save_path = saver.save(sess, "/home/adrien/workspace/DeepNet/%s.ckpt"%autoEncoderName)

        xs = mitos12.next_batch(batch_size)
        cost = net.cost.eval(feed_dict={net.x: xs, net.target: xs})
        loss = net.loss.eval(feed_dict={net.x: xs, net.target: xs})
        l2loss = net.l2loss.eval()
        print "test cost %g, loss %g, l2loss %g"%(cost,loss,l2loss)

        es = net.encode(xs)

        # Visualize the 12 first-layer filters, rescaled globally to [0,1].
        W = net.layers[1].W.eval()
        W = (W-W.min())/(W.max()-W.min())
        plt.figure()
        for idx in range(12):
            plt.subplot(4,3,idx+1)
            plt.axis('off')
            plt.imshow(W[:,:,:,idx], interpolation='nearest')
        # plt.show()
        # return
        
        # for idx in range(len(es)):
        #     max_act = 0
        #     arg_max_act = 0
        #     es0 = es[idx]
        #     for t in np.arange(0,160):
        #         if( es0[:,:,t].sum() > max_act ):
        #             max_act = es0[:,:,t].sum()
        #             arg_max_act = t
            
        #     print arg_max_act
        # return
        # featt = np.zeros(es0.shape)
        # featt[:,:,arg_max_act] = es0[:,:,arg_max_act]
        # rs = net.decode([featt])
        # print es0[:,:,arg_max_act]
        # plt.figure()
        # plt.gray()
        # plt.imshow(es0[:,:,arg_max_act], interpolation='nearest')
        # plt.figure()
        # plt.imshow(rs[0]/rs[0].max(), interpolation='nearest')
        # plt.show()
        # return

        rs = net.predict(xs)
        rs[0][rs[0]>1.] = 1.  # clamp the displayed reconstruction to [0,1]
        
        plt.figure()
        plt.subplot(2,3,1)
        plt.title('Original')
        plt.imshow(xs[0], interpolation='nearest')
        plt.axis('off')
        plt.subplot(2,3,2)
        plt.title('Reconstruction')
        plt.imshow(rs[0], interpolation='nearest')
        plt.axis('off')
        plt.gray()
        plt.subplot(2,3,4)
        plt.title('Diff - R')
        plt.imshow(np.abs(rs[0]-xs[0])[:,:,0], interpolation='nearest')
        plt.axis('off')
        plt.subplot(2,3,5)
        plt.title('Diff - G')
        plt.imshow(np.abs(rs[0]-xs[0])[:,:,1], interpolation='nearest')
        plt.axis('off')
        plt.subplot(2,3,6)
        plt.title('Diff - B')
        plt.imshow(np.abs(rs[0]-xs[0])[:,:,2], interpolation='nearest')
        plt.axis('off')
        plt.show()
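
# next_batch(..., noise=True, nc=0.02) turns this into a denoising
# autoencoder; the corruption itself lives in MITOS12Data. A sketch of the
# presumed operation (additive Gaussian noise of std nc; an assumption about
# that class, not its actual code):
def corrupt_sketch(batch, nc=0.02):
    noisy = np.asarray(batch) + np.random.normal(0., nc, np.shape(batch))
    return np.clip(noisy, 0., 1.)
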
def visuNetwork(n=0):
    model = {
        'inputLayer' : FlatInputLayer(inputsize=64*64*3),
        'hiddenLayers' : [
            FullyConnectedLayer(inputsize=64*64*3,outputsize=32*32,weightInitFunc=WeightInit.truncatedNormal,biasInitFunc=WeightInit.positive,activationFunc=tf.nn.tanh),
            FullyConnectedLayer(inputsize=32*32,outputsize=16*16,weightInitFunc=WeightInit.truncatedNormal,biasInitFunc=WeightInit.positive,activationFunc=tf.nn.tanh),
            FullyConnectedLayer(inputsize=16*16,outputsize=8*8,weightInitFunc=WeightInit.truncatedNormal,biasInitFunc=WeightInit.positive,activationFunc=tf.nn.tanh),
        ]
    }
    savedModelPath = "/home/adrien/workspace/DeepNet/mitos12FullyConnectedAE.ckpt"

    batch_size = 200
    net = Network(model, objective='reconstruction', batch_size=batch_size)
    net.setupTraining("squared-diff", "Adam", False)
    mitos12 = MITOS12Data(train_dirs=["/media/sf_VirtualDropbox"])

    varsToRestore = []
    for l in net.layers:
        varsToRestore += l.trainables
    saver = tf.train.Saver(varsToRestore)
    sess = tf.Session()

    init = tf.initialize_all_variables()

    with sess.as_default():
        assert tf.get_default_session() is sess
        sess.run(init)
        saver.restore(sess, savedModelPath)

        batch_xs = mitos12.next_batch(batch_size, flat=True)
        ys = net.encode(batch_xs)

        print ys.shape
        plt.figure(1)
        plt.imshow(ys, vmin=ys.min(), vmax=ys.max(), interpolation='nearest')
        plt.gray()
        plt.show()
        return

    #x = tf.Variable(tf.truncated_normal([1,64*64*3], stddev=0.1))
    # x = np.ones((1,64*64*3))*0.5

    # batch_size = 1
    # net = Network(model, objective='reconstruction', batch_size=batch_size)
    # net.setupTraining("squared-diff", "Adam", False)

    # varsToRestore = []
    # for l in net.layers:
    #     varsToRestore += l.trainables

    # mitos12 = MITOS12Data(train_dirs=["/media/sf_VirtualDropbox"])

    # saver = tf.train.Saver(varsToRestore)
    # sess = tf.Session()

    # init = tf.initialize_all_variables()

    # with sess.as_default():
    #     assert tf.get_default_session() is sess
    #     sess.run(init)
    #     saver.restore(sess, savedModelPath)

    #     W = net.layers[3].W.eval()
    #     print W.shape, W.min(), W.max()
    #     plt.figure(1)
    #     plt.imshow(W, vmin=W.min(), vmax=W.max())
    #     plt.gray()
    #     plt.show()
    #     return

    #     target = [0. for i in range(64)]
    #     target[n] = 1.
    #     print target

    #     y = net.encoded
    #     loss = y-target
    #     grad = tf.gradients(loss, net.x)

    #     for i in range(100):
    #         gradx = grad[0].eval(feed_dict={net.x: x})
    #         if i%100 == 0:
    #             print gradx
    #         x += gradx*0.1

    #     fig = plt.figure(1)
    #     plt.imshow(x.reshape([64,64,3]))
    #     fig.savefig('neuron_%d_max_input.png'%(n), bbox_inches='tight')
    #     print "Saved figure"
def mitos12FullyConnectedAE(train=False, resumeTraining=False, iterations=20000):
    model = {
        'inputLayer' : FlatInputLayer(inputsize=64*64*3),
        'hiddenLayers' : [
            FullyConnectedLayer(inputsize=64*64*3,outputsize=64*64,weightInitFunc=WeightInit.truncatedNormal,biasInitFunc=WeightInit.positive,activationFunc=tf.nn.tanh),
            FullyConnectedLayer(inputsize=64*64,outputsize=32*32,weightInitFunc=WeightInit.truncatedNormal,biasInitFunc=WeightInit.positive,activationFunc=tf.nn.tanh),
            FullyConnectedLayer(inputsize=32*32,outputsize=16*16,weightInitFunc=WeightInit.truncatedNormal,biasInitFunc=WeightInit.positive,activationFunc=tf.nn.tanh),
        ]
    }
    savedModelPath = "/home/adrien/workspace/DeepNet/mitos12FullyConnectedAE.ckpt"

    batch_size = 50
    net = Network(model, objective='reconstruction', batch_size=batch_size)
    net.setupTraining("squared-diff", "Adam", True)

    init = tf.initialize_all_variables()
    mitos12 = MITOS12Data(train_dirs=["/media/sf_VirtualDropbox"])

    saver = tf.train.Saver()
    sess = tf.Session()

    with sess.as_default():
        assert tf.get_default_session() is sess
        sess.run(init)
        if train==False or resumeTraining==True:
            print "Restoring from "+savedModelPath
            saver.restore(sess, savedModelPath)
        if train==True:
            for i in range(iterations):
                batch_xs = mitos12.next_batch(batch_size, flat=True)
                batch_ys = batch_xs
                
                if i%1000 == 0:
                    cost = net.cost.eval(feed_dict={net.x: batch_xs, net.target: batch_ys})
                    print "step %d, training cost %g"%(i,cost)
                    save_path = saver.save(sess,savedModelPath)
            
                net.train(batch_xs, batch_ys)

            save_path = saver.save(sess,savedModelPath)


        im = np.array(mitos12.getRandomImage())
        print im.shape
        # Tile the image into 64x64 patches on two grids offset by 32px,
        # reconstruct every patch, then average the overlapping results.
        # (The offset grid assumes at least 32px remain past the last tile.)
        n0 = im.shape[0]/64  # tiles along axis 0
        n1 = im.shape[1]/64  # tiles along axis 1
        xs_1 = [im[i*64:i*64+64,j*64:j*64+64,:].flatten() for i in xrange(n0) for j in xrange(n1)]
        xs_2 = [im[32+i*64:32+i*64+64,32+j*64:32+j*64+64,:].flatten() for i in xrange(n0) for j in xrange(n1)]
        rs_1 = net.predict(xs_1)
        rs_2 = net.predict(xs_2)
        rim = np.zeros(im.shape)
        im_k = np.zeros((im.shape[0],im.shape[1]))
        for k,r in enumerate(rs_1):
            i = (k/n1)*64   # patches were enumerated i-major, j-minor
            j = (k%n1)*64
            rim[i:i+64,j:j+64,:] += r.reshape((64,64,3))
            im_k[i:i+64,j:j+64] += 1
        for k,r in enumerate(rs_2):
            i = (k/n1)*64
            j = (k%n1)*64
            rim[32+i:32+i+64,32+j:32+j+64,:] += r.reshape((64,64,3))
            im_k[32+i:32+i+64,32+j:32+j+64] += 1

        # Average where the grids overlap, then clamp to the displayable range.
        rim[im_k>0,0] /= im_k[im_k>0]
        rim[im_k>0,1] /= im_k[im_k>0]
        rim[im_k>0,2] /= im_k[im_k>0]
        rim[rim>1.] = 1.
        rim[rim<0.] = 0.
        plt.figure(0)
        plt.subplot(1,2,1)
        plt.imshow(im, interpolation='nearest')
        plt.axis('off')
        plt.subplot(1,2,2)
        plt.imshow(rim, interpolation='nearest')
        plt.axis('off')
        plt.show()
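
# Hypothetical entry point (the original file may dispatch differently); each
# function trains and/or visualizes depending on its flags:
if __name__ == '__main__':
    # e.g. resume training the convolutional autoencoder for another 20k steps:
    # mitos12ConvAutoEncoder3(train=True, resumeTraining=True, iterations=20000)
    mitos12ConvAutoEncoder3(train=False)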