Example #1
import os
import importlib

def js(fn):
    # Rebuild the generated script if generator.py or insts.td is newer.
    if fn in ('interp', 'disasm'):
        ud = max(map(os.path.getmtime, ('generator.py', 'insts.td')))
        if os.path.getmtime('scripts/' + fn + '.js') <= ud:
            print('Loading tables...')
            import generator
            importlib.reload(generator)
            generator.build()
    return open('scripts/' + fn + '.js', 'r').read()
Example #3
    def __init__(self, w, h, batch_size=32, lr=0.0001):  # lr lowered from 0.001
        super(ModelBCE, self).__init__(w, h, batch_size)

        self.net = generator.build(self.inputHeight, self.inputWidth, self.input_var)

        output_layer_name = 'output'
        prediction = lasagne.layers.get_output(self.net[output_layer_name])

        test_prediction = lasagne.layers.get_output(self.net[output_layer_name], deterministic=True)
        self.predictFunction = theano.function([self.input_var], test_prediction)

        output_var_sal_pooled = T.signal.pool.pool_2d(self.output_var_sal, (4, 4), mode="average_exc_pad", ignore_border=True)
        output_var_fixa_pooled = T.signal.pool.pool_2d(self.output_var_fixa, (4, 4), mode="average_exc_pad", ignore_border=True)
        prediction_pooled = T.signal.pool.pool_2d(prediction, (4, 4), mode="average_exc_pad", ignore_border=True)

        #bce = lasagne.objectives.binary_crossentropy(prediction_pooled, output_var_pooled).mean()
        # Minimize KL divergence while maximizing CC and NSS (all weights 1).
        train_err = (KL_div(prediction_pooled, output_var_sal_pooled)
                     - CC(prediction_pooled, output_var_sal_pooled)
                     - NSS(prediction_pooled, output_var_fixa_pooled))

        # parameters update and training
        G_params = lasagne.layers.get_all_params(self.net[output_layer_name], trainable=True)
        self.G_lr = theano.shared(np.array(lr, dtype=theano.config.floatX))
        G_updates = lasagne.updates.nesterov_momentum(train_err, G_params, learning_rate=self.G_lr, momentum=0.5)

        self.G_trainFunction = theano.function(inputs=[self.input_var, self.output_var_sal, self.output_var_fixa], outputs=train_err, updates=G_updates,
                                               allow_input_downcast=True)
Example #4
    def __init__(self):
        super(ModelBCE, self).__init__()

        # PyTorch fragment: here generator.build() returns the prediction tensor.
        prediction = generator.build()

        loss_fn = nn.BCELoss()
        bceloss = loss_fn(prediction, target)  # `target` is assumed to be defined elsewhere
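The fragment above omits its imports, the model, and the target tensor. A minimal self-contained sketch of the same BCE setup (model, shapes, and targets are illustrative assumptions, not the project's actual generator.build()):

import torch
import torch.nn as nn

# Hypothetical stand-in for the generator: any module whose output is
# already squashed into (0, 1), as nn.BCELoss requires.
model = nn.Sequential(nn.Linear(16, 1), nn.Sigmoid())

x = torch.randn(8, 16)     # batch of 8 inputs
target = torch.rand(8, 1)  # soft targets in [0, 1]

prediction = model(x)
bceloss = nn.BCELoss()(prediction, target)
bceloss.backward()         # gradients flow back into the model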
Example #5
    def __init__(self, w, h, batch_size=32, G_lr=3e-4, D_lr=3e-4, alpha=1/20.):
        super(ModelSALGAN, self).__init__(w, h, batch_size)

        # Build Generator
        self.net = generator.build(self.inputHeight, self.inputWidth, self.input_var)

        # Build Discriminator
        self.discriminator = discriminator.build(self.inputHeight, self.inputWidth,
                                                 T.concatenate([self.output_var, self.input_var], axis=1))

        # Set prediction function
        output_layer_name = 'output'

        prediction = lasagne.layers.get_output(self.net[output_layer_name])
        test_prediction = lasagne.layers.get_output(self.net[output_layer_name], deterministic=True)
        self.predictFunction = theano.function([self.input_var], test_prediction)

        disc_lab = lasagne.layers.get_output(self.discriminator['prob'],
                                             T.concatenate([self.output_var, self.input_var], axis=1))
        disc_gen = lasagne.layers.get_output(self.discriminator['prob'],
                                             T.concatenate([prediction, self.input_var], axis=1))

        # Downscale the saliency maps
        output_var_pooled = T.signal.pool.pool_2d(self.output_var, (4, 4), mode="average_exc_pad", ignore_border=True)
        prediction_pooled = T.signal.pool.pool_2d(prediction, (4, 4), mode="average_exc_pad", ignore_border=True)
        train_err = (lasagne.objectives.binary_crossentropy(prediction_pooled, output_var_pooled).mean()
                     + 1e-4 * lasagne.regularization.regularize_network_params(
                         self.net[output_layer_name], lasagne.regularization.l2))

        # Define loss function and input data
        ones = T.ones(disc_lab.shape)
        zeros = T.zeros(disc_lab.shape)
        D_obj = (lasagne.objectives.binary_crossentropy(T.concatenate([disc_lab, disc_gen], axis=0),
                                                        T.concatenate([ones, zeros], axis=0)).mean()
                 + 1e-4 * lasagne.regularization.regularize_network_params(
                     self.discriminator['prob'], lasagne.regularization.l2))

        G_obj_d = (lasagne.objectives.binary_crossentropy(disc_gen, T.ones(disc_lab.shape)).mean()
                   + 1e-4 * lasagne.regularization.regularize_network_params(
                       self.net[output_layer_name], lasagne.regularization.l2))

        G_obj = G_obj_d + train_err * alpha
        cost = [G_obj, D_obj, train_err]

        # parameters update and training of Generator
        G_params = lasagne.layers.get_all_params(self.net[output_layer_name], trainable=True)
        self.G_lr = theano.shared(np.array(G_lr, dtype=theano.config.floatX))
        G_updates = lasagne.updates.adagrad(G_obj, G_params, learning_rate=self.G_lr)
        self.G_trainFunction = theano.function(inputs=[self.input_var, self.output_var], outputs=cost,
                                               updates=G_updates, allow_input_downcast=True)

        # parameters update and training of Discriminator
        D_params = lasagne.layers.get_all_params(self.discriminator['prob'], trainable=True)
        self.D_lr = theano.shared(np.array(D_lr, dtype=theano.config.floatX))
        D_updates = lasagne.updates.adagrad(D_obj, D_params, learning_rate=self.D_lr)
        self.D_trainFunction = theano.function([self.input_var, self.output_var], cost, updates=D_updates,
                                               allow_input_downcast=True)
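The three cost terms above follow the standard conditional-GAN recipe: the discriminator sees real and generated saliency maps labelled 1 and 0, while the generator is trained both to fool the discriminator and to match the pooled ground truth, weighted by alpha. A sketch of the same objectives in PyTorch (all names here are hypothetical, and discriminator outputs are assumed to lie in (0, 1)):

import torch
import torch.nn.functional as F

def salgan_losses(disc_real, disc_fake, prediction, target, alpha=1 / 20.):
    # Discriminator: push real pairs toward 1 and generated pairs toward 0.
    d_obj = F.binary_cross_entropy(
        torch.cat([disc_real, disc_fake], dim=0),
        torch.cat([torch.ones_like(disc_real), torch.zeros_like(disc_fake)], dim=0))
    # Generator, adversarial part: make the discriminator output 1 on fakes.
    g_obj_d = F.binary_cross_entropy(disc_fake, torch.ones_like(disc_fake))
    # Generator, content part: match the ground-truth saliency map.
    content = F.binary_cross_entropy(prediction, target)
    return g_obj_d + alpha * content, d_obj, content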
Example #6
    def __init__(self, w, h, batch_size, lr, regterm, momentum):
        super(ModelBCE, self).__init__(w, h, batch_size)

        self.net = generator.build(self.inputHeight, self.inputWidth,
                                   self.input_var)

        output_layer_name = 'output'

        prediction = lasagne.layers.get_output(self.net[output_layer_name],
                                               deterministic=False)
        # Only for VGG 16 (Upsampling)
        #prediction = T.nnet.abstract_conv.bilinear_upsampling(prediction,16)
        #output_var_pooled = T.signal.pool.pool_2d(self.output_var, (16, 16), mode="average_exc_pad", ignore_border=True)
        bce = lasagne.objectives.binary_crossentropy(
            prediction, self.output_var).mean(
            ) + regterm * lasagne.regularization.regularize_network_params(
                self.net[output_layer_name], lasagne.regularization.l2)
        train_err = bce
        G_params = lasagne.layers.get_all_params(self.net[output_layer_name],
                                                 trainable=True)
        self.G_lr = theano.shared(np.array(lr, dtype=theano.config.floatX))
        G_updates = lasagne.updates.momentum(train_err,
                                             G_params,
                                             learning_rate=self.G_lr,
                                             momentum=momentum)
        self.G_trainFunction = theano.function(
            inputs=[self.input_var, self.output_var],
            outputs=train_err,
            updates=G_updates)

        test_prediction = lasagne.layers.get_output(
            self.net[output_layer_name], deterministic=True)
        # Only for VGG 16 (Upsampling)
        #test_prediction = T.nnet.abstract_conv.bilinear_upsampling(test_prediction,16)
        test_loss = lasagne.objectives.binary_crossentropy(
            test_prediction, self.output_var).mean()
        test_acc = lasagne.objectives.binary_jaccard_index(
            test_prediction, self.output_var).mean()
        self.G_valFunction = theano.function(
            inputs=[self.input_var, self.output_var],
            outputs=[test_loss, test_acc])
        self.predictFunction = theano.function([self.input_var],
                                               test_prediction)
Example #7
    def __init__(self, w, h, batch_size=32, lr=0.001):
        super(ModelBCE, self).__init__(w, h, batch_size)

        # Build Generator
        self.net = generator.build(self.inputHeight, self.inputWidth,
                                   self.input_var)

        # Generator output (train)
        output_layer_name = 'output'
        prediction = lasagne.layers.get_output(self.net[output_layer_name])

        # Generator output (test, disable stochastic behaviour such as dropout)
        test_prediction = lasagne.layers.get_output(
            self.net[output_layer_name], deterministic=True)
        self.predictFunction = theano.function([self.input_var],
                                               test_prediction)

        # Downscale the saliency maps
        output_var_pooled = T.signal.pool.pool_2d(self.output_var, (4, 4),
                                                  mode="average_exc_pad",
                                                  ignore_border=True)
        prediction_pooled = T.signal.pool.pool_2d(prediction, (4, 4),
                                                  mode="average_exc_pad",
                                                  ignore_border=True)

        bce = lasagne.objectives.binary_crossentropy(prediction_pooled,
                                                     output_var_pooled).mean()
        train_err = bce

        # parameters update and training
        G_params = lasagne.layers.get_all_params(self.net[output_layer_name],
                                                 trainable=True)
        self.G_lr = theano.shared(np.array(lr, dtype=theano.config.floatX))
        G_updates = lasagne.updates.nesterov_momentum(train_err,
                                                      G_params,
                                                      learning_rate=self.G_lr,
                                                      momentum=0.5)

        self.G_trainFunction = theano.function(
            inputs=[self.input_var, self.output_var],
            outputs=train_err,
            updates=G_updates,
            allow_input_downcast=True)
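For intuition, the (4, 4) average pooling applied to both maps above is just a blockwise mean; a NumPy sketch of the same downscaling (shapes are illustrative, and the border cropping mimics ignore_border=True):

import numpy as np

def avg_pool_4x4(saliency):
    # Drop incomplete border blocks, then average each 4x4 block.
    h, w = saliency.shape
    s = saliency[:h - h % 4, :w - w % 4]
    return s.reshape(h // 4, 4, w // 4, 4).mean(axis=(1, 3))

m = np.arange(64, dtype=np.float32).reshape(8, 8)
print(avg_pool_4x4(m).shape)  # (2, 2): one value per 4x4 block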
Example #8
    def __init__(self, w, h, batch_size, G_lr, regterm, D_lr, alpha):
        super(ModelSALGAN, self).__init__(w, h, batch_size)

        # Build Generator
        self.net = generator.build(self.inputHeight, self.inputWidth, self.input_var)
        self.discriminator = discriminator.build(self.inputHeight, self.inputWidth,
                                                 T.concatenate([self.output_var, self.input_var], axis=1))
        output_layer_name = 'output'

        prediction = lasagne.layers.get_output(self.net[output_layer_name])

        disc_lab = lasagne.layers.get_output(self.discriminator['prob'],
                                             T.concatenate([self.output_var, self.input_var], axis=1))
        disc_gen = lasagne.layers.get_output(self.discriminator['prob'],
                                             T.concatenate([prediction, self.input_var], axis=1))

        train_err = (lasagne.objectives.binary_crossentropy(prediction, self.output_var).mean()
                     + regterm * lasagne.regularization.regularize_network_params(
                         self.net[output_layer_name], lasagne.regularization.l2))

        # Define loss function and input data
        ones = T.ones(disc_lab.shape)
        zeros = T.zeros(disc_lab.shape)
        D_obj = (lasagne.objectives.binary_crossentropy(T.concatenate([disc_lab, disc_gen], axis=0),
                                                        T.concatenate([ones, zeros], axis=0)).mean()
                 + regterm * lasagne.regularization.regularize_network_params(
                     self.discriminator['prob'], lasagne.regularization.l2))

        G_obj_d = (lasagne.objectives.binary_crossentropy(disc_gen, T.ones(disc_lab.shape)).mean()
                   + regterm * lasagne.regularization.regularize_network_params(
                       self.net[output_layer_name], lasagne.regularization.l2))

        G_obj = G_obj_d + train_err * alpha
        cost = [G_obj, D_obj, train_err]

        # parameters update and training of Generator
        G_params = lasagne.layers.get_all_params(self.net[output_layer_name], trainable=True)
        self.G_lr = theano.shared(np.array(G_lr, dtype=theano.config.floatX))
        G_updates = lasagne.updates.momentum(G_obj, G_params, learning_rate=self.G_lr, momentum=0.99)
        self.G_trainFunction = theano.function(inputs=[self.input_var, self.output_var], outputs=cost,
                                               updates=G_updates, allow_input_downcast=True)

        # parameters update and training of Discriminator
        D_params = lasagne.layers.get_all_params(self.discriminator['prob'], trainable=True)
        self.D_lr = theano.shared(np.array(D_lr, dtype=theano.config.floatX))
        D_updates = lasagne.updates.momentum(D_obj, D_params, learning_rate=self.D_lr, momentum=0.99)
        self.D_trainFunction = theano.function([self.input_var, self.output_var], cost, updates=D_updates,
                                               allow_input_downcast=True)

        test_prediction = lasagne.layers.get_output(self.net[output_layer_name], deterministic=True)
        test_loss = lasagne.objectives.binary_crossentropy(test_prediction, self.output_var).mean()
        test_acc = lasagne.objectives.binary_jaccard_index(test_prediction, self.output_var).mean()
        self.G_valFunction = theano.function(inputs=[self.input_var, self.output_var],
                                             outputs=[test_loss, test_acc])
        self.predictFunction = theano.function([self.input_var], test_prediction)
Example #9
    def __init__(self, w=320, h=240, batch_size=10, lr=0.01, regterm=1e-05, momentum=0.99):
        super(ModelBCE, self).__init__(w, h, batch_size)

        self.net = generator.build(self.inputHeight, self.inputWidth, self.input_var)

        output_layer_name = 'output'

        prediction = lasagne.layers.get_output(self.net[output_layer_name], deterministic=False)
        bce = (lasagne.objectives.binary_crossentropy(prediction, self.output_var).mean()
               + regterm * lasagne.regularization.regularize_network_params(
                   self.net[output_layer_name], lasagne.regularization.l2))
        train_err = bce
        G_params = lasagne.layers.get_all_params(self.net[output_layer_name], trainable=True)
        self.G_lr = theano.shared(np.array(lr, dtype=theano.config.floatX))
        G_updates = lasagne.updates.momentum(train_err, G_params,
                                             learning_rate=self.G_lr, momentum=momentum)
        self.G_trainFunction = theano.function(inputs=[self.input_var, self.output_var],
                                               outputs=train_err, updates=G_updates)

        test_prediction = lasagne.layers.get_output(self.net[output_layer_name], deterministic=True)
        test_loss = lasagne.objectives.binary_crossentropy(test_prediction, self.output_var).mean()
        test_acc = lasagne.objectives.binary_jaccard_index(test_prediction, self.output_var).mean()
        self.G_valFunction = theano.function(inputs=[self.input_var, self.output_var],
                                             outputs=[test_loss, test_acc])
        self.predictFunction = theano.function([self.input_var], test_prediction)
Example #10
    def __init__(self, w, h, batch_size=32, G_lr=3e-4, D_lr=3e-4, alpha=1/20.):
        super(ModelSALGAN, self).__init__(w, h, batch_size)

        # Build Generator
        self.net = generator.build(self.inputHeight, self.inputWidth, self.input_var)

        # Build Discriminator
        self.discriminator = discriminator.build(self.inputHeight, self.inputWidth,
                                                 T.concatenate([self.output_var, self.input_var], axis=1))

        output_layer_name = 'output'

        # Generator output (train)
        prediction = lasagne.layers.get_output(self.net[output_layer_name])
        
        # Generator output (test, disable stochastic behaviour such as dropout)
        test_prediction = lasagne.layers.get_output(self.net[output_layer_name], deterministic=True)
        
        # The prediction function: [generator input] --> [test_prediction]
        self.predictFunction = theano.function([self.input_var], test_prediction)

        
        # Get the discriminator output for real input
        disc_lab = lasagne.layers.get_output(self.discriminator['prob'],
                                             T.concatenate([self.output_var, self.input_var], axis=1))
        
        # Get the discriminator output for fake input
        disc_gen = lasagne.layers.get_output(self.discriminator['prob'],
                                             T.concatenate([prediction, self.input_var], axis=1))

        # Downscale the saliency maps
        output_var_pooled = T.signal.pool.pool_2d(self.output_var, (4, 4), mode="average_exc_pad", ignore_border=True)
        prediction_pooled = T.signal.pool.pool_2d(prediction, (4, 4), mode="average_exc_pad", ignore_border=True)
        
        train_err = (lasagne.objectives.binary_crossentropy(prediction_pooled, output_var_pooled).mean()
                     + 1e-4 * lasagne.regularization.regularize_network_params(
                         self.net[output_layer_name], lasagne.regularization.l2))
Example #11
import coffeescript, generator, glob, os, os.path, shutil

if not os.path.exists('build'):
	os.makedirs('build')
generator.build()

for fn in glob.glob('scripts/*.js'):
	bn = os.path.basename(fn)
	shutil.copyfile(fn, 'build/' + bn)

for fn in glob.glob('scripts/*.coffee'):
	bn = os.path.basename(fn)
	with open('build/' + bn, 'w') as fp:
		fp.write(coffeescript.compile(open(fn, 'r').read(), bare=True))

for fn in glob.glob('static/*'):
	bn = os.path.basename(fn)
	shutil.copyfile(fn, 'build/' + bn)
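The directory layout this build script expects, inferred from its glob patterns (shown here for orientation only):

scripts/   *.js copied verbatim; *.coffee compiled to JavaScript
static/    copied verbatim
build/     output directory, created if missing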
Example #13
    def __init__(self,
                 w,
                 h,
                 batch_size=16,
                 G_lr=3e-5,
                 D_lr=3e-5,
                 alpha=1 / 20.):
        super(ModelSALGAN, self).__init__(w, h, batch_size)

        # Build Generator
        self.net = generator.build(self.inputHeight, self.inputWidth,
                                   self.input_var)

        # Build Discriminator
        self.discriminator = discriminator.build(
            self.inputHeight, self.inputWidth,
            T.concatenate([self.output_var_sal, self.input_var], axis=1))

        # Set prediction function
        output_layer_name = 'output'

        prediction = lasagne.layers.get_output(self.net[output_layer_name])
        test_prediction = lasagne.layers.get_output(
            self.net[output_layer_name], deterministic=True)
        self.predictFunction = theano.function([self.input_var],
                                               test_prediction)

        disc_lab = lasagne.layers.get_output(
            self.discriminator['prob'],
            T.concatenate([self.output_var_sal, self.input_var], axis=1))
        disc_gen = lasagne.layers.get_output(
            self.discriminator['prob'],
            T.concatenate([prediction, self.input_var], axis=1))

        # Downscale the saliency maps
        output_var_sal_pooled = T.signal.pool.pool_2d(self.output_var_sal,
                                                      (4, 4),
                                                      mode="average_exc_pad",
                                                      ignore_border=True)
        output_var_fixa_pooled = T.signal.pool.pool_2d(self.output_var_fixa,
                                                       (4, 4),
                                                       mode="average_exc_pad",
                                                       ignore_border=True)
        prediction_pooled = T.signal.pool.pool_2d(prediction, (4, 4),
                                                  mode="average_exc_pad",
                                                  ignore_border=True)
        '''
        ICME17 image dataset
        KLmiu = 2.4948
        KLstd = 1.7421
        CCmiu = 0.3932
        CCstd = 0.2565
        NSSmiu = 0.4539
        NSSstd = 0.2631
        bcemiu = 0.3194
        bcestd = 0.1209
        '''
        #ICME18 image dataset
        KLmiu = 2.9782
        KLstd = 2.1767
        CCmiu = 0.3677
        CCstd = 0.2484
        NSSmiu = 0.5635
        NSSstd = 0.2961
        bcemiu = 0.2374
        bcestd = 0.1066

        #model1
        #train_err = lasagne.objectives.binary_crossentropy(prediction_pooled, output_var_sal_pooled).mean()
        #model6: z-score each metric with the dataset statistics above, then
        # combine (KL is minimized; CC and NSS are maximized, hence the signs).
        train_err = bcemiu + bcestd * (
            (KL_div(prediction_pooled, output_var_sal_pooled) - KLmiu) / KLstd
            - (CC(prediction_pooled, output_var_sal_pooled) - CCmiu) / CCstd
            - (NSS(prediction_pooled, output_var_fixa_pooled) - NSSmiu) / NSSstd)
        #model8
        #train_err = lasagne.objectives.binary_crossentropy(prediction_pooled, output_var_sal_pooled).mean()-(bcemiu+bcestd*((1.)*((CC(prediction_pooled, output_var_sal_pooled)-CCmiu)/CCstd) + (1.)*((NSS(prediction_pooled, output_var_fixa_pooled)-NSSmiu)/NSSstd)))
        # L2 weight decay on the generator parameters.
        train_err += 1e-4 * lasagne.regularization.regularize_network_params(
            self.net[output_layer_name], lasagne.regularization.l2)
        # Define loss function and input data
        ones = T.ones(disc_lab.shape)
        zeros = T.zeros(disc_lab.shape)
        D_obj = lasagne.objectives.binary_crossentropy(
            T.concatenate([disc_lab, disc_gen], axis=0),
            T.concatenate([ones, zeros], axis=0)).mean()
        #D_obj = bcemiu+bcestd*((3.)*((KL_div(T.concatenate([disc_lab, disc_gen], axis=0), T.concatenate([ones, zeros], axis=0)).sum()-KLmiu)/KLstd) - (1.)*((CC(T.concatenate([disc_lab, disc_gen], axis=0), T.concatenate([ones, zeros], axis=0))-CCmiu)/CCstd) - (1.)*((NSS(T.concatenate([disc_lab, disc_gen], axis=0), T.concatenate([ones, zeros], axis=0))-NSSmiu)/NSSstd))
        #D_obj = (3.)*((KL_div(T.concatenate([disc_lab, disc_gen], axis=0), T.concatenate([ones, zeros], axis=0)).sum()-KLmiu)/KLstd)
        D_obj += 1e-4 * lasagne.regularization.regularize_network_params(
            self.discriminator['prob'], lasagne.regularization.l2)

        G_obj_d = lasagne.objectives.binary_crossentropy(
            disc_gen, T.ones(disc_lab.shape)).mean()
        #G_obj_d = bcemiu+bcestd*((3.)*((KL_div(disc_gen, T.ones(disc_lab.shape)).sum()-KLmiu)/KLstd) - (1.)*((CC(disc_gen, T.ones(disc_lab.shape))-CCmiu)/CCstd) - (1.)*((NSS(disc_gen, T.ones(disc_lab.shape))-NSSmiu)/NSSstd))
        G_obj_d += 1e-4 * lasagne.regularization.regularize_network_params(
            self.net[output_layer_name], lasagne.regularization.l2)

        G_obj = G_obj_d + train_err * alpha
        cost = [G_obj, D_obj, train_err]

        # parameters update and training of Generator
        G_params = lasagne.layers.get_all_params(self.net[output_layer_name],
                                                 trainable=True)
        self.G_lr = theano.shared(np.array(G_lr, dtype=theano.config.floatX))
        G_updates = lasagne.updates.adagrad(G_obj,
                                            G_params,
                                            learning_rate=self.G_lr)
        self.G_trainFunction = theano.function(
            inputs=[self.input_var, self.output_var_sal, self.output_var_fixa],
            outputs=cost,
            updates=G_updates,
            allow_input_downcast=True,
            on_unused_input='ignore')

        # parameters update and training of Discriminator
        D_params = lasagne.layers.get_all_params(self.discriminator['prob'],
                                                 trainable=True)
        self.D_lr = theano.shared(np.array(D_lr, dtype=theano.config.floatX))
        D_updates = lasagne.updates.adagrad(D_obj,
                                            D_params,
                                            learning_rate=self.D_lr)
        self.D_trainFunction = theano.function(
            [self.input_var, self.output_var_sal, self.output_var_fixa],
            cost,
            updates=D_updates,
            allow_input_downcast=True,
            on_unused_input='ignore')
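The z-score weighting in train_err above makes the three saliency metrics commensurate before combining them: each is centred and scaled by dataset-level statistics (the ICME18 constants from the code), with KL minimized and CC/NSS maximized. A small plain-Python sketch of the same arithmetic (the metric values passed in are made up for illustration):

ICME18 = {'KL': (2.9782, 2.1767), 'CC': (0.3677, 0.2484), 'NSS': (0.5635, 0.2961)}

def combined_loss(kl, cc, nss, bcemiu=0.2374, bcestd=0.1066, stats=ICME18):
    z = lambda value, name: (value - stats[name][0]) / stats[name][1]
    # Signs: KL is a divergence (smaller is better); CC and NSS are
    # similarity scores (larger is better), so they enter negatively.
    return bcemiu + bcestd * (z(kl, 'KL') - z(cc, 'CC') - z(nss, 'NSS'))

print(combined_loss(kl=2.5, cc=0.45, nss=0.7))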
Example #14
    def __init__(self,
                 w,
                 h,
                 batch_size=16,
                 G_lr=3e-4,
                 D_lr=3e-4,
                 alpha=1 / 20.):
        super(ModelSALGAN, self).__init__(w, h, batch_size)

        # Build Generator
        self.net = generator.build(self.inputHeight, self.inputWidth,
                                   self.input_var)
        #self.net1 = generator.build(self.inputHeight, self.inputWidth, self.input_var)

        # Build Discriminator
        self.discriminator = discriminator.build(
            4, self.inputHeight, self.inputWidth,
            T.concatenate([self.output_var_sal, self.input_var], axis=1))

        # Set prediction function
        output_layer_name = 'output'

        prediction = lasagne.layers.get_output(self.net[output_layer_name])
        test_prediction = lasagne.layers.get_output(
            self.net[output_layer_name], deterministic=True)
        self.predictFunction = theano.function([self.input_var],
                                               test_prediction)

        disc_lab = lasagne.layers.get_output(
            self.discriminator['prob'],
            T.concatenate([self.output_var_sal, self.input_var], axis=1))
        disc_gen = lasagne.layers.get_output(
            self.discriminator['prob'],
            T.concatenate([prediction, self.input_var], axis=1))

        # Downscale the saliency maps
        output_var_sal_pooled = T.signal.pool.pool_2d(self.output_var_sal,
                                                      (4, 4),
                                                      mode="average_exc_pad",
                                                      ignore_border=True)
        output_var_fixa_pooled = T.signal.pool.pool_2d(self.output_var_fixa,
                                                       (4, 4),
                                                       mode="max",
                                                       ignore_border=True)
        prediction_pooled = T.signal.pool.pool_2d(prediction, (4, 4),
                                                  mode="average_exc_pad",
                                                  ignore_border=True)

        #SampleMap_pooled = T.signal.pool.pool_2d(self.SampleMap, (4, 4), mode="average_exc_pad", ignore_border=True)
        #c2e = Cube2Equi(1024)
        #_equi = c2e.to_equi_cv2(prediction)
        '''
        ICME17 image dataset
        KLmiu = 2.4948
        KLstd = 1.7421
        CCmiu = 0.3932
        CCstd = 0.2565
        NSSmiu = 0.4539
        NSSstd = 0.2631
        bcemiu = 0.3194
        bcestd = 0.1209
        
        #ICME18 image dataset
        KLmiu = 2.9782 
        KLstd = 2.1767
        CCmiu = 0.3677 
        CCstd = 0.2484
        NSSmiu = 0.5635
        NSSstd = 0.2961
        bcemiu = 0.2374
        bcestd = 0.1066
        '''

        #model6
        #train_err = bcemiu+bcestd*((1.)*((KL_div(prediction_pooled, output_var_sal_pooled)-KLmiu)/KLstd) - (1.)*((CC(prediction_pooled, output_var_sal_pooled)-CCmiu)/CCstd) - (1.)*((NSS(prediction_pooled, output_var_fixa_pooled)-NSSmiu)/NSSstd))
        #model6_adaptive_weighting
        #KLsc = KL_div(prediction, self.output_var_sal, self.output_var_wei)
        #BCEsc = lasagne.objectives.binary_crossentropy(prediction_pooled, output_var_sal_pooled).mean()
        #inv_sigmaBCE = lasagne.objectives.binary_crossentropy(prediction_pooled, output_var_sal_pooled).std()
        KLsc = KL_div(prediction_pooled, output_var_sal_pooled)
        #CCsc = T.sub(1.0,  CC(prediction_pooled, output_var_sal_pooled))
        #CCsc = CC(prediction, self.output_var_sal, self.output_var_wei )
        CCsc = CC(prediction_pooled, output_var_sal_pooled)
        #NSSsc = T.sub(3.29, NSS(prediction_pooled, output_var_fixa_pooled))
        NSSsc = NSS(prediction_pooled, output_var_fixa_pooled)

        train_err = (self.inv_sigmaKL * KLsc
                     - self.inv_sigmaCC * CCsc
                     - self.inv_sigmaNSS * NSSsc)
        #train_err = (self.inv_sigmaKL)*(KLsc-self.meanKL) - (self.inv_sigmaCC)*(CCsc-self.meanCC) - (self.inv_sigmaNSS)*(NSSsc-self.meanNSS)
        #train_err = 10*(KLsc) - 2*(CCsc) - 1*(NSSsc)
        #train_err = 1.0*(KLsc) - 1.0*(CCsc) - 1.0*(NSSsc)
        #model8
        #train_err = lasagne.objectives.binary_crossentropy(prediction_pooled, output_var_sal_pooled).mean()-(bcemiu+bcestd*((1.)*((CC(prediction_pooled, output_var_sal_pooled)-CCmiu)/CCstd) + (1.)*((NSS(prediction_pooled, output_var_fixa_pooled)-NSSmiu)/NSSstd)))
        #model1

        #train_err = lasagne.objectives.binary_crossentropy(prediction_pooled, output_var_sal_pooled).mean()
        # L2 weight decay on the generator parameters.
        train_err += 1e-4 * lasagne.regularization.regularize_network_params(
            self.net[output_layer_name], lasagne.regularization.l2)
        # Define loss function and input data
        ones = T.ones(disc_lab.shape)
        zeros = T.zeros(disc_lab.shape)
        D_obj = lasagne.objectives.binary_crossentropy(
            T.concatenate([disc_lab, disc_gen], axis=0),
            T.concatenate([ones, zeros], axis=0)).mean()
        #D_obj = bcemiu+bcestd*((3.)*((KL_div(T.concatenate([disc_lab, disc_gen], axis=0), T.concatenate([ones, zeros], axis=0)).sum()-KLmiu)/KLstd) - (1.)*((CC(T.concatenate([disc_lab, disc_gen], axis=0), T.concatenate([ones, zeros], axis=0))-CCmiu)/CCstd) - (1.)*((NSS(T.concatenate([disc_lab, disc_gen], axis=0), T.concatenate([ones, zeros], axis=0))-NSSmiu)/NSSstd))
        #D_obj = (3.)*((KL_div(T.concatenate([disc_lab, disc_gen], axis=0), T.concatenate([ones, zeros], axis=0)).sum()-KLmiu)/KLstd)
        D_obj += 1e-4 * lasagne.regularization.regularize_network_params(
            self.discriminator['prob'], lasagne.regularization.l2)

        G_obj_d = lasagne.objectives.binary_crossentropy(
            disc_gen, T.ones(disc_lab.shape)).mean()
        #G_obj_d = bcemiu+bcestd*((3.)*((KL_div(disc_gen, T.ones(disc_lab.shape)).sum()-KLmiu)/KLstd) - (1.)*((CC(disc_gen, T.ones(disc_lab.shape))-CCmiu)/CCstd) - (1.)*((NSS(disc_gen, T.ones(disc_lab.shape))-NSSmiu)/NSSstd))
        G_obj_d += 1e-4 * lasagne.regularization.regularize_network_params(
            self.net[output_layer_name], lasagne.regularization.l2)

        G_obj = G_obj_d + train_err * alpha
        #cost = [G_obj, D_obj, train_err, BCEsc, KLsc, CCsc, NSSsc, inv_sigmaBCE, inv_sigmaKL, inv_sigmaCC, inv_sigmaNSS, prediction]
        #cost = [G_obj, D_obj, train_err, BCEsc, KLsc, CCsc, NSSsc, prediction]
        cost = [G_obj, D_obj, train_err, KLsc, CCsc, NSSsc, prediction]
        #cost = [G_obj, D_obj, train_err]

        # parameters update and training of Generator

        G_params = lasagne.layers.get_all_params(self.net[output_layer_name],
                                                 trainable=True)
        #self.G_lr = theano.shared(np.array(G_lr, dtype=theano.config.floatX))
        #self.G_lr = theano.shared(np.array(self.adaptive_Glr, dtype=theano.config.floatX))
        #G_updates = lasagne.updates.adagrad(G_obj, G_params, learning_rate= self.G_lr)
        G_updates = lasagne.updates.adagrad(G_obj,
                                            G_params,
                                            learning_rate=self.adaptive_Glr_in)
        self.G_trainFunction = theano.function(inputs=[
            self.input_var, self.output_var_sal, self.output_var_fixa,
            self.output_var_wei, self.inv_sigmaKL, self.inv_sigmaCC,
            self.inv_sigmaNSS, self.adaptive_Glr_in
        ],
                                               outputs=cost,
                                               updates=G_updates,
                                               allow_input_downcast=True,
                                               on_unused_input='ignore')
        #self.G_trainFunction = theano.function(inputs=[self.input_var, self.output_var_sal, self.output_var_fixa], outputs=cost,
        #                                       updates=G_updates, allow_input_downcast=True,  on_unused_input='ignore')

        #self.G_trainFunction = theano.function(inputs=[self.input_var, self.output_var_sal, self.output_var_fixa, self.inv_sigmaKL, self.inv_sigmaCC, self.inv_sigmaNSS, self.meanKL, self.meanCC, self.meanNSS, self.adaptive_Glr], outputs=cost,
        #                                       updates=None, allow_input_downcast=True,  on_unused_input='ignore')

        # parameters update and training of Discriminator
        D_params = lasagne.layers.get_all_params(self.discriminator['prob'],
                                                 trainable=True)
        #self.D_lr = theano.shared(np.array(self.adaptive_Dlr, dtype=theano.config.floatX))
        #D_updates = lasagne.updates.adagrad(D_obj, D_params, learning_rate= self.D_lr)
        D_updates = lasagne.updates.adagrad(D_obj,
                                            D_params,
                                            learning_rate=self.adaptive_Dlr_in)
        #self.D_trainFunction = theano.function([self.input_var, self.output_var_sal, self.output_var_fixa], outputs=cost, updates=D_updates,
        #                                       allow_input_downcast=True, on_unused_input='ignore')
        self.D_trainFunction = theano.function([
            self.input_var, self.output_var_sal, self.output_var_fixa,
            self.output_var_wei, self.inv_sigmaKL, self.inv_sigmaCC,
            self.inv_sigmaNSS, self.adaptive_Dlr_in
        ],
                                               outputs=cost,
                                               updates=D_updates,
                                               allow_input_downcast=True,
                                               on_unused_input='ignore')