def test_shuffle_row_elements(self):
        """Test that RandomStreams.shuffle_row_elements generates the right results"""
        # Two successive calls are compared, which also checks that the
        # shared random state is advanced between calls.

        # Matrix case: every row is shuffled independently.  This differs
        # from numpy.random.shuffle, which permutes all elements at once.
        random = RandomStreams(utt.fetch_seed())
        m_input = tensor.dmatrix()
        f = function([m_input],
                     random.shuffle_row_elements(m_input),
                     updates=random.updates())

        # Build the matrix whose rows will be shuffled.
        val_rng = numpy.random.RandomState(utt.fetch_seed() + 42)
        in_mval = val_rng.uniform(-2, 2, size=(20, 5))
        fn_mval0 = f(in_mval)
        fn_mval1 = f(in_mval)
        print(in_mval[0])
        print(fn_mval0[0])
        print(fn_mval1[0])
        # Each shuffle must differ from the input and from the other draw.
        for left, right in ((in_mval, fn_mval0),
                            (in_mval, fn_mval1),
                            (fn_mval0, fn_mval1)):
            assert not numpy.all(left == right)

        # Recreate the expected shuffles with numpy, seeded identically.
        rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)
        rng = numpy.random.RandomState(int(rng_seed))
        numpy_mval0 = in_mval.copy()
        numpy_mval1 = in_mval.copy()
        for expected in (numpy_mval0, numpy_mval1):
            for row in expected:
                rng.shuffle(row)

        assert numpy.all(numpy_mval0 == fn_mval0)
        assert numpy.all(numpy_mval1 == fn_mval1)

        # Vector case: same behaviour as numpy.random.shuffle, except it is
        # out-of-place and returns the shuffled vector.
        random1 = RandomStreams(utt.fetch_seed())
        v_input = tensor.dvector()
        f1 = function([v_input], random1.shuffle_row_elements(v_input))

        in_vval = val_rng.uniform(-3, 3, size=(12,))
        fn_vval = f1(in_vval)
        numpy_vval = in_vval.copy()
        vrng = numpy.random.RandomState(int(rng_seed))
        vrng.shuffle(numpy_vval)
        print(in_vval)
        print(fn_vval)
        print(numpy_vval)
        assert numpy.all(numpy_vval == fn_vval)

        # Feeding a matrix to the vector function (or the other way round)
        # must raise a TypeError.
        self.assertRaises(TypeError, f1, in_mval)
        self.assertRaises(TypeError, f, in_vval)
    def test_shuffle_row_elements(self):
        """Test that RandomStreams.shuffle_row_elements generates the right results"""
        # Check over two calls to see if the random state is correctly updated.

        # On matrices, for each row, the elements of that row should be shuffled.
        # Note that this differs from numpy.random.shuffle, where all the elements
        # of the matrix are shuffled.
        random = RandomStreams(utt.fetch_seed())
        m_input = tensor.dmatrix()
        f = function([m_input], random.shuffle_row_elements(m_input), updates=random.updates())

        # Generate the elements to be shuffled
        val_rng = numpy.random.RandomState(utt.fetch_seed()+42)
        in_mval = val_rng.uniform(-2, 2, size=(20, 5))
        # Two calls on the same input: the shared random state must advance,
        # so the two outputs differ from the input and from each other.
        fn_mval0 = f(in_mval)
        fn_mval1 = f(in_mval)
        print(in_mval[0])
        print(fn_mval0[0])
        print(fn_mval1[0])
        assert not numpy.all(in_mval == fn_mval0)
        assert not numpy.all(in_mval == fn_mval1)
        assert not numpy.all(fn_mval0 == fn_mval1)

        # Reproduce the expected values with numpy, seeded the same way the
        # theano stream is (an int drawn from a RandomState on fetch_seed()).
        rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)
        rng = numpy.random.RandomState(int(rng_seed))
        numpy_mval0 = in_mval.copy()
        numpy_mval1 = in_mval.copy()
        for row in numpy_mval0:
            rng.shuffle(row)
        for row in numpy_mval1:
            rng.shuffle(row)

        assert numpy.all(numpy_mval0 == fn_mval0)
        assert numpy.all(numpy_mval1 == fn_mval1)

        # On vectors, the behaviour is the same as numpy.random.shuffle,
        # except that it does not work in place, but returns a shuffled vector.
        random1 = RandomStreams(utt.fetch_seed())
        v_input = tensor.dvector()
        f1 = function([v_input], random1.shuffle_row_elements(v_input))

        in_vval = val_rng.uniform(-3, 3, size=(12,))
        fn_vval = f1(in_vval)
        numpy_vval = in_vval.copy()
        vrng = numpy.random.RandomState(int(rng_seed))
        vrng.shuffle(numpy_vval)
        print(in_vval)
        print(fn_vval)
        print(numpy_vval)
        assert numpy.all(numpy_vval == fn_vval)

        # Trying to shuffle a vector with function that should shuffle
        # matrices, or vice versa, raises a TypeError
        self.assertRaises(TypeError, f1, in_mval)
        self.assertRaises(TypeError, f, in_vval)
# Example #3
# 0
    def call(self, x, **kwargs):
        """Return ``x`` masked by a random per-row binary mask, concatenated
        with the mask itself.

        For each sample a random integer ``i`` is drawn, a prefix mask of
        ``i`` ones (zeros elsewhere) is built over the feature axis, and the
        positions of that mask are then shuffled row-wise.  The output is
        ``concatenate([x * mask, mask])``.

        Only the "theano" and "tensorflow" Keras backends are supported;
        any other backend raises NotImplementedError.

        NOTE(review): assumes ``x`` is a 2-D (samples, features) tensor --
        confirm against the layer's input spec.
        """
        if K.backend() == "theano":
            # Import lazily so a TensorFlow-only install never needs theano
            # (the original imported theano unconditionally).
            import theano
            from theano import tensor as T
            from theano.tensor.shared_randomstreams import RandomStreams
            mask_rng = RandomStreams(self.seed)

            # One random integer per sample: how many positions to unmask.
            ints = mask_rng.random_integers(size=K.expand_dims(x.shape[0], 0),
                                            high=x.shape[1] - 1)

            def set_value_at_position(i, ns_x):
                # Row of zeros with the first ``i`` entries set to 1.
                zeros = T.zeros_like(ns_x[0, :])
                return T.set_subtensor(zeros[:i], 1)

            result, updates = theano.scan(fn=set_value_at_position,
                                          outputs_info=None,
                                          sequences=ints,
                                          non_sequences=x)
            # Shuffle each prefix-mask row independently.
            mask = mask_rng.shuffle_row_elements(result)
        elif K.backend() == "tensorflow":
            import tensorflow as tf
            tf.set_random_seed(self.seed)
            ints = tf.random_uniform(shape=K.expand_dims(tf.shape(x)[0], 0),
                                     maxval=x.shape[1],
                                     dtype=tf.int32)
            result = tf.sequence_mask(ints, maxlen=x.shape[1])
            # A single parallel iteration in deterministic mode keeps the
            # per-row shuffles in a reproducible order.
            # (Replaces the obscure `cond and 1 or 10` idiom.)
            parallel_iterations = 1 if self._deterministic else 10
            mask = tf.cast(
                tf.map_fn(tf.random_shuffle,
                          result,
                          parallel_iterations=parallel_iterations), K.floatx())
        else:
            raise NotImplementedError()
        return K.concatenate([x * mask, mask])
# Example #4
# 0
class MaskGenerator(object):
    """Generate a random input ordering and per-layer connectivity vectors,
    from which 0/1 connectivity masks are derived (``_get_mask``).

    Keeps a shared ``ordering`` plus one connectivity vector per layer and
    compiles two theano functions: ``shuffle_ordering`` (permutes the
    ordering) and ``sample_connectivity`` (resamples hidden connectivity).

    NOTE(review): the structure matches MADE-style autoregressive mask
    generation -- confirm against the calling code.
    """
    def __init__(self, input_size, hidden_sizes, l, random_seed=1234):
        """Build shared state and compile the theano update functions.

        :param input_size: number of input dimensions.
        :param hidden_sizes: list of hidden-layer sizes.
        :param l: temperature-like factor applied inside the softmax of
            ``_get_p``.
        :param random_seed: seed for both random streams.
        """
        self._random_seed = random_seed
        # Two generators: MRG (GPU-capable) for the multinomial draws,
        # plain RandomStreams for the row shuffle.
        self._mrng = MRG_RandomStreams(seed=random_seed)
        self._rng = RandomStreams(seed=random_seed)

        self._hidden_sizes = hidden_sizes
        self._input_size = input_size
        self._l = l

        # Current permutation of the input dimensions (0 .. input_size-1).
        self.ordering = theano.shared(value=np.arange(
            input_size, dtype=theano.config.floatX),
                                      name='ordering',
                                      borrow=False)

        # Initial layer connectivity
        self.layers_connectivity = [
            theano.shared(value=(self.ordering + 1).eval(),
                          name='layer_connectivity_input',
                          borrow=False)
        ]
        for i in range(len(self._hidden_sizes)):
            self.layers_connectivity += [
                theano.shared(value=np.zeros((self._hidden_sizes[i]),
                                             dtype=theano.config.floatX),
                              name='layer_connectivity_hidden{0}'.format(i),
                              borrow=False)
            ]
        self.layers_connectivity += [self.ordering]

        ## Theano functions
        # Shuffle the ordering and keep the input connectivity
        # (ordering + 1) in sync with it.
        new_ordering = self._rng.shuffle_row_elements(self.ordering)
        self.shuffle_ordering = theano.function(
            name='shuffle_ordering',
            inputs=[],
            updates=[(self.ordering, new_ordering),
                     (self.layers_connectivity[0], new_ordering + 1)])

        self.layers_connectivity_updates = []
        for i in range(len(self._hidden_sizes)):
            self.layers_connectivity_updates += [
                self._get_hidden_layer_connectivity(i)
            ]
        # self.layers_connectivity_updates = [self._get_hidden_layer_connectivity(i) for i in range(len(self._hidden_sizes))]  # WTF THIS DO NOT WORK
        self.sample_connectivity = theano.function(
            name='sample_connectivity',
            inputs=[],
            updates=[(self.layers_connectivity[i + 1],
                      self.layers_connectivity_updates[i])
                     for i in range(len(self._hidden_sizes))])

        # Save random initial state so reset() can restore it exactly.
        self._initial_mrng_rstate = copy.deepcopy(self._mrng.rstate)
        self._initial_mrng_state_updates = [
            state_update[0].get_value()
            for state_update in self._mrng.state_updates
        ]

        # Ensuring valid initial connectivity
        self.sample_connectivity()

    def reset(self):
        """Restore ordering, connectivity and both RNG states to their
        construction-time values, then resample a valid connectivity."""
        # Set Original ordering
        self.ordering.set_value(
            np.arange(self._input_size, dtype=theano.config.floatX))

        # Reset RandomStreams
        self._rng.seed(self._random_seed)

        # Initial layer connectivity
        self.layers_connectivity[0].set_value((self.ordering + 1).eval())
        for i in range(1, len(self.layers_connectivity) - 1):
            self.layers_connectivity[i].set_value(
                np.zeros((self._hidden_sizes[i - 1]),
                         dtype=theano.config.floatX))
        self.layers_connectivity[-1].set_value(self.ordering.get_value())

        # Reset MRG_RandomStreams (GPU)
        self._mrng.rstate = self._initial_mrng_rstate
        for state, value in zip(self._mrng.state_updates,
                                self._initial_mrng_state_updates):
            state[0].set_value(value)

        self.sample_connectivity()

    def _get_p(self, start_choice):
        """Return a probability vector over connectivity values: zeros below
        ``start_choice``, softmax(l * ramp) from ``start_choice`` upward."""
        start_choice_idx = (start_choice - 1).astype('int32')
        p_vals = T.concatenate([
            T.zeros((start_choice_idx, )),
            T.nnet.nnet.softmax(self._l * T.arange(
                start_choice, self._input_size, dtype=theano.config.floatX))[0]
        ])
        p_vals = T.inc_subtensor(
            p_vals[start_choice_idx], 1.
        )  # Stupid hack because de multinomial does not contain a safety for numerical imprecision.
        return p_vals

    def _get_hidden_layer_connectivity(self, layerIdx):
        """Sample one connectivity value per unit of hidden layer ``layerIdx``
        (a multinomial draw converted to an index via cumsum + sum)."""
        layer_size = self._hidden_sizes[layerIdx]
        # Lower bound comes from the previous layer's (pending) connectivity.
        if layerIdx == 0:
            p_vals = self._get_p(T.min(self.layers_connectivity[layerIdx]))
        else:
            p_vals = self._get_p(
                T.min(self.layers_connectivity_updates[layerIdx - 1]))

        # #Implementations of np.choose in theano GPU
        # return T.nonzero(self._mrng.multinomial(pvals=[self._p_vals] * layer_size, dtype=theano.config.floatX))[1].astype(dtype=theano.config.floatX)
        # return T.argmax(self._mrng.multinomial(pvals=[self._p_vals] * layer_size, dtype=theano.config.floatX), axis=1)
        return T.sum(T.cumsum(self._mrng.multinomial(
            pvals=T.tile(p_vals[::-1][None, :], (layer_size, 1)),
            dtype=theano.config.floatX),
                              axis=1),
                     axis=1)

    def _get_mask(self, layerIdxIn, layerIdxOut):
        """0/1 mask: a unit pair is connected iff in-connectivity <= out."""
        return (self.layers_connectivity[layerIdxIn][:, None] <=
                self.layers_connectivity[layerIdxOut][None, :]).astype(
                    theano.config.floatX)

    def get_mask_layer_UPDATE(self, layerIdx):
        """Mask between layer ``layerIdx`` and the next layer."""
        return self._get_mask(layerIdx, layerIdx + 1)

    def get_direct_input_mask_layer_UPDATE(self, layerIdx):
        """Mask between the input layer and layer ``layerIdx``."""
        return self._get_mask(0, layerIdx)

    def get_direct_output_mask_layer_UPDATE(self, layerIdx):
        """Mask between layer ``layerIdx`` and the output layer."""
        return self._get_mask(layerIdx, -1)
# Example #5
# 0
    def run(self):
        """Train the configured network, track per-epoch validation error,
        then evaluate on the test set and persist results/outputs to disk.

        NOTE(review): Python 2 code (print statements, xrange) -- this
        method only runs under Python 2.  Relies on module-level names
        (``rng``, ``f``, ``config_file``, ``time``, ``np``, ``os``, ...)
        defined elsewhere in the file.
        """

        # Worker prepares and serves train/validation/test data for this
        # configuration.
        worker = Worker(self.in_window_shape, 
                self.out_window_shape, 
                self.pred_window_size,
                self.stride, 
                self.img_size, 
                self.classifier, 
                self.n_train_files, 
                self.n_test_files, 
                self.samples_per_image, 
                self.on_ratio, 
                self.directory_input, 
                self.directory_labels, 
                self.membrane_edges,
                self.layers_3D, 
                self.pre_processed_folder,
                self.batch_size,
                self.num_kernels,
                self.kernel_sizes,
                self.maxoutsize,
                self.params,
                self.eval_window_size,
                config_file,
                self.n_validation_samples)

        # Optionally regenerate the pre-processed dataset from raw images.
        if self.pre_process == True:
            print "Generating Train/Test Set..."

            worker.generate_train_data()

        data,n_train_samples = worker.get_train_data()

        # Load weight layers
        self.load_layers(self.load_n_layers)

        print 'Loading data ...'

        if self.predict_only == False:
            train_set_x,train_set_y = data[0],data[2]
            n_train_batches         = train_set_x.get_value(borrow=True).shape[0]

        valid_set_x,valid_set_y = data[1],data[3]
        n_valid_batches         = valid_set_x.get_value(borrow=True).shape[0]

        print 'Initializing neural network ...'

        # print error if batch size is to large
        if valid_set_y.eval().size<self.batch_size:
            print 'Error: Batch size is larger than size of validation set.'

        print 'Batch size: ',self.batch_size

        # Convert sample counts into batch counts.
        n_train_batches /= self.batch_size
        n_valid_batches /= self.batch_size

        # symbolic variables
        x       = T.matrix('x')        # input image data
        y       = T.matrix('y')        # input label data

        # Initialize networks: a training model plus a dropout-free
        # TestVersion sharing its weights for validation.
        if self.net == 'ConvNet':
            model = ConvNet(rng, 
                    self.batch_size, 
                    self.layers_3D, 
                    self.num_kernels, 
                    self.kernel_sizes, 
                    x, 
                    y,
                    self.in_window_shape,
                    self.out_window_shape,
                    self.pred_window_size,
                    self.classifier,
                    maxoutsize = self.maxoutsize, 
                    params = self.params, 
                    dropout = self.dropout)

            model_val = model.TestVersion(rng, 
                    self.batch_size, 
                    self.layers_3D, 
                    self.num_kernels, 
                    self.kernel_sizes, 
                    x, 
                    y,
                    self.in_window_shape,
                    self.out_window_shape,
                    self.pred_window_size,
                    self.classifier,
                    maxoutsize = self.maxoutsize, 
                    params = self.params, 
                    network = model, 
                    dropout = [0.,0.,0.,0.0])

        elif self.net == "FullyCon":
            model = FullyCon(rng, 
                    self.batch_size, 
                    self.layers_3D, 
                    x, 
                    y,
                    self.in_window_shape,
                    self.out_window_shape,
                    self.pred_window_size,
                    self.classifier,
                    params = self.params)

            model_val= model.TestVersion(rng, 
                    self.batch_size, 
                    self.layers_3D, 
                    x, 
                    y,
                    self.in_window_shape,
                    self.out_window_shape,
                    self.pred_window_size,
                    self.classifier,
                    params = self.params, 
                    network = model)
        elif self.net == "FullyConCompressed":
            model = FullyConCompressed(rng, 
                    self.batch_size, 
                    self.layers_3D, 
                    x, 
                    y,
                    self.in_window_shape,
                    self.out_window_shape,
                    self.pred_window_size,
                    self.classifier,
                    params = self.params)

            model_val= model.TestVersion(rng, 
                    self.batch_size, 
                    self.layers_3D, 
                    x, 
                    y,
                    self.in_window_shape,
                    self.out_window_shape,
                    self.pred_window_size,
                    self.classifier,
                    params = self.params, 
                    network = model)
        else:
            raise RuntimeError('Unable to load network: ' + str(self.net))

        # Initialize parameters and functions
        cost        = model.layer4.negative_log_likelihood(self.penalty_factor)  # Cost function
        self.params = model.params                                               # List of parameters
        grads       = T.grad(cost, self.params)                                     # Gradient
        index       = T.lscalar()                                                   # Index

        # Intialize optimizera
        optimizer = Optimizer()
        updates = optimizer.init_optimizer(self.optimizer, cost, self.params, self.optimizerData)
        srng = RandomStreams(seed=234)
        # Shared permutation of the training samples, reshuffled each epoch.
        perm = theano.shared(np.arange(train_set_x.eval().shape[0]))

        # Train functions
        if self.predict_only == False:
            train_model = theano.function(                                          
                        [index],                                                    
                            cost,                                                       
                            updates = updates,      
                            givens  = {                                                 
                                        x: train_set_x[perm[index * self.batch_size: (index + 1) * self.batch_size]], 
                                        y: train_set_y[perm[index * self.batch_size: (index + 1) * self.batch_size]]
                }                                                                   
            )


            # Initialize result arrays
            cost_results        = []
            val_results_pixel   = []
            time_results        = []

            predict_val = f.init_predict(valid_set_x, model_val,self.batch_size,x,index)

            # Solver
            try:
                print '... Solving'
                start_time = time.time()    
                for epoch in range(self.epochs):
                    t1 = time.time()
                    # NOTE(review): this rebinds the Python name `perm` to a
                    # symbolic expression; the shared variable captured by
                    # train_model's givens is not updated by this line --
                    # verify the shuffle actually takes effect.
                    perm              = srng.shuffle_row_elements(perm)
                    train_set_x,train_set_y = f.flip_rotate(train_set_x,
                            train_set_y,
                            self.in_window_shape,
                            self.out_window_shape,
                            perm,
                            index,
                            cost,
                            updates,
                            self.batch_size,
                            x,
                            y,
                            self.classifier,
                            self.layers_3D)

                    # One pass over all training batches, then validate.
                    costs             = [train_model(i) for i in xrange(n_train_batches)]
                    epoch_cost = np.mean(costs)
                    output_val = f.predict_set(predict_val,n_valid_batches,self.classifier, self.pred_window_size)
                    error_pixel,error_window = f.evaluate(output_val,valid_set_y.get_value(borrow=True),self.eval_window_size,self.classifier)
                    #error_pixel = 0.
                    #error_window = 0.

                    t2 = time.time()
                    epoch_time = (t2-t1)/60.

                    cost_results.append(epoch_cost)
                    val_results_pixel.append(error_pixel)
                    time_results.append(epoch_time)

                    # store parameters
                    self.save_params(self.get_params(), self.path)

                    if self.classifier in ["membrane","synapse"]:
                        print "Epoch {}    Training Cost: {:.5}   Validation Error (pixel/window): {:.5}/{:.5}    Time (epoch/total): {:.2} mins".format(epoch + 1, epoch_cost, error_pixel,error_window, epoch_time)
                    else:
                        print "Epoch {}    Training Cost: {:.5}   Validation Error, Membrane (pixel/window): {:.5}/{:.5}    Validation Error, Synapse (pixel/window): {:.5}/{:.5}   Time (epoch/total): {:.2} mins".format(epoch + 1, epoch_cost, error_pixel[0],error_window[0],error_pixel[1],error_window[1], epoch_time)
            except KeyboardInterrupt:
                # Allow the user to stop training early and still evaluate.
                print 'Exiting solver ...'
                print ''

            # End timer
            end_time = time.time()
            end_epochs = epoch+1

        # Best-effort save of the per-epoch curves (skipped in
        # predict-only mode where the result lists do not exist).
        try:
            results    = np.zeros((4, len(cost_results)))
            results[0] = np.array(cost_results)
            results[1] = np.array(val_results_pixel)
            results[2] = np.array(time_results)
            np.save(self.results_folder + 'results.npy', results)
        except:
            pass

        # Test-set evaluation, before and after output averaging.
        (output, 
         y, 
         error_pixel_before, 
         error_window_before, 
         error_pixel_after, 
         error_window_after) = worker.generate_test_data(model,x,y,index,self.net)

        print 'Error before averaging (pixel/window): ' + str(error_pixel_before) + "/" + str(error_window_before)
        print 'Error after averaging (pixel/window): ' + str(error_pixel_after) + "/" + str(error_window_after)

        # Save and write
        self.write_results(error_pixel_before,error_window_before,error_pixel_after,error_window_after)
        self.write_parameters(end_epochs,n_train_samples)

        np.save(self.results_folder + 'output.npy', output)
        np.save(self.results_folder + 'y.npy', y)
        self.write_last_run(self.ID_folder)

        # Optionally dump each predicted output plane as a TIFF image.
        make_set = True
        if make_set == True:
            from PIL import Image
            set_folder = self.results_folder+"/set/"
            if os.path.isdir(set_folder) != True:
                os.makedirs(set_folder)

            for n in xrange(output.shape[0]):
                im = Image.fromarray(np.uint8(output[n]*255))
                im.save(set_folder + "set_" + str(n) + ".tif")
# Example #6
# 0
class MaskGenerator(object):
    """Generate a random input ordering and per-layer connectivity vectors,
    from which 0/1 connectivity masks are derived (``_get_mask``).

    NOTE(review): the structure matches MADE-style autoregressive mask
    generation -- confirm against the calling code.
    """

    def __init__(self, input_size, hidden_sizes, l, random_seed=1234):
        """Build shared state and compile the theano update functions."""
        self._random_seed = random_seed
        # MRG stream (GPU-capable) for multinomial draws; plain stream
        # for the row shuffle.
        self._mrng = MRG_RandomStreams(seed=random_seed)
        self._rng = RandomStreams(seed=random_seed)

        self._hidden_sizes = hidden_sizes
        self._input_size = input_size
        self._l = l

        # Current permutation of the input dimensions (0 .. input_size-1).
        self.ordering = theano.shared(value=np.arange(input_size, dtype=theano.config.floatX), name='ordering', borrow=False)

        # Initial layer connectivity
        self.layers_connectivity = [theano.shared(value=(self.ordering + 1).eval(), name='layer_connectivity_input', borrow=False)]
        for i in range(len(self._hidden_sizes)):
            self.layers_connectivity += [theano.shared(value=np.zeros((self._hidden_sizes[i]), dtype=theano.config.floatX), name='layer_connectivity_hidden{0}'.format(i), borrow=False)]
        self.layers_connectivity += [self.ordering]

        ## Theano functions
        # Shuffle the ordering and keep the input connectivity in sync.
        new_ordering = self._rng.shuffle_row_elements(self.ordering)
        self.shuffle_ordering = theano.function(name='shuffle_ordering',
                                                inputs=[],
                                                updates=[(self.ordering, new_ordering), (self.layers_connectivity[0], new_ordering + 1)])

        self.layers_connectivity_updates = []
        for i in range(len(self._hidden_sizes)):
            self.layers_connectivity_updates += [self._get_hidden_layer_connectivity(i)]
        # self.layers_connectivity_updates = [self._get_hidden_layer_connectivity(i) for i in range(len(self._hidden_sizes))]  # WTF THIS DO NOT WORK
        self.sample_connectivity = theano.function(name='sample_connectivity',
                                                   inputs=[],
                                                   updates=[(self.layers_connectivity[i+1], self.layers_connectivity_updates[i]) for i in range(len(self._hidden_sizes))])

        # Save random initial state so reset() can restore it exactly.
        self._initial_mrng_rstate = copy.deepcopy(self._mrng.rstate)
        self._initial_mrng_state_updates = [state_update[0].get_value() for state_update in self._mrng.state_updates]

        # Ensuring valid initial connectivity
        self.sample_connectivity()

    def reset(self):
        """Restore ordering, connectivity and both RNG states to their
        construction-time values, then resample a valid connectivity."""
        # Set Original ordering
        self.ordering.set_value(np.arange(self._input_size, dtype=theano.config.floatX))

        # Reset RandomStreams
        self._rng.seed(self._random_seed)

        # Initial layer connectivity
        self.layers_connectivity[0].set_value((self.ordering + 1).eval())
        for i in range(1, len(self.layers_connectivity)-1):
            self.layers_connectivity[i].set_value(np.zeros((self._hidden_sizes[i-1]), dtype=theano.config.floatX))
        self.layers_connectivity[-1].set_value(self.ordering.get_value())

        # Reset MRG_RandomStreams (GPU)
        self._mrng.rstate = self._initial_mrng_rstate
        for state, value in zip(self._mrng.state_updates, self._initial_mrng_state_updates):
            state[0].set_value(value)

        self.sample_connectivity()

    def _get_p(self, start_choice):
        """Return a probability vector over connectivity values: zeros below
        ``start_choice``, softmax(l * ramp) from ``start_choice`` upward."""
        start_choice_idx = (start_choice-1).astype('int32')
        p_vals = T.concatenate([T.zeros((start_choice_idx,)), T.nnet.nnet.softmax(self._l * T.arange(start_choice, self._input_size, dtype=theano.config.floatX))[0]])
        p_vals = T.inc_subtensor(p_vals[start_choice_idx], 1.)  # Stupid hack because de multinomial does not contain a safety for numerical imprecision.
        return p_vals

    def _get_hidden_layer_connectivity(self, layerIdx):
        """Sample one connectivity value per unit of hidden layer ``layerIdx``
        (a multinomial draw converted to an index via cumsum + sum)."""
        layer_size = self._hidden_sizes[layerIdx]
        # Lower bound comes from the previous layer's (pending) connectivity.
        if layerIdx == 0:
            p_vals = self._get_p(T.min(self.layers_connectivity[layerIdx]))
        else:
            p_vals = self._get_p(T.min(self.layers_connectivity_updates[layerIdx-1]))

        # #Implementations of np.choose in theano GPU
        # return T.nonzero(self._mrng.multinomial(pvals=[self._p_vals] * layer_size, dtype=theano.config.floatX))[1].astype(dtype=theano.config.floatX)
        # return T.argmax(self._mrng.multinomial(pvals=[self._p_vals] * layer_size, dtype=theano.config.floatX), axis=1)
        return T.sum(T.cumsum(self._mrng.multinomial(pvals=T.tile(p_vals[::-1][None, :], (layer_size, 1)), dtype=theano.config.floatX), axis=1), axis=1)

    def _get_mask(self, layerIdxIn, layerIdxOut):
        """0/1 mask: a unit pair is connected iff in-connectivity <= out."""
        return (self.layers_connectivity[layerIdxIn][:, None] <= self.layers_connectivity[layerIdxOut][None, :]).astype(theano.config.floatX)

    def get_mask_layer_UPDATE(self, layerIdx):
        """Mask between layer ``layerIdx`` and the next layer."""
        return self._get_mask(layerIdx, layerIdx + 1)

    def get_direct_input_mask_layer_UPDATE(self, layerIdx):
        """Mask between the input layer and layer ``layerIdx``."""
        return self._get_mask(0, layerIdx)

    def get_direct_output_mask_layer_UPDATE(self, layerIdx):
        """Mask between layer ``layerIdx`` and the output layer."""
        return self._get_mask(layerIdx, -1)
# Example #7
# 0
class MaskGenerator(object):
    """Generate a random input ordering and per-layer connectivity vectors,
    from which 0/1 connectivity masks are derived (``_get_mask``).

    NOTE(review): relies on a module-level ``floatX`` name in some places
    and ``theano.config.floatX`` in others -- presumably aliases of the
    same value; verify.  Structure matches MADE-style autoregressive mask
    generation -- confirm against the calling code.
    """

    def __init__(self, input_size, hidden_sizes, l, random_seed=1234):
        """Build shared state and compile the theano update functions."""
        self._random_seed = random_seed
        # MRG stream (GPU-capable) for multinomial draws; plain stream
        # for the row shuffle.
        self._mrng = MRG_RandomStreams(seed=random_seed)
        self._rng = RandomStreams(seed=random_seed)

        self._hidden_sizes = hidden_sizes
        self._input_size = input_size
        self._l = l

        # Current permutation of the input dimensions (0 .. input_size-1).
        self.ordering = theano.shared(np.arange(input_size, 
                                                dtype=theano.config.floatX), 
                                      'ordering', 
                                      borrow=False)

        # Initial layer connectivity
        self.layers_connectivity = [theano.shared((self.ordering + 1).eval(), 
                                                  'layer_connectivity_input', 
                                                  borrow=False)]
        for i in range(len(self._hidden_sizes)):
            lc = theano.shared(np.zeros((self._hidden_sizes[i]),dtype=floatX), 
                               'layer_connectivity_hidden{0}'.format(i),
                               borrow=False)
            self.layers_connectivity += [lc]
        self.layers_connectivity += [self.ordering]

        ## Theano functions
        # Shuffle the ordering and keep the input connectivity in sync.
        new_ordering = self._rng.shuffle_row_elements(self.ordering)
        updates = [(self.ordering, new_ordering), 
                   (self.layers_connectivity[0], new_ordering + 1)]
        self.shuffle_ordering = theano.function(name='shuffle_ordering',
                                                inputs=[],
                                                updates=updates)

        self.layers_connectivity_updates = []
        for i in range(len(self._hidden_sizes)):
            lcu = self._get_hidden_layer_connectivity(i)
            self.layers_connectivity_updates += [lcu]

        hsizes = range(len(self._hidden_sizes))
        updates = [(self.layers_connectivity[i+1], 
                    self.layers_connectivity_updates[i]) for i in hsizes]
        self.sample_connectivity = theano.function(name='sample_connectivity',
                                                   inputs=[],
                                                   updates=updates)

        # Save random initial state so reset() can restore it exactly.
        self._initial_mrng_rstate = copy.deepcopy(self._mrng.rstate)
        self._initial_mrng_state_updates = [sup[0].get_value() for sup in 
                                            self._mrng.state_updates]

        # Ensuring valid initial connectivity
        self.sample_connectivity()

    def reset(self):
        """Restore ordering, connectivity and both RNG states to their
        construction-time values, then resample a valid connectivity."""
        # Set Original ordering
        self.ordering.set_value(np.arange(self._input_size, 
                                          dtype=theano.config.floatX))

        # Reset RandomStreams
        self._rng.seed(self._random_seed)

        # Initial layer connectivity
        self.layers_connectivity[0].set_value((self.ordering + 1).eval())
        for i in range(1, len(self.layers_connectivity)-1):
            value = np.zeros((self._hidden_sizes[i-1]), 
                             dtype=theano.config.floatX)
            self.layers_connectivity[i].set_value(value)
        self.layers_connectivity[-1].set_value(self.ordering.get_value())

        # Reset MRG_RandomStreams (GPU)
        self._mrng.rstate = self._initial_mrng_rstate
        states_values = zip(self._mrng.state_updates, 
                            self._initial_mrng_state_updates)
        for state, value in states_values:
            state[0].set_value(value)

        self.sample_connectivity()

    def _get_p(self, start_choice):
        """Return a probability vector over connectivity values: zeros below
        ``start_choice``, softmax(l * ramp) from ``start_choice`` upward."""
        start_choice_idx = (start_choice-1).astype('int32')
        prob = T.nnet.nnet.softmax(self._l * T.arange(start_choice, 
                                                      self._input_size, 
                                                      dtype=floatX))[0]
        p_vals = T.concatenate([T.zeros((start_choice_idx,)),prob])
        # Hack: guard against numerical imprecision in the multinomial.
        p_vals = T.inc_subtensor(p_vals[start_choice_idx], 1.)  
        return p_vals

    def _get_hidden_layer_connectivity(self, layerIdx):
        """Sample one connectivity value per unit of hidden layer ``layerIdx``
        (a multinomial draw converted to an index via cumsum + sum)."""
        layer_size = self._hidden_sizes[layerIdx]
        # Lower bound comes from the previous layer's (pending) connectivity.
        if layerIdx == 0:
            lc = self.layers_connectivity[layerIdx]
            p_vals = self._get_p(T.min(lc))
        else:
            lc = self.layers_connectivity_updates[layerIdx-1]
            p_vals = self._get_p(T.min(lc))

        return T.sum(
            T.cumsum(self._mrng.multinomial(
            pvals=T.tile(p_vals[::-1][None, :],(layer_size, 1)), 
            dtype=floatX), axis=1), axis=1
        )

    def _get_mask(self, layerIdxIn, layerIdxOut):
        """0/1 mask: a unit pair is connected iff in-connectivity <= out."""
        return (self.layers_connectivity[layerIdxIn][:, None] <= 
                self.layers_connectivity[layerIdxOut][None, :]).astype(floatX)

    def get_mask_layer_UPDATE(self, layerIdx):
        """Mask between layer ``layerIdx`` and the next layer."""
        return self._get_mask(layerIdx, layerIdx + 1)

    def get_direct_input_mask_layer_UPDATE(self, layerIdx):
        """Mask between the input layer and layer ``layerIdx``."""
        return self._get_mask(0, layerIdx)

    def get_direct_output_mask_layer_UPDATE(self, layerIdx):
        """Mask between layer ``layerIdx`` and the output layer."""
        return self._get_mask(layerIdx, -1)