Esempio n. 1
0
 def test_init(self):
     """Check that both bias initialization modes yield zero biases.

     With ``data=None`` there is nothing to derive the biases from, so
     both the 'AUTO' and the 'INVERSE_SIGMOID' strategies are expected
     to fall back to all-zero visible and hidden biases.
     """
     sys.stdout.write('Auto encoder -> Performing auto encoder initialization test ...')
     sys.stdout.flush()
     # Same model configuration, only the bias initialization differs.
     for bias_init in ('AUTO', 'INVERSE_SIGMOID'):
         model = MODEL.AutoEncoder(number_visibles=10,
                                   number_hiddens=10,
                                   data=None,
                                   visible_activation_function=AFct.SoftPlus,
                                   hidden_activation_function=AFct.SoftPlus,
                                   cost_function=CFct.SquaredError,
                                   initial_weights='AUTO',
                                   initial_visible_bias=bias_init,
                                   initial_hidden_bias=bias_init,
                                   initial_visible_offsets='AUTO',
                                   initial_hidden_offsets='AUTO')
         assert numx.all(model.bv == 0.0)
         assert numx.all(model.bh == 0.0)
     print(' successfully passed!')
     sys.stdout.flush()
Esempio n. 2
0
class Test_SAE_Model(unittest.TestCase):
    """Tests a two-layer stacked auto encoder (2 -> 4 -> 2 units) by
    comparing forward, backward, and reconstruction outputs for a fixed
    input batch against precomputed target values."""

    # Fixed seed so the 'AUTO' initialized parts are reproducible and the
    # hard-coded targets below stay valid.
    numx.random.seed(42)
    ae1 = MODEL.AutoEncoder(number_visibles=2,
                            number_hiddens=4,
                            data=None,
                            visible_activation_function=AFct.SoftPlus,
                            hidden_activation_function=AFct.SoftPlus,
                            cost_function=CFct.SquaredError,
                            initial_weights=0.1,
                            initial_visible_bias='AUTO',
                            initial_hidden_bias='AUTO',
                            initial_visible_offsets='AUTO',
                            initial_hidden_offsets='AUTO')
    ae2 = MODEL.AutoEncoder(number_visibles=4,
                            number_hiddens=2,
                            data=None,
                            visible_activation_function=AFct.SoftPlus,
                            hidden_activation_function=AFct.SoftPlus,
                            cost_function=CFct.SquaredError,
                            initial_weights=0.1,
                            initial_visible_bias='INVERSE_SIGMOID',
                            initial_hidden_bias='INVERSE_SIGMOID',
                            initial_visible_offsets='AUTO',
                            initial_hidden_offsets='AUTO')
    stack = STACK.SAE([ae1, ae2])

    # Expected outputs for the input batch [[1, 2], [3, 4]].
    forward_target = numx.array([[0.66395408, 0.65454106],
                                 [0.6444802, 0.62371954]])
    backward_target = numx.array([[0.70836512, 0.69824519],
                                  [0.69078643, 0.68180852]])
    rec_target = numx.array([[0.71608233, 0.70979876],
                             [0.71634822, 0.71010236]])

    def test_forward(self):
        """Forward propagation reproduces the stored target values."""
        sys.stdout.write(
            'Stacked auto encoder -> Performing forward backward prop test ...'
        )
        sys.stdout.flush()
        batch = numx.array([[1, 2], [3, 4]])
        output = Test_SAE_Model.stack.forward_propagate(batch)
        deviation = numx.sum(numx.abs(output - Test_SAE_Model.forward_target))
        assert deviation < 0.000001
        print(' successfully passed!')
        sys.stdout.flush()

    def test_backward(self):
        """Backward propagation reproduces the stored target values."""
        sys.stdout.write(
            'Stacked auto encoder -> Performing forward backward prop test ...'
        )
        sys.stdout.flush()
        batch = numx.array([[1, 2], [3, 4]])
        output = Test_SAE_Model.stack.backward_propagate(batch)
        deviation = numx.sum(numx.abs(output - Test_SAE_Model.backward_target))
        assert deviation < 0.000001
        print(' successfully passed!')
        sys.stdout.flush()

    def test_reconstruct(self):
        """Reconstruction matches the target, both when composed manually
        (backward of forward) and via the convenience reconstruct call."""
        sys.stdout.write(
            'Stacked auto encoder -> Performing forward backward prop test ...'
        )
        sys.stdout.flush()
        batch = numx.array([[1, 2], [3, 4]])
        manual = Test_SAE_Model.stack.backward_propagate(
            Test_SAE_Model.stack.forward_propagate(batch))
        assert numx.sum(numx.abs(manual - Test_SAE_Model.rec_target)) < 0.000001
        direct = Test_SAE_Model.stack.reconstruct(batch)
        assert numx.sum(numx.abs(direct - Test_SAE_Model.rec_target)) < 0.000001
        print(' successfully passed!')
        sys.stdout.flush()
Esempio n. 3
0
# Split into training and test data
train_data = data[0:50000]
test_data = data[50000:70000]

# Set hyperparameters batchsize and number of epochs
batch_size = 10
max_epochs = 20

# Create model with sigmoid hidden units, linear output units, and squared error.
ae = aeModel.AutoEncoder(
    v1 * v2,
    h1 * h2,
    data=train_data,
    visible_activation_function=act.Identity(),
    hidden_activation_function=act.Sigmoid(),
    cost_function=cost.SquaredError(),
    initial_weights=0.01,
    initial_visible_bias=0.0,
    initial_hidden_bias=-2.0,
    # Set initially the units to be inactive, speeds up learning a little bit
    initial_visible_offsets=0.0,
    initial_hidden_offsets=0.02,
    dtype=numx.float64)

# Initialized gradient descent trainer
trainer = aeTrainer.GDTrainer(ae)

# Train model (print() calls for Python 3 compatibility, consistent with
# the other examples in this file)
print('Training')
print('Epoch\tRE train\t\tRE test\t\t\tSparsness train\t\tSparsness test ')
for epoch in range(0, max_epochs + 1, 1):
Esempio n. 4
0
    def check_all(self, data, epsilon, contractive, sparseness, desired_sparseness, data_next, slowness_penalty):
        ''' Checks the analytic gradients of the auto encoder against finite
            differences for all combinations of cost function, input/output
            activation function, and centered/uncentered offsets.

            Asserts that the maximum absolute finite-difference deviation of
            the weight, bias, and offset gradients stays below 1e-4.
        '''
        N = data.shape[1]
        M = 2 * data.shape[1]
        # Fixed random parameters, shared by every combination below.
        weights = numx.random.randn(N, M) * 0.1
        bv = numx.random.randn(1, N) * 0.1
        bh = numx.random.randn(1, M) * 0.1
        ov = numx.random.random((1, N))
        oh = numx.random.random((1, M))
        tolerance = 0.0001
        for loss in [CFct.SquaredError, CFct.CrossEntropyError]:
            for act_in in [AFct.Identity, AFct.SoftPlus, AFct.Sigmoid, AFct.HyperbolicTangent, AFct.RadialBasis()]:
                for act_out in [AFct.Identity, AFct.SoftPlus, AFct.Sigmoid, AFct.HyperbolicTangent, AFct.RadialBasis()]:
                    # Cross entropy is only valid with a Sigmoid input layer.
                    if loss == CFct.CrossEntropyError and act_in != AFct.Sigmoid:
                        continue
                    # Check both the uncentered (zero offsets) and the
                    # centered (random offsets) variant of the model.
                    for off_v, off_h, verbose in [(0, 0, False), (ov, oh, True)]:
                        ae = MODEL.AutoEncoder(number_visibles=N,
                                               number_hiddens=M,
                                               data=None,
                                               visible_activation_function=act_in,
                                               hidden_activation_function=act_out,
                                               cost_function=loss,
                                               initial_weights=weights,
                                               initial_visible_bias=bv,
                                               initial_hidden_bias=bh,
                                               initial_visible_offsets=off_v,
                                               initial_hidden_offsets=off_h)
                        # NOTE(review): the step size is hard-coded to 0.001;
                        # the 'epsilon' parameter is unused — confirm whether
                        # it was meant to be passed here.
                        w, b, c = ae.finit_differences(data, 0.001, sparseness, desired_sparseness,
                                                       contractive, slowness_penalty, data_next)
                        maxW = numx.max(numx.abs(w))
                        maxb = numx.max(numx.abs(b))
                        maxc = numx.max(numx.abs(c))
                        if maxW > tolerance or maxb > tolerance or maxc > tolerance:
                            print("Gradient check failed for ae with: ")
                            print(" CENTERING ", loss, " ", act_in, " ", act_out)
                            if verbose:
                                print(maxW, '\t', maxb, '\t', maxc)
                        assert maxW < tolerance
                        assert maxb < tolerance
                        assert maxc < tolerance
Esempio n. 5
0
    def test_trainer(self):
        ''' Checks if Auto encoder converges in terms of rec error.

            First sweeps all hidden activations against each cost function
            (cross entropy only with a Sigmoid visible layer), then runs a
            series of regularization scenarios (centering, momentum, L1/L2,
            sparseness, contractive, slowness, gradient restriction).
            ``perform_training`` asserts convergence for every configuration.
        '''
        sys.stdout.write(
            'Auto encoder -> Performing trainer convergences check ...')
        sys.stdout.flush()
        data = generate_bars_and_stripes_complete(4)
        data_next = numx.random.permutation(
            generate_bars_and_stripes_complete(4))

        all_acts = [AFct.Identity, AFct.SoftSign, AFct.Rectifier,
                    AFct.SoftPlus, AFct.Sigmoid, AFct.HyperbolicTangent]
        # (cost function, allowed visible activations, learning rate).
        # Cross entropy is only paired with a Sigmoid visible layer.
        sweeps = [(CFct.CrossEntropyError, [AFct.Sigmoid], 0.01),
                  (CFct.SquaredError, all_acts, 0.01),
                  (CFct.AbsoluteError, all_acts, 0.005)]
        for cost, act_in_candidates, epsilon in sweeps:
            for act_out in all_acts:
                for act_in in act_in_candidates:
                    ae = MODEL.AutoEncoder(number_visibles=16,
                                           number_hiddens=20,
                                           data=data,
                                           visible_activation_function=act_in,
                                           hidden_activation_function=act_out,
                                           cost_function=cost,
                                           initial_weights='AUTO',
                                           initial_visible_bias='AUTO',
                                           initial_hidden_bias='AUTO',
                                           initial_visible_offsets='AUTO',
                                           initial_hidden_offsets='AUTO')
                    self.perform_training(ae=ae,
                                          data=data,
                                          epsilon=epsilon,
                                          momentum=0.0,
                                          update_visible_offsets=0.0,
                                          update_hidden_offsets=0.0,
                                          corruptor=None,
                                          reg_L1Norm=0.0,
                                          reg_L2Norm=0.0,
                                          reg_sparseness=0.0,
                                          desired_sparseness=0.0,
                                          reg_contractive=0.0,
                                          reg_slowness=0.0,
                                          data_next=None,
                                          restrict_gradient=0.0,
                                          restriction_norm='Cols')

        # Regularization scenarios: each entry is a pair of override dicts
        # applied on top of the defaults below, in the original run order.
        ae_defaults = dict(number_visibles=16,
                           number_hiddens=20,
                           data=data,
                           visible_activation_function=AFct.Sigmoid,
                           hidden_activation_function=AFct.Sigmoid,
                           cost_function=CFct.CrossEntropyError,
                           initial_weights='AUTO',
                           initial_visible_bias='AUTO',
                           initial_hidden_bias='AUTO',
                           initial_visible_offsets='AUTO',
                           initial_hidden_offsets='AUTO',
                           dtype=numx.float64)
        train_defaults = dict(data=data,
                              epsilon=0.01,
                              momentum=0.0,
                              update_visible_offsets=0.01,
                              update_hidden_offsets=0.01,
                              corruptor=None,
                              reg_L1Norm=0.0,
                              reg_L2Norm=0.0,
                              reg_sparseness=0.0,
                              desired_sparseness=0.0,
                              reg_contractive=0.0,
                              reg_slowness=0.0,
                              data_next=data_next,
                              restrict_gradient=None,
                              restriction_norm='Mat')
        scenarios = [
            # Normal: uncentered model, offsets not updated
            (dict(data=None,
                  initial_visible_offsets=0,
                  initial_hidden_offsets=0),
             dict(update_visible_offsets=0.0,
                  update_hidden_offsets=0.0,
                  desired_sparseness=0.1)),
            # Centered
            ({}, dict(desired_sparseness=0.1)),
            # Momentum
            (dict(data=None), dict(momentum=0.9, desired_sparseness=0.1)),
            # L1 L2 Norm
            ({}, dict(reg_L1Norm=0.0002, reg_L2Norm=0.0002,
                      desired_sparseness=0.1)),
            # Sparse
            ({}, dict(reg_sparseness=0.1, desired_sparseness=0.1)),
            # Contractive
            ({}, dict(reg_contractive=0.1, desired_sparseness=0.1)),
            # Slowness
            ({}, dict(reg_slowness=0.1)),
            # Restrict Mat
            ({}, dict(restrict_gradient=0.1)),
            # Restrict rows
            ({}, dict(restrict_gradient=0.1, restriction_norm='Rows')),
            # Restrict Cols
            ({}, dict(restrict_gradient=0.1, restriction_norm='Cols')),
        ]
        for ae_overrides, train_overrides in scenarios:
            ae = MODEL.AutoEncoder(**dict(ae_defaults, **ae_overrides))
            self.perform_training(ae=ae, **dict(train_defaults, **train_overrides))
        print(' successfully passed!')
        sys.stdout.flush()
Esempio n. 6
0
# Load data , get it from 'deeplearning.net/data/mnist/mnist.pkl.gz'
train_data, _, _, _, test_data, _ = io.load_mnist("../../data/mnist.pkl.gz",
                                                  False)

# Set hyperparameters batchsize and number of epochs
batch_size = 10
max_epochs = 10

# Create model with sigmoid hidden units, linear output units, and squared error loss.
ae = aeModel.AutoEncoder(v1 * v2,
                         h1 * h2,
                         data=train_data,
                         visible_activation_function=act.Sigmoid(),
                         hidden_activation_function=act.Sigmoid(),
                         cost_function=cost.CrossEntropyError(),
                         initial_weights='AUTO',
                         initial_visible_bias='AUTO',
                         initial_hidden_bias='AUTO',
                         initial_visible_offsets='AUTO',
                         initial_hidden_offsets='AUTO',
                         dtype=numx.float64)

# Initialized gradient descent trainer
trainer = aeTrainer.GDTrainer(ae)

# Train model (print() calls for Python 3 compatibility, consistent with
# the other examples in this file)
print('Training')
print('Epoch\tRE train\t\tRE test\t\t\tSparsness train\t\tSparsness test ')
for epoch in range(0, max_epochs + 1, 1):

    # Shuffle data