Example #1
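All examples below are drawn from the same test module and rely on its shared imports and module-level constants. A minimal preamble for running them might look like the sketch below; C_dim_d, C_dim_f and C_dim_g are inferred from the reference matrices used in the later examples (7, 2 and 3), while C_F, C_G and the pure-Python reference helpers (equals, compute_log_likelihood, compute_log_likelihood_point_estimate, PythonPLDATrainer, ...) are fixed test fixtures defined elsewhere in the module.

import math
import os
import tempfile

import nose.tools
import numpy

import bob.io.base
from bob.learn.em import PLDABase, PLDAMachine, PLDATrainer

# Inferred from the reference data below (assumed values):
C_dim_d = 7  # feature dimensionality
C_dim_f = 2  # rank of the between-class subspace F
C_dim_g = 3  # rank of the within-class subspace G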
def test_plda_machine_log_likelihood_Python():

  # Data used for performing the tests
  # Features and subspaces dimensionality
  sigma = numpy.ndarray(C_dim_d, 'float64')
  sigma.fill(0.01)
  mu = numpy.ndarray(C_dim_d, 'float64')
  mu.fill(0)

  # Defines base machine
  mb = PLDABase(C_dim_d, C_dim_f, C_dim_g)
  # Sets the current mu, F, G and sigma
  mb.mu = mu
  mb.f = C_F
  mb.g = C_G
  mb.sigma = sigma

  # Defines machine
  m = PLDAMachine(mb)

  # Defines (random) samples and checks the compute_log_likelihood method
  ar_e = numpy.random.randn(2,C_dim_d)
  ar_p = numpy.random.randn(C_dim_d)
  ar_s = numpy.vstack([ar_e, ar_p])
  assert abs(m.compute_log_likelihood(ar_s, False) - compute_log_likelihood(ar_s, mu, C_F, C_G, sigma)) < 1e-10
  ar_p2d = numpy.reshape(ar_p, (1,C_dim_d))

  ll_p = m.compute_log_likelihood(ar_p, False)
  assert abs(ll_p - compute_log_likelihood(ar_p2d, mu, C_F, C_G, sigma)) < 1e-10

  # Defines (random) samples and checks the forward method
  ar2_e = numpy.random.randn(4,C_dim_d)
  ar2_p = numpy.random.randn(C_dim_d)
  ar2_s = numpy.vstack([ar2_e, ar2_p])
  m.log_likelihood = m.compute_log_likelihood(ar2_e, False)
  llr = m.compute_log_likelihood(ar2_s, True) - (m.compute_log_likelihood(ar2_s, False) + m.log_likelihood)
  assert abs(m(ar2_s) - llr) < 1e-10
  ar2_p2d = numpy.random.randn(3,C_dim_d)
  ar2_s2d = numpy.vstack([ar2_e, ar2_p2d])
  llr2d = m.compute_log_likelihood(ar2_s2d, True) - (m.compute_log_likelihood(ar2_s2d, False) + m.log_likelihood)
  assert abs(m(ar2_s2d) - llr2d) < 1e-10
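The forward operator m(...) used above returns a log-likelihood ratio. It can be recomputed from three plain likelihood evaluations, exactly as test_plda_enrollment does further below; a sketch with a hypothetical helper name:

def llr_from_separate_likelihoods(m, enroll, probe):
  # Score = log p(enroll, probe | same identity)
  #         - log p(enroll) - log p(probe)
  joint = numpy.vstack([enroll, numpy.atleast_2d(probe)])
  ll_joint = m.compute_log_likelihood(joint, False)
  ll_enroll = m.compute_log_likelihood(enroll, False)
  ll_probe = m.compute_log_likelihood(numpy.atleast_2d(probe), False)
  return ll_joint - (ll_enroll + ll_probe)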
Example #2
def test_plda_basemachine_loglikelihood_pointestimate():

  # Data used for performing the tests
  # Features and subspaces dimensionality
  sigma = numpy.ndarray(C_dim_d, 'float64')
  sigma.fill(0.01)
  mu = numpy.ndarray(C_dim_d, 'float64')
  mu.fill(0)
  xij = numpy.array([0.7, 1.3, 2.5, 0.3, 1.3, 2.7, 0.9])
  hi = numpy.array([-0.5, 0.5])
  wij = numpy.array([-0.1, 0.2, 0.3])

  m = PLDABase(C_dim_d, C_dim_f, C_dim_g)
  # Sets the current mu, F, G and sigma
  m.mu = mu
  m.f = C_F
  m.g = C_G
  m.sigma = sigma

  #assert equals(m.compute_log_likelihood_point_estimate(xij, hi, wij), compute_log_likelihood_point_estimate(xij, mu, C_F, C_G, sigma, hi, wij), 1e-6)
  log_likelihood_point_estimate = m.compute_log_likelihood_point_estimate(xij, hi, wij)
  log_likelihood_point_estimate_python = compute_log_likelihood_point_estimate(xij, mu, C_F, C_G, sigma, hi, wij)
  assert equals(log_likelihood_point_estimate, log_likelihood_point_estimate_python, 1e-6)
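The point-estimate variant fixes the latent variables at the given h_i and w_ij instead of integrating them out, so the sample is simply Gaussian around mu + F h_i + G w_ij with diagonal covariance sigma. A plain-numpy sketch of that formula, assuming the standard PLDA factor model x_ij = mu + F h_i + G w_ij + eps:

def log_likelihood_point_estimate_sketch(x, mu, F, G, sigma, h, w):
  # log N(x; mu + F h + G w, diag(sigma))
  diff = x - (mu + F.dot(h) + G.dot(w))
  return -0.5 * (len(x) * numpy.log(2 * numpy.pi)
                 + numpy.sum(numpy.log(sigma))
                 + numpy.sum(diff ** 2 / sigma))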
Example #3
def test_plda_machine_log_likelihood_Prince():

  # Data used for performing the tests
  # Features and subspaces dimensionality
  D = 7
  nf = 2
  ng = 3

  # initial values for F, G and sigma
  G_init=numpy.array([-1.1424, -0.5044, -0.1917,
    -0.6249,  0.1021, -0.8658,
    -1.1687,  1.1963,  0.1807,
    0.3926,  0.1203,  1.2665,
    1.3018, -1.0368, -0.2512,
    -0.5936, -0.8571, -0.2046,
    0.4364, -0.1699, -2.2015]).reshape(D,ng)
  # F <-> PCA on G
  F_init=numpy.array([-0.054222647972093, -0.000000000783146,
    0.596449127693018,  0.000000006265167,
    0.298224563846509,  0.000000003132583,
    0.447336845769764,  0.000000009397750,
    -0.108445295944185, -0.000000001566292,
    -0.501559493741856, -0.000000006265167,
    -0.298224563846509, -0.000000003132583]).reshape(D,nf)
  sigma_init = 0.01 * numpy.ones((D,), 'float64')
  mean_zero = numpy.zeros((D,), 'float64')

  # base machine
  mb = PLDABase(D,nf,ng)
  mb.sigma = sigma_init
  mb.g = G_init
  mb.f = F_init
  mb.mu = mean_zero

  # Data for likelihood computation
  x1 = numpy.array([0.8032, 0.3503, 0.4587, 0.9511, 0.1330, 0.0703, 0.7061])
  x2 = numpy.array([0.9317, 0.1089, 0.6517, 0.1461, 0.6940, 0.6256, 0.0437])
  x3 = numpy.array([0.7979, 0.9862, 0.4367, 0.3447, 0.0488, 0.2252, 0.5810])
  X = numpy.vstack([x1, x2, x3])

  # reference likelihood from Prince implementation
  ll_ref = -182.8880743535197

  # machine
  m = PLDAMachine(mb)
  ll = m.compute_log_likelihood(X)
  assert abs(ll - ll_ref) < 1e-10

  # log likelihood ratio
  Y = numpy.vstack([x1, x2])
  Z = x3.reshape((1, D))
  llX = m.compute_log_likelihood(X)
  llY = m.compute_log_likelihood(Y)
  llZ = m.compute_log_likelihood(Z)
  # reference obtained by computing the likelihood of [x1,x2,x3], [x1,x2]
  # and [x3] separately
  llr_ref = -4.43695386675
  assert abs((llX - (llY + llZ)) - llr_ref) < 1e-10
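The reference value ll_ref can be cross-checked without the machine: under the PLDA model, n samples sharing one identity variable are jointly Gaussian, with F F^T as the covariance between any two samples and G G^T + diag(sigma) added on the block diagonal. A sketch of that check, assuming scipy is available:

from scipy.stats import multivariate_normal

def joint_log_likelihood_sketch(X, mu, F, G, sigma):
  # vec(X) ~ N(1_n (x) mu, 1 1^T (x) F F^T + I_n (x) (G G^T + diag(sigma)))
  n = X.shape[0]
  within = G.dot(G.T) + numpy.diag(sigma)  # per-sample covariance
  between = F.dot(F.T)                     # shared-identity covariance
  cov = numpy.kron(numpy.ones((n, n)), between) + numpy.kron(numpy.eye(n), within)
  return multivariate_normal.logpdf(X.ravel(), numpy.tile(mu, n), cov)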
Example #4
def test_plda_machine():

  # Data used for performing the tests
  # Features and subspaces dimensionality
  sigma = numpy.ndarray(C_dim_d, 'float64')
  sigma.fill(0.01)
  mu = numpy.ndarray(C_dim_d, 'float64')
  mu.fill(0)

  # Defines base machine
  mb = PLDABase(C_dim_d, C_dim_f, C_dim_g)
  # Sets the current mu, F, G and sigma
  mb.mu = mu
  mb.f = C_F
  mb.g = C_G
  mb.sigma = sigma

  # Test constructors and dim getters
  m = PLDAMachine(mb)
  assert m.shape[0] == C_dim_d
  assert m.shape[1] == C_dim_f
  assert m.shape[2] == C_dim_g

  m0 = PLDAMachine(mb)
  #m0.plda_base = mb
  assert m0.shape[0] == C_dim_d
  assert m0.shape[1] == C_dim_f
  assert m0.shape[2] == C_dim_g

  # Defines machine
  n_samples = 2
  WSumXitBetaXi = 0.37
  weightedSum = numpy.array([1.39,0.54], 'float64')
  log_likelihood = -0.22

  m.n_samples = n_samples
  m.w_sum_xit_beta_xi = WSumXitBetaXi
  m.weighted_sum = weightedSum
  m.log_likelihood = log_likelihood

  gamma3 = m.get_add_gamma(3).copy()
  constTerm3 = m.get_add_log_like_const_term(3)

  # Saves to file, loads and compares to original
  fd, filename = tempfile.mkstemp(".hdf5")
  os.close(fd)  # mkstemp also returns an open file descriptor; close it
  m.save(bob.io.base.HDF5File(filename, 'w'))
  m_loaded = PLDAMachine(bob.io.base.HDF5File(filename), mb)

  # Compares the values loaded with the former ones
  assert m_loaded == m
  assert (m_loaded != m) is False
  assert abs(m_loaded.n_samples - n_samples) < 1e-10
  assert abs(m_loaded.w_sum_xit_beta_xi - WSumXitBetaXi) < 1e-10
  assert equals(m_loaded.weighted_sum, weightedSum, 1e-10)
  assert abs(m_loaded.log_likelihood - log_likelihood) < 1e-10
  assert m_loaded.has_gamma(3)
  assert equals(m_loaded.get_add_gamma(3), gamma3, 1e-10)
  assert equals(m_loaded.get_gamma(3), gamma3, 1e-10)
  assert m_loaded.has_log_like_const_term(3)
  assert abs(m_loaded.get_add_log_like_const_term(3) - constTerm3) < 1e-10
  assert abs(m_loaded.get_log_like_const_term(3) - constTerm3) < 1e-10

  # Test clear_maps method
  assert m_loaded.has_gamma(3)
  assert m_loaded.has_log_like_const_term(3)
  m_loaded.clear_maps()
  assert (m_loaded.has_gamma(3)) is False
  assert (m_loaded.has_log_like_const_term(3)) is False

  # Check exceptions
  #m_loaded2 = PLDAMachine(bob.io.base.HDF5File(filename))
  #m_loaded2.load(bob.io.base.HDF5File(filename))
  #nose.tools.assert_raises(RuntimeError, getattr, m_loaded2, 'shape')
  #nose.tools.assert_raises(RuntimeError, getattr, m_loaded2, 'dim_f')
  #nose.tools.assert_raises(RuntimeError, getattr, m_loaded2, 'dim_g')
  #nose.tools.assert_raises(RuntimeError, m_loaded2.forward, [1.])
  #nose.tools.assert_raises(RuntimeError, m_loaded2.compute_log_likelihood, [1.])

  # Clean-up
  os.unlink(filename)
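The save/load/compare pattern above can be wrapped in a small context manager so the temporary file (and the descriptor that mkstemp returns) is always cleaned up; a sketch with a hypothetical helper name:

import contextlib

@contextlib.contextmanager
def hdf5_roundtrip(machine, base):
  # Save `machine` to a temporary HDF5 file, reload it against the same
  # PLDABase, yield the loaded copy, and always remove the file.
  fd, path = tempfile.mkstemp(".hdf5")
  os.close(fd)
  try:
    machine.save(bob.io.base.HDF5File(path, 'w'))
    yield PLDAMachine(bob.io.base.HDF5File(path), base)
  finally:
    os.unlink(path)

# Usage:
#   with hdf5_roundtrip(m, mb) as m_loaded:
#     assert m_loaded == m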
Example #5
def test_plda_basemachine():
  # Data used for performing the tests
  sigma = numpy.ndarray(C_dim_d, 'float64')
  sigma.fill(0.01)
  mu = numpy.ndarray(C_dim_d, 'float64')
  mu.fill(0)

  # Defines reference results based on matlab
  alpha_ref = numpy.array([ 0.002189051545735,  0.001127099941432,
    -0.000145483208153, 0.001127099941432,  0.003549267943741,
    -0.000552001405453, -0.000145483208153, -0.000552001405453,
    0.001440505362615], 'float64').reshape(C_dim_g, C_dim_g)
  beta_ref  = numpy.array([ 50.587191765140361, -14.512478352504877,
    -0.294799164567830,  13.382002504394316,  9.202063877660278,
    -43.182264846086497,  11.932345916716455, -14.512478352504878,
    82.320149045633045, -12.605578822979698,  19.618675892079366,
    13.033691341150439,  -8.004874490989799, -21.547363307109187,
    -0.294799164567832, -12.605578822979696,  52.123885798398241,
    4.363739008635009, 44.847177605628545,  16.438137537463710,
    5.137421840557050, 13.382002504394316,  19.618675892079366,
    4.363739008635011,  75.070401560513488, -4.515472972526140,
    9.752862741017488,  34.196127678931106, 9.202063877660285,
    13.033691341150439,  44.847177605628552,  -4.515472972526142,
    56.189416227691098,  -7.536676357632515, -10.555735414707383,
    -43.182264846086497,  -8.004874490989799,  16.438137537463703,
    9.752862741017490, -7.536676357632518,  56.430571485722126,
    9.471758169835317, 11.932345916716461, -21.547363307109187,
    5.137421840557051,  34.196127678931099, -10.555735414707385,
    9.471758169835320,  27.996266602110637], 'float64').reshape(C_dim_d, C_dim_d)
  gamma3_ref = numpy.array([ 0.005318799462241, -0.000000012993151,
    -0.000000012993151,  0.999999999999996], 'float64').reshape(C_dim_f, C_dim_f)

  # Constructor tests
  #m = PLDABase()
  #assert m.dim_d == 0
  #assert m.dim_f == 0
  #assert m.dim_g == 0
  #del m
  m = PLDABase(C_dim_d, C_dim_f, C_dim_g)
  assert m.shape[0] == C_dim_d
  assert m.shape[1] == C_dim_f
  assert m.shape[2] == C_dim_g
  assert abs(m.variance_threshold - 0.) < 1e-10
  del m
  m = PLDABase(C_dim_d, C_dim_f, C_dim_g, 1e-2)
  assert m.shape[0] == C_dim_d
  assert m.shape[1] == C_dim_f
  assert m.shape[2] == C_dim_g
  assert abs(m.variance_threshold - 1e-2) < 1e-10
  del m

  # Defines base machine
  m = PLDABase(C_dim_d, C_dim_f, C_dim_g)
  #m.resize(C_dim_d, C_dim_f, C_dim_g)
  # Sets the current mu, F, G and sigma
  m.mu = mu
  m.f = C_F
  m.g = C_G
  m.sigma = sigma
  gamma3 = m.get_add_gamma(3).copy()
  constTerm3 = m.get_add_log_like_const_term(3)

  # Compares precomputed values to matlab reference
  for ii in range(m.__alpha__.shape[0]):
    for jj in range(m.__alpha__.shape[1]):
      absdiff = abs(m.__alpha__[ii,jj]- alpha_ref[ii,jj])
      assert absdiff < 1e-10, 'PLDABase alpha matrix does not match reference at (%d,%d) to 10^-10: |%g-%g| = %g' % (ii, jj, m.__alpha__[ii,jj], alpha_ref[ii,jj], absdiff)
  assert equals(m.__alpha__, alpha_ref, 1e-10)
  assert equals(m.__beta__, beta_ref, 1e-10)
  assert equals(gamma3, gamma3_ref, 1e-10)

  # Compares precomputed values to the ones returned by python implementation
  assert equals(m.__isigma__, compute_i_sigma(sigma), 1e-10)
  assert equals(m.__alpha__, compute_alpha(C_G,sigma), 1e-10)
  assert equals(m.__beta__, compute_beta(C_G,sigma), 1e-10)
  assert equals(m.get_add_gamma(3), compute_gamma(C_F,C_G,sigma,3), 1e-10)
  assert m.has_gamma(3)
  assert equals(m.get_gamma(3), compute_gamma(C_F,C_G,sigma,3), 1e-10)
  assert equals(m.__ft_beta__, compute_ft_beta(C_F,C_G,sigma), 1e-10)
  assert equals(m.__gt_i_sigma__, compute_gt_i_sigma(C_G,sigma), 1e-10)
  assert math.fabs(m.__logdet_alpha__ - compute_logdet_alpha(C_G,sigma)) < 1e-10
  assert math.fabs(m.__logdet_sigma__ - compute_logdet_sigma(sigma)) < 1e-10
  assert abs(m.get_add_log_like_const_term(3) - compute_loglike_constterm(C_F,C_G,sigma,3)) < 1e-10
  assert m.has_log_like_const_term(3)
  assert abs(m.get_log_like_const_term(3) - compute_loglike_constterm(C_F,C_G,sigma,3)) < 1e-10

  # Defines base machine
  del m
  m = PLDABase(C_dim_d, C_dim_f, C_dim_g)
  # Sets the current mu, F, G and sigma
  m.mu = mu
  m.f = C_F
  m.g = C_G
  m.sigma = sigma
  gamma3 = m.get_add_gamma(3).copy()
  constTerm3 = m.get_add_log_like_const_term(3)

  # Compares precomputed values to matlab reference
  assert equals(m.__alpha__, alpha_ref, 1e-10)
  assert equals(m.__beta__, beta_ref, 1e-10)
  assert equals(gamma3, gamma3_ref, 1e-10)

  # values before being saved
  isigma = m.__isigma__.copy()
  alpha = m.__alpha__.copy()
  beta = m.__beta__.copy()
  FtBeta = m.__ft_beta__.copy()
  GtISigma = m.__gt_i_sigma__.copy()
  logdetAlpha = m.__logdet_alpha__
  logdetSigma = m.__logdet_sigma__

  # Saves to file, loads and compares to original
  fd, filename = tempfile.mkstemp(".hdf5")
  os.close(fd)  # mkstemp also returns an open file descriptor; close it
  m.save(bob.io.base.HDF5File(filename, 'w'))
  m_loaded = PLDABase(bob.io.base.HDF5File(filename))

  # Compares the values loaded with the former ones
  assert m_loaded == m
  assert (m_loaded != m) is False
  assert equals(m_loaded.mu, mu, 1e-10)
  assert equals(m_loaded.f, C_F, 1e-10)
  assert equals(m_loaded.g, C_G, 1e-10)
  assert equals(m_loaded.sigma, sigma, 1e-10)
  assert equals(m_loaded.__isigma__, isigma, 1e-10)
  assert equals(m_loaded.__alpha__, alpha, 1e-10)
  assert equals(m_loaded.__beta__, beta, 1e-10)
  assert equals(m_loaded.__ft_beta__, FtBeta, 1e-10)
  assert equals(m_loaded.__gt_i_sigma__, GtISigma, 1e-10)
  assert abs(m_loaded.__logdet_alpha__ - logdetAlpha) < 1e-10
  assert abs(m_loaded.__logdet_sigma__ - logdetSigma) < 1e-10
  assert m_loaded.has_gamma(3)
  assert equals(m_loaded.get_gamma(3), gamma3_ref, 1e-10)
  assert equals(m_loaded.get_add_gamma(3), gamma3_ref, 1e-10)
  assert m_loaded.has_log_like_const_term(3)
  assert abs(m_loaded.get_add_log_like_const_term(3) - constTerm3) < 1e-10

  # Compares the values loaded with the former ones when copying
  m_copy = PLDABase(m_loaded)
  assert m_loaded == m_copy
  assert (m_loaded != m_copy) is False
  # Test clear_maps method
  assert m_copy.has_gamma(3)
  assert m_copy.has_log_like_const_term(3)
  m_copy.clear_maps()
  assert (m_copy.has_gamma(3)) is False
  assert (m_copy.has_log_like_const_term(3)) is False

  # Check variance flooring thresholds-related methods
  v_zo = numpy.array([0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01])
  v_zo_ = 0.01
  v_zzo = numpy.array([0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001])
  v_zzo_ = 0.001
  m_copy.variance_threshold = v_zo_
  assert (m_loaded == m_copy) is False
  assert m_loaded != m_copy
  m_copy.variance_threshold = v_zzo_
  m_copy.sigma = v_zo
  assert equals(m_copy.sigma, v_zo, 1e-10)
  m_copy.variance_threshold = v_zo_
  m_copy.sigma = v_zzo
  assert equals(m_copy.sigma, v_zo, 1e-10)
  m_copy.variance_threshold = v_zzo_
  m_copy.sigma = v_zzo
  assert equals(m_copy.sigma, v_zzo, 1e-10)
  m_copy.variance_threshold = v_zo_
  assert equals(m_copy.sigma, v_zo, 1e-10)

  # Clean-up
  os.unlink(filename)
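The cached quantities compared above follow the usual PLDA precomputations: alpha = (I + G^T Sigma^-1 G)^-1, beta = (Sigma + G G^T)^-1 and gamma_a = (I + a F^T beta F)^-1, with Sigma = diag(sigma). A plain-numpy sketch of these presumed definitions, including the variance flooring exercised at the end of the test:

def precompute_sketch(F, G, sigma, variance_threshold=0.):
  # Variance flooring: entries of sigma below the threshold are raised to it.
  sigma = numpy.maximum(sigma, variance_threshold)
  isigma = 1.0 / sigma                 # diagonal of Sigma^-1
  gt_i_sigma = G.T * isigma            # G^T Sigma^-1
  alpha = numpy.linalg.inv(numpy.eye(G.shape[1]) + gt_i_sigma.dot(G))
  beta = numpy.linalg.inv(numpy.diag(sigma) + G.dot(G.T))
  ft_beta = F.T.dot(beta)              # F^T beta
  def gamma(a):
    # gamma_a = (I + a F^T beta F)^-1
    return numpy.linalg.inv(numpy.eye(F.shape[1]) + a * ft_beta.dot(F))
  return isigma, alpha, beta, ft_beta, gt_i_sigma, gamma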
Example #6
def test_plda_enrollment():
  # Data used for performing the tests
  # Features and subspaces dimensionality
  dim_d = 7
  dim_f = 2
  dim_g = 3

  # initial values for F, G and sigma
  G_init=numpy.array([-1.1424, -0.5044, -0.1917,
    -0.6249,  0.1021, -0.8658,
    -1.1687,  1.1963,  0.1807,
    0.3926,  0.1203,  1.2665,
    1.3018, -1.0368, -0.2512,
    -0.5936, -0.8571, -0.2046,
    0.4364, -0.1699, -2.2015]).reshape(dim_d,dim_g)
  # F <-> PCA on G
  F_init=numpy.array([-0.054222647972093, -0.000000000783146,
    0.596449127693018,  0.000000006265167,
    0.298224563846509,  0.000000003132583,
    0.447336845769764,  0.000000009397750,
    -0.108445295944185, -0.000000001566292,
    -0.501559493741856, -0.000000006265167,
    -0.298224563846509, -0.000000003132583]).reshape(dim_d,dim_f)
  sigma_init = 0.01 * numpy.ones((dim_d,), 'float64')
  mean_zero = numpy.zeros((dim_d,), 'float64')

  # base machine
  mb = PLDABase(dim_d,dim_f,dim_g)
  mb.sigma = sigma_init
  mb.g = G_init
  mb.f = F_init
  mb.mu = mean_zero

  # Data for likelihood computation
  x1 = numpy.array([0.8032, 0.3503, 0.4587, 0.9511, 0.1330, 0.0703, 0.7061])
  x2 = numpy.array([0.9317, 0.1089, 0.6517, 0.1461, 0.6940, 0.6256, 0.0437])
  x3 = numpy.array([0.7979, 0.9862, 0.4367, 0.3447, 0.0488, 0.2252, 0.5810])
  a_enroll = []
  a_enroll.append(x1)
  a_enroll.append(x2)
  a_enroll = numpy.array(a_enroll)

  # reference likelihood from Prince implementation
  ll_ref = -182.8880743535197

  # Computes the likelihood using x1 and x2 as enrollment samples
  # and x3 as a probe sample
  m = PLDAMachine(mb)
  t = PLDATrainer()
  t.enroll(m, a_enroll)
  ll = m.compute_log_likelihood(x3)

  assert abs(ll - ll_ref) < 1e-10

  # reference obtained by computing the likelihood of [x1,x2,x3], [x1,x2]
  # and [x3] separately
  llr_ref = -4.43695386675
  llr = m(x3)
  assert abs(llr - llr_ref) < 1e-10
  #
  llr_separate = m.compute_log_likelihood(numpy.array([x1,x2,x3]), False) - \
    (m.compute_log_likelihood(numpy.array([x1,x2]), False) + m.compute_log_likelihood(numpy.array([x3]), False))
  assert abs(llr - llr_separate) < 1e-10
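Condensed, the enrollment/scoring flow exercised above is the following (hypothetical wrapper name):

def plda_verify_sketch(base, enroll_samples, probe):
  # Enroll a client model from its samples, then score a probe with the
  # log-likelihood ratio returned by the forward operator.
  model = PLDAMachine(base)
  PLDATrainer().enroll(model, numpy.asarray(enroll_samples))
  return model(probe)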
Example #7
def test_plda_EM_vs_Prince():
  # Data used for performing the tests
  # Features and subspaces dimensionality
  dim_d = 7
  dim_f = 2
  dim_g = 3

  # first identity (4 samples)
  a = numpy.array([
    [1,2,3,4,5,6,7],
    [7,8,3,3,1,8,2],
    [3,2,1,4,5,1,7],
    [9,0,3,2,1,4,6],
    ], dtype='float64')

  # second identity (3 samples)
  b = numpy.array([
    [5,6,3,4,2,0,2],
    [1,7,8,9,4,4,8],
    [8,7,2,5,1,1,1],
    ], dtype='float64')

  # list of arrays (training data)
  l = [a,b]

  # initial values for F, G and sigma
  G_init=numpy.array([-1.1424, -0.5044, -0.1917,
    -0.6249,  0.1021, -0.8658,
    -1.1687,  1.1963,  0.1807,
    0.3926,  0.1203,  1.2665,
    1.3018, -1.0368, -0.2512,
    -0.5936, -0.8571, -0.2046,
    0.4364, -0.1699, -2.2015]).reshape(dim_d,dim_g)

  # F <-> PCA on G
  F_init=numpy.array([-0.054222647972093, -0.000000000783146,
    0.596449127693018,  0.000000006265167,
    0.298224563846509,  0.000000003132583,
    0.447336845769764,  0.000000009397750,
    -0.108445295944185, -0.000000001566292,
    -0.501559493741856, -0.000000006265167,
    -0.298224563846509, -0.000000003132583]).reshape(dim_d,dim_f)
  sigma_init = 0.01 * numpy.ones(dim_d, 'float64')

  # Defines reference results based on Prince's Matlab implementation
  # After 1 iteration
  z_first_order_a_1 = numpy.array(
    [-2.624115900658397, -0.000000034277848,  1.554823055585319,  0.627476234024656, -0.264705934182394,
     -2.624115900658397, -0.000000034277848, -2.703482671599357, -1.533283607433197,  0.553725774828231,
     -2.624115900658397, -0.000000034277848,  2.311647528461115,  1.266362142140170, -0.317378177105131,
     -2.624115900658397, -0.000000034277848, -1.163402640008200, -0.372604542926019,  0.025152800097991
    ]).reshape(4, dim_f+dim_g)
  z_first_order_b_1 = numpy.array(
    [ 3.494168818797438,  0.000000045643026,  0.111295550530958, -0.029241422535725,  0.257045446451067,
      3.494168818797438,  0.000000045643026,  1.102110715965762,  1.481232954001794, -0.970661225144399,
      3.494168818797438,  0.000000045643026, -1.212854031699468, -1.435946529317718,  0.717884143973377
    ]).reshape(3, dim_f+dim_g)

  z_second_order_sum_1 = numpy.array(
    [64.203518285366087,  0.000000747228248,  0.002703277337642,  0.078542842475345,  0.020894328259862,
      0.000000747228248,  6.999999999999980, -0.000000003955962,  0.000000002017232, -0.000000003741593,
      0.002703277337642, -0.000000003955962, 19.136889380923918, 11.860493771107487, -4.584339465366988,
      0.078542842475345,  0.000000002017232, 11.860493771107487,  8.771502339750128, -3.905706024997424,
      0.020894328259862, -0.000000003741593, -4.584339465366988, -3.905706024997424,  2.011924970338584
    ]).reshape(dim_f+dim_g, dim_f+dim_g)

  sigma_1 = numpy.array(
      [2.193659969999207, 3.748361365521041, 0.237835235737085,
        0.558546035892629, 0.209272700958400, 1.717782807724451,
        0.248414618308223])

  F_1 = numpy.array(
      [-0.059083416465692,  0.000000000751007,
        0.600133217253169,  0.000000006957266,
        0.302789123922871,  0.000000000218947,
        0.454540641429714,  0.000000003342540,
        -0.106608957780613, -0.000000001641389,
        -0.494267694269430, -0.000000011059552,
        -0.295956102084270, -0.000000006718366]).reshape(dim_d,dim_f)

  G_1 = numpy.array(
      [-1.836166150865047,  2.491475145758734,  5.095958946372235,
        -0.608732205531767, -0.618128420353493, -1.085423135463635,
        -0.697390472635929, -1.047900122276840, -6.080211153116984,
        0.769509301515319, -2.763610156675313, -5.972172587527176,
        1.332474692714491, -1.368103875407414, -2.096382536513033,
        0.304135903830416, -5.168096082564016, -9.604769461465978,
        0.597445549865284, -1.347101803379971, -5.900246013340080]).reshape(dim_d,dim_g)

  # After 2 iterations
  z_first_order_a_2 = numpy.array(
      [-2.144344161196005, -0.000000027851878,  1.217776189037369,  0.232492571855061, -0.212892893868819,
        -2.144344161196005, -0.000000027851878, -2.382647766948079, -1.759951013670071,  0.587213207926731,
        -2.144344161196005, -0.000000027851878,  2.143294830538722,  0.909307594408923, -0.183752098508072,
        -2.144344161196005, -0.000000027851878, -0.662558006326892,  0.717992497547010, -0.202897892977004
    ]).reshape(4, dim_f+dim_g)
  z_first_order_b_2 = numpy.array(
      [ 2.695117129662246,  0.000000035005543, -0.156173294945791, -0.123083763746364,  0.271123341933619,
        2.695117129662246,  0.000000035005543,  0.690321563509753,  0.944473716646212, -0.850835940962492,
        2.695117129662246,  0.000000035005543, -0.930970138998433, -0.949736472690315,  0.594216348861889
    ]).reshape(3, dim_f+dim_g)

  z_second_order_sum_2 = numpy.array(
      [41.602421167226410,  0.000000449434708, -1.513391506933811, -0.477818674270533,  0.059260102368316,
        0.000000449434708,  7.000000000000005, -0.000000023255959, -0.000000005157439, -0.000000003230262,
        -1.513391506933810, -0.000000023255959, 14.399631061987494,  8.068678077509025, -3.227586434905497,
        -0.477818674270533, -0.000000005157439,  8.068678077509025,  7.263248678863863, -3.060665688064639,
        0.059260102368316, -0.000000003230262, -3.227586434905497, -3.060665688064639,  1.705174220723198
    ]).reshape(dim_f+dim_g, dim_f+dim_g)

  sigma_2 = numpy.array(
    [1.120493935052524, 1.777598857891599, 0.197579528599150,
      0.407657093211478, 0.166216300651473, 1.044336960403809,
      0.287856936559308])

  F_2 = numpy.array(
    [-0.111956311978966,  0.000000000781025,
      0.702502767389263,  0.000000007683917,
      0.337823622542517,  0.000000000637302,
      0.551363737526339,  0.000000004854293,
     -0.096561040511417, -0.000000001716011,
     -0.661587484803602, -0.000000012394362,
     -0.346593051621620, -0.000000007134046]).reshape(dim_d,dim_f)

  G_2 = numpy.array(
    [-2.266404374274820,  4.089199685832099,  7.023039382876370,
      0.094887459097613, -3.226829318470136, -3.452279917194724,
     -0.498398131733141, -1.651712333649899, -6.548008210704172,
      0.574932298590327, -2.198978667003715, -5.131253543126156,
      1.415857426810629, -1.627795701160212, -2.509013676007012,
     -0.543552834305580, -3.215063993186718, -7.006305082499653,
      0.562108137758111, -0.785296641855087, -5.318335345720314]).reshape(dim_d,dim_g)

  # Runs the PLDA trainer EM-steps (2 steps)

  # Defines base trainer and machine
  t = PLDATrainer()
  t0 = PLDATrainer(t)
  m = PLDABase(dim_d,dim_f,dim_g)
  t.initialize(m,l)
  m.sigma = sigma_init
  m.g = G_init
  m.f = F_init

  # Defines base trainer and machine (for the Python implementation)
  t_py = PythonPLDATrainer()
  m_py = PLDABase(dim_d,dim_f,dim_g)
  t_py.initialize(m_py,l)
  m_py.sigma = sigma_init
  m_py.g = G_init
  m_py.f = F_init

  # E-step 1
  t.e_step(m,l)
  t_py.e_step(m_py,l)
  # Compares statistics to Prince matlab reference
  assert numpy.allclose(t.z_first_order[0], z_first_order_a_1, 1e-10)
  assert numpy.allclose(t.z_first_order[1], z_first_order_b_1, 1e-10)
  assert numpy.allclose(t.z_second_order_sum, z_second_order_sum_1, 1e-10)
  # Compares statistics against the ones of the python implementation
  assert numpy.allclose(t.z_first_order[0], t_py.m_z_first_order[0], 1e-10)
  assert numpy.allclose(t.z_first_order[1], t_py.m_z_first_order[1], 1e-10)
  assert numpy.allclose(t.z_second_order_sum, t_py.m_sum_z_second_order, 1e-10)

  # M-step 1
  t.m_step(m,l)
  t_py.m_step(m_py,l)
  # Compares F, G and sigma to Prince matlab reference
  assert numpy.allclose(m.f, F_1, 1e-10)
  assert numpy.allclose(m.g, G_1, 1e-10)
  assert numpy.allclose(m.sigma, sigma_1, 1e-10)
  # Compares F, G and sigma to the ones of the python implementation
  assert numpy.allclose(m.f, m_py.f, 1e-10)
  assert numpy.allclose(m.g, m_py.g, 1e-10)
  assert numpy.allclose(m.sigma, m_py.sigma, 1e-10)

  # E-step 2
  t.e_step(m,l)
  t_py.e_step(m_py,l)
  # Compares statistics to Prince matlab reference
  assert numpy.allclose(t.z_first_order[0], z_first_order_a_2, 1e-10)
  assert numpy.allclose(t.z_first_order[1], z_first_order_b_2, 1e-10)
  assert numpy.allclose(t.z_second_order_sum, z_second_order_sum_2, 1e-10)
  # Compares statistics against the ones of the python implementation
  assert numpy.allclose(t.z_first_order[0], t_py.m_z_first_order[0], 1e-10)
  assert numpy.allclose(t.z_first_order[1], t_py.m_z_first_order[1], 1e-10)
  assert numpy.allclose(t.z_second_order_sum, t_py.m_sum_z_second_order, 1e-10)

  # M-step 2
  t.m_step(m,l)
  t_py.m_step(m_py,l)
  # Compares F, G and sigma to Prince matlab reference
  assert numpy.allclose(m.f, F_2, 1e-10)
  assert numpy.allclose(m.g, G_2, 1e-10)
  assert numpy.allclose(m.sigma, sigma_2, 1e-10)
  # Compares F, G and sigma to the ones of the python implementation
  assert numpy.allclose(m.f, m_py.f, 1e-10)
  assert numpy.allclose(m.g, m_py.g, 1e-10)
  assert numpy.allclose(m.sigma, m_py.sigma, 1e-10)


  # Test the second order statistics computation
  # Calls the initialization methods and resets randomly initialized values
  # to new reference ones (to make the tests deterministic)
  t.use_sum_second_order = False
  t.initialize(m,l)
  m.sigma = sigma_init
  m.g = G_init
  m.f = F_init
  t_py.initialize(m_py,l)
  m_py.sigma = sigma_init
  m_py.g = G_init
  m_py.f = F_init

  # E-step 1
  t.e_step(m,l)
  t_py.e_step(m_py,l)
  # Compares statistics to Prince matlab reference
  assert numpy.allclose(t.z_first_order[0], z_first_order_a_1, 1e-10)
  assert numpy.allclose(t.z_first_order[1], z_first_order_b_1, 1e-10)
  # Compares statistics against the ones of the python implementation
  assert numpy.allclose(t.z_first_order[0], t_py.m_z_first_order[0], 1e-10)
  assert numpy.allclose(t.z_first_order[1], t_py.m_z_first_order[1], 1e-10)
  assert numpy.allclose(t.z_second_order[0], t_py.m_z_second_order[0], 1e-10)
  assert numpy.allclose(t.z_second_order[1], t_py.m_z_second_order[1], 1e-10)
  assert numpy.allclose(t.z_second_order_sum, t_py.m_sum_z_second_order, 1e-10)

  # M-step 1
  t.m_step(m,l)
  t_py.m_step(m_py,l)
  # Compares F, G and sigma to the ones of the python implementation
  assert numpy.allclose(m.f, m_py.f, 1e-10)
  assert numpy.allclose(m.g, m_py.g, 1e-10)
  assert numpy.allclose(m.sigma, m_py.sigma, 1e-10)

  # E-step 2
  t.e_step(m,l)
  t_py.e_step(m_py,l)
  # Compares statistics to Prince matlab reference
  assert numpy.allclose(t.z_first_order[0], z_first_order_a_2, 1e-10)
  assert numpy.allclose(t.z_first_order[1], z_first_order_b_2, 1e-10)
  # Compares statistics against the ones of the python implementation
  assert numpy.allclose(t.z_first_order[0], t_py.m_z_first_order[0], 1e-10)
  assert numpy.allclose(t.z_first_order[1], t_py.m_z_first_order[1], 1e-10)
  assert numpy.allclose(t.z_second_order[0], t_py.m_z_second_order[0], 1e-10)
  assert numpy.allclose(t.z_second_order[1], t_py.m_z_second_order[1], 1e-10)
  assert numpy.allclose(t.z_second_order_sum, t_py.m_sum_z_second_order, 1e-10)

  # M-step 2
  t.m_step(m,l)
  t_py.m_step(m_py,l)
  # Compares F, G and sigma to the ones of the python implementation
  assert numpy.allclose(m.f, m_py.f, 1e-10)
  assert numpy.allclose(m.g, m_py.g, 1e-10)
  assert numpy.allclose(m.sigma, m_py.sigma, 1e-10)

  # Testing exceptions
  nose.tools.assert_raises(RuntimeError, t.initialize, m, [1,2,2])
  nose.tools.assert_raises(RuntimeError, t.e_step, m, [1,2,2])
  nose.tools.assert_raises(RuntimeError, t.m_step, m, [1,2,2])
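The two EM iterations above unroll the standard training loop; a sketch of the same flow as a reusable function, using only the trainer calls exercised in this test:

def train_plda_sketch(data, dim_d, dim_f, dim_g, n_iterations=10):
  # data: list of 2D arrays, one per identity (samples x features)
  base = PLDABase(dim_d, dim_f, dim_g)
  trainer = PLDATrainer()
  trainer.initialize(base, data)   # also randomly initializes F, G and sigma
  for _ in range(n_iterations):
    trainer.e_step(base, data)     # posterior statistics of the latent variables
    trainer.m_step(base, data)     # updates of F, G and sigma
  return base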