Example #1
def test_JFATrainer_updateZandD():
    # test the JFATrainer for updating Z and D

    d_ref = numpy.array([0.3110, 1.0138, 0.8297, 1.0382, 0.0095, 0.6320])

    z1 = numpy.array([0., 0., 0., 0., 0., 0.])
    z2 = numpy.array([0., 0., 0., 0., 0., 0.])
    z3_ref = numpy.array([0.3256, 1.8633, 0.6480, 0.8085, -0.0432, 0.2885])
    z4_ref = numpy.array([-0.3324, -0.1474, -0.4404, -0.4529, 0.0484, -0.5848])
    z = [z1, z2]

    # Run the Z/D update: e_step_d estimates Z, m_step_d updates D
    ubm = GMMMachine(2, 3)
    ubm.mean_supervector = UBM_MEAN
    ubm.variance_supervector = UBM_VAR
    m = JFABase(ubm, 2, 2)
    t = JFATrainer()
    t.initialize(m, TRAINING_STATS)
    m.u = M_u
    m.v = M_v
    m.d = M_d
    t.__X__ = M_x
    t.__Y__ = M_y
    t.__Z__ = z
    t.e_step_d(m, TRAINING_STATS)
    t.m_step_d(m, TRAINING_STATS)

    # Expected results (JFA cookbook, Matlab)
    assert equals(t.__Z__[0], z3_ref, 2e-4)
    assert equals(t.__Z__[1], z4_ref, 2e-4)
    assert equals(m.d, d_ref, 2e-4)
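These snippets come from the bob.learn.em test suite and rely on module-level imports and data fixtures (UBM_MEAN, UBM_VAR, M_u, M_v, M_d, M_x, M_y, M_z, TRAINING_STATS) that are defined elsewhere in the test module and not shown on this page. The sketch below lists the imports the snippets appear to need, plus a presumed definition of the equals helper used by the assertions, assuming it is a plain element-wise tolerance check; treat both as assumptions rather than the module's actual scaffolding.

import os
import tempfile

import numpy
import nose.tools

import bob.core.random
import bob.io.base
import bob.learn.em
from bob.learn.em import GMMMachine, GMMStats, JFABase, JFAMachine, JFATrainer


def equals(x, y, epsilon):
    # Presumed helper behind the assertions: element-wise comparison
    # within an absolute tolerance.
    return (abs(numpy.asarray(x) - numpy.asarray(y)) < epsilon).all()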
Example #2
def test_JFATrainer_updateYandV():
    # test the JFATrainer for updating Y and V

    v_ref = numpy.array([
        0.7228, 0.7892, 0.6475, 0.6080, 0.8631, 0.8416, 1.6512, 1.6068, 0.0500,
        0.0101, 0.4325, 0.6719
    ]).reshape((6, 2))

    y1 = numpy.array([0., 0.])
    y2 = numpy.array([0., 0.])
    y3 = numpy.array([0.9630, 1.3868])
    y4 = numpy.array([0.0426, -0.3721])
    y = [y1, y2]

    # Run the Y/V update: e_step_v estimates Y, m_step_v updates V
    ubm = GMMMachine(2, 3)
    ubm.mean_supervector = UBM_MEAN
    ubm.variance_supervector = UBM_VAR
    m = JFABase(ubm, 2, 2)
    t = JFATrainer()
    t.initialize(m, TRAINING_STATS)
    m.u = M_u
    m.v = M_v
    m.d = M_d
    t.__X__ = M_x
    t.__Y__ = y
    t.__Z__ = M_z
    t.e_step_v(m, TRAINING_STATS)
    t.m_step_v(m, TRAINING_STATS)

    # Expected results (JFA cookbook, Matlab)
    assert equals(t.__Y__[0], y3, 2e-4)
    assert equals(t.__Y__[1], y4, 2e-4)
    assert equals(m.v, v_ref, 2e-4)
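For context, the update examples (#1 through #3) exercise the three factor terms of the JFA model. Below is a minimal sketch of recombining the estimated factors into a session-dependent mean supervector, assuming the usual JFA decomposition s = m + Vy + Ux + Dz with D = diag(d); the helper name is hypothetical.

def jfa_supervector(mean, V, y, U, x, d, z):
    # Assumed JFA model: session supervector s = m + V*y + U*x + D*z,
    # where D is the diagonal matrix built from d. Recombines the
    # factors that the update tests above and below estimate.
    return mean + V.dot(y) + U.dot(x) + d * z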
Example #3
def test_JFATrainer_updateXandU():
    # test the JFATrainer for updating X and U

    u_ref = numpy.array([
        0.6729, 0.3408, 0.0544, 1.0653, 0.5399, 1.3035, 2.4995, 0.4385, 0.1292,
        -0.0576, 1.1962, 0.0117
    ]).reshape((6, 2))

    x1 = numpy.array([0., 0., 0., 0.]).reshape((2, 2))
    x2 = numpy.array([0., 0., 0., 0.]).reshape((2, 2))
    x3 = numpy.array([0.2143, 1.8275, 3.1979, 0.1227]).reshape((2, 2))
    x4 = numpy.array([-1.3861, 0.2359, 5.3326, -0.7914]).reshape((2, 2))
    x = [x1, x2]

    # Run the X/U update: e_step_u estimates X, m_step_u updates U
    ubm = GMMMachine(2, 3)
    ubm.mean_supervector = UBM_MEAN
    ubm.variance_supervector = UBM_VAR
    m = JFABase(ubm, 2, 2)
    t = JFATrainer()
    t.initialize(m, TRAINING_STATS)
    m.u = M_u
    m.v = M_v
    m.d = M_d
    t.__X__ = x
    t.__Y__ = M_y
    t.__Z__ = M_z
    t.e_step_u(m, TRAINING_STATS)
    t.m_step_u(m, TRAINING_STATS)

    # Expected results (JFA cookbook, Matlab)
    assert equals(t.__X__[0], x3, 2e-4)
    assert equals(t.__X__[1], x4, 2e-4)
    assert equals(m.u, u_ref, 2e-4)
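The three tests above each run a single E-step/M-step pair in isolation. A full training run alternates these updates over several iterations, which is what bob.learn.em.train_jfa (used in the training example further down) drives internally. A rough, hand-rolled sketch, reusing the names from the snippets above and assuming V, U and D are refined in turn for a fixed number of iterations:

t = JFATrainer()
t.initialize(m, TRAINING_STATS)
for _ in range(10):                      # V/Y block
    t.e_step_v(m, TRAINING_STATS)
    t.m_step_v(m, TRAINING_STATS)
for _ in range(10):                      # U/X block
    t.e_step_u(m, TRAINING_STATS)
    t.m_step_u(m, TRAINING_STATS)
for _ in range(10):                      # D/Z block
    t.e_step_d(m, TRAINING_STATS)
    t.m_step_d(m, TRAINING_STATS)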
Example #7
def test_JFATrainInitialize():
    # Check that the initialization is reproducible when the same rng is used (cf. issue #118)

    eps = 1e-10

    # UBM GMM
    ubm = GMMMachine(2, 3)
    ubm.mean_supervector = UBM_MEAN
    ubm.variance_supervector = UBM_VAR

    ## JFA
    jb = JFABase(ubm, 2, 2)
    # first round
    rng = bob.core.random.mt19937(0)
    jt = JFATrainer()
    #jt.rng = rng
    jt.initialize(jb, TRAINING_STATS, rng)
    u1 = jb.u
    v1 = jb.v
    d1 = jb.d

    # second round
    rng = bob.core.random.mt19937(0)
    jt.initialize(jb, TRAINING_STATS, rng)
    u2 = jb.u
    v2 = jb.v
    d2 = jb.d

    assert numpy.allclose(u1, u2, eps)
    assert numpy.allclose(v1, v2, eps)
    assert numpy.allclose(d1, d2, eps)
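Conversely, two different seeds should presumably yield different random initializations. A quick sanity sketch along the same lines, reusing the names from the snippet above and assuming jb.u is handed back as a fresh numpy array:

rng_a = bob.core.random.mt19937(0)
rng_b = bob.core.random.mt19937(1)
jt.initialize(jb, TRAINING_STATS, rng_a)
u_a = numpy.copy(jb.u)
jt.initialize(jb, TRAINING_STATS, rng_b)
assert not numpy.allclose(u_a, jb.u, eps)   # different seed, different init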
Example #8
def test_JFABase():

  # Creates a UBM
  weights = numpy.array([0.4, 0.6], 'float64')
  means = numpy.array([[1, 6, 2], [4, 3, 2]], 'float64')
  variances = numpy.array([[1, 2, 1], [2, 1, 2]], 'float64')
  ubm = GMMMachine(2,3)
  ubm.weights = weights
  ubm.means = means
  ubm.variances = variances

  # Creates a JFABase
  U = numpy.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]], 'float64')
  V = numpy.array([[6, 5], [4, 3], [2, 1], [1, 2], [3, 4], [5, 6]], 'float64')
  d = numpy.array([0, 1, 0, 1, 0, 1], 'float64')
  m = JFABase(ubm, ru=1, rv=1)

  _,_,ru,rv = m.shape
  assert ru == 1
  assert rv == 1

  # Checks for correctness
  m.resize(2,2)
  m.u = U
  m.v = V
  m.d = d
  n_gaussians,dim,ru,rv = m.shape
  supervector_length    = m.supervector_length

  assert (m.u == U).all()
  assert (m.v == V).all()
  assert (m.d == d).all()
  assert n_gaussians        == 2
  assert dim                == 3
  assert supervector_length == 6
  assert ru                 == 2
  assert rv                 == 2

  # Saves and loads
  filename = str(tempfile.mkstemp(".hdf5")[1])
  m.save(bob.io.base.HDF5File(filename, 'w'))
  m_loaded = JFABase(bob.io.base.HDF5File(filename))
  m_loaded.ubm = ubm
  assert m == m_loaded
  assert (m != m_loaded) is False
  assert m.is_similar_to(m_loaded)

  # Copy constructor
  mc = JFABase(m)
  assert m == mc

  # Variant
  #mv = JFABase()
  # Checks for correctness
  #mv.ubm = ubm
  #mv.resize(2,2)
  #mv.u = U
  #mv.v = V
  #mv.d = d
  #assert (m.u == U).all()
  #assert (m.v == V).all()
  #assert (m.d == d).all()
  #assert m.dim_c == 2
  #assert m.dim_d == 3
  #assert m.dim_cd == 6
  #assert m.dim_ru == 2
  #assert m.dim_rv == 2

  # Clean-up
  os.unlink(filename)
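The shape bookkeeping in this test follows directly from the model sizes: with 2 Gaussians of dimension 3 the supervector length is 6, so after resize(2, 2) both U and V are 6x2 and d has length 6. A short sketch of those relations, assuming u, v and d are exposed as numpy arrays (which the element-wise comparisons above suggest):

n_gaussians, dim, ru, rv = m.shape                  # (2, 3, 2, 2)
assert m.supervector_length == n_gaussians * dim    # 2 * 3 == 6
assert m.u.shape == (n_gaussians * dim, ru)         # (6, 2)
assert m.v.shape == (n_gaussians * dim, rv)         # (6, 2)
assert m.d.shape == (n_gaussians * dim,)            # (6,)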
Example #9
def test_JFAMachine():

  # Creates a UBM
  weights   = numpy.array([0.4, 0.6], 'float64')
  means     = numpy.array([[1, 6, 2], [4, 3, 2]], 'float64')
  variances = numpy.array([[1, 2, 1], [2, 1, 2]], 'float64')
  ubm           = GMMMachine(2,3)
  ubm.weights   = weights
  ubm.means     = means
  ubm.variances = variances

  # Creates a JFABase
  U = numpy.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]], 'float64')
  V = numpy.array([[6, 5], [4, 3], [2, 1], [1, 2], [3, 4], [5, 6]], 'float64')
  d = numpy.array([0, 1, 0, 1, 0, 1], 'float64')
  base = JFABase(ubm,2,2)
  base.u = U
  base.v = V
  base.d = d

  # Creates a JFAMachine
  y = numpy.array([1,2], 'float64')
  z = numpy.array([3,4,1,2,0,1], 'float64')
  m = JFAMachine(base)
  m.y = y
  m.z = z
  n_gaussians,dim,ru,rv = m.shape
  supervector_length    = m.supervector_length

  assert n_gaussians        == 2
  assert dim                == 3
  assert supervector_length == 6
  assert ru                 == 2
  assert rv                 == 2
  assert (m.y == y).all()
  assert (m.z == z).all()

  # Saves and loads
  filename = str(tempfile.mkstemp(".hdf5")[1])
  m.save(bob.io.base.HDF5File(filename, 'w'))
  m_loaded = JFAMachine(bob.io.base.HDF5File(filename))
  m_loaded.jfa_base = base
  assert m == m_loaded
  assert (m != m_loaded) is False
  assert m.is_similar_to(m_loaded)

  # Copy constructor
  mc = JFAMachine(m)
  assert m == mc

  # Variant
  #mv = JFAMachine()
  # Checks for correctness
  #mv.jfa_base = base
  #m.y = y
  #m.z = z
  #assert m.dim_c == 2
  #assert m.dim_d == 3
  #assert m.dim_cd == 6
  #assert m.dim_ru == 2
  #assert m.dim_rv == 2
  #assert (m.y == y).all()
  #assert (m.z == z).all()

  # Defines GMMStats
  gs = GMMStats(2,3)
  log_likelihood = -3.
  T = 1
  n = numpy.array([0.4, 0.6], 'float64')
  sumpx = numpy.array([[1., 2., 3.], [4., 5., 6.]], 'float64')
  sumpxx = numpy.array([[10., 20., 30.], [40., 50., 60.]], 'float64')
  gs.log_likelihood = log_likelihood
  gs.t = T
  gs.n = n
  gs.sum_px = sumpx
  gs.sum_pxx = sumpxx

  # Forward GMMStats and check estimated value of the x speaker factor
  eps = 1e-10
  x_ref = numpy.array([0.291042849767692, 0.310273618998444], 'float64')
  score_ref = -2.111577181208289
  score = m.log_likelihood(gs)
  assert numpy.allclose(m.x, x_ref, eps)
  assert abs(score_ref-score) < eps

  # x and Ux
  x = numpy.ndarray((2,), numpy.float64)
  m.estimate_x(gs, x)
  n_gaussians, dim,_,_ = m.shape
  x_py = estimate_x(n_gaussians, dim, ubm.mean_supervector, ubm.variance_supervector, U, n, sumpx)
  assert numpy.allclose(x, x_py, eps)

  ux = numpy.ndarray((6,), numpy.float64)
  m.estimate_ux(gs, ux)
  n_gaussians, dim,_,_ = m.shape
  ux_py = estimate_ux(n_gaussians, dim, ubm.mean_supervector, ubm.variance_supervector, U, n, sumpx)
  assert numpy.allclose(ux, ux_py, eps)
  assert numpy.allclose(m.x, x, eps)

  score = m.forward_ux(gs, ux)

  assert abs(score_ref-score) < eps

  # Clean-up
  os.unlink(filename)
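The estimate_x reference used in the comparison above is a pure-Python helper defined elsewhere in the test module. The sketch below shows one plausible implementation with the same signature, assuming the standard JFA point estimate x = (I + U^T Sigma^-1 N U)^-1 U^T Sigma^-1 (F - N m), where N expands the zeroth-order counts to supervector size and F flattens the first-order statistics; it is an assumption, not the module's actual code.

def estimate_x_sketch(n_gaussians, dim, mean, variance, U, n, sumpx):
    # Hypothetical reference: standard JFA session-factor point estimate.
    N = numpy.repeat(n, dim)             # counts expanded per dimension
    F = sumpx.flatten()                  # first-order stats as a supervector
    inv_sigma = 1. / variance            # diagonal Sigma^-1
    A = numpy.eye(U.shape[1]) + U.T.dot((inv_sigma * N)[:, None] * U)
    b = U.T.dot(inv_sigma * (F - N * mean))
    return numpy.linalg.solve(A, b)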
Example #10
def test_JFATrainAndEnrol():
    # Train and enroll a JFAMachine

    # Calls the train function
    ubm = GMMMachine(2, 3)
    ubm.mean_supervector = UBM_MEAN
    ubm.variance_supervector = UBM_VAR
    mb = JFABase(ubm, 2, 2)
    t = JFATrainer()
    t.initialize(mb, TRAINING_STATS)
    mb.u = M_u
    mb.v = M_v
    mb.d = M_d
    bob.learn.em.train_jfa(t, mb, TRAINING_STATS, initialize=False)

    v_ref = numpy.array([[0.245364911936476, 0.978133261775424],
                         [0.769646805052223, 0.940070736856596],
                         [0.310779202800089, 1.456332053893072],
                         [0.184760934399551, 2.265139705602147],
                         [0.701987784039800, 0.081632150899400],
                         [0.074344030229297, 1.090248340917255]], 'float64')
    u_ref = numpy.array([[0.049424652628448, 0.060480486336896],
                         [0.178104127464007, 1.884873813495153],
                         [1.204011484266777, 2.281351307871720],
                         [7.278512126426286, -0.390966087173334],
                         [-0.084424326581145, -0.081725474934414],
                         [4.042143689831097, -0.262576386580701]], 'float64')
    d_ref = numpy.array([
        9.648467e-18, 2.63720683155e-12, 2.11822157653706e-10, 9.1047243e-17,
        1.41163442535567e-10, 3.30581e-19
    ], 'float64')

    eps = 1e-10
    assert numpy.allclose(mb.v, v_ref, eps)
    assert numpy.allclose(mb.u, u_ref, eps)
    assert numpy.allclose(mb.d, d_ref, eps)

    # Calls the enroll function
    m = JFAMachine(mb)

    Ne = numpy.array([0.1579, 0.9245, 0.1323, 0.2458]).reshape((2, 2))
    Fe = numpy.array([
        0.1579, 0.1925, 0.3242, 0.1234, 0.2354, 0.2734, 0.2514, 0.5874, 0.3345,
        0.2463, 0.4789, 0.5236
    ]).reshape((6, 2))
    gse1 = GMMStats(2, 3)
    gse1.n = Ne[:, 0]
    gse1.sum_px = Fe[:, 0].reshape(2, 3)
    gse2 = GMMStats(2, 3)
    gse2.n = Ne[:, 1]
    gse2.sum_px = Fe[:, 1].reshape(2, 3)

    gse = [gse1, gse2]
    t.enroll(m, gse, 5)

    y_ref = numpy.array([0.555991469319657, 0.002773650670010], 'float64')
    z_ref = numpy.array([
        8.2228e-20, 3.15216909492e-13, -1.48616735364395e-10, 1.0625905e-17,
        3.7150503117895e-11, 1.71104e-19
    ], 'float64')
    assert numpy.allclose(m.y, y_ref, eps)
    assert numpy.allclose(m.z, z_ref, eps)

    # Malformed training statistics should raise RuntimeError
    nose.tools.assert_raises(RuntimeError, t.initialize, mb, [1, 2, 2])
    nose.tools.assert_raises(RuntimeError, t.initialize, mb, [[1, 2, 2]])
    nose.tools.assert_raises(RuntimeError, t.e_step_u, mb, [1, 2, 2])
    nose.tools.assert_raises(RuntimeError, t.e_step_u, mb, [[1, 2, 2]])
    nose.tools.assert_raises(RuntimeError, t.m_step_u, mb, [1, 2, 2])
    nose.tools.assert_raises(RuntimeError, t.m_step_u, mb, [[1, 2, 2]])

    nose.tools.assert_raises(RuntimeError, t.e_step_v, mb, [1, 2, 2])
    nose.tools.assert_raises(RuntimeError, t.e_step_v, mb, [[1, 2, 2]])
    nose.tools.assert_raises(RuntimeError, t.m_step_v, mb, [1, 2, 2])
    nose.tools.assert_raises(RuntimeError, t.m_step_v, mb, [[1, 2, 2]])

    nose.tools.assert_raises(RuntimeError, t.e_step_d, mb, [1, 2, 2])
    nose.tools.assert_raises(RuntimeError, t.e_step_d, mb, [[1, 2, 2]])
    nose.tools.assert_raises(RuntimeError, t.m_step_d, mb, [1, 2, 2])
    nose.tools.assert_raises(RuntimeError, t.m_step_d, mb, [[1, 2, 2]])

    nose.tools.assert_raises(RuntimeError, t.enroll, m, [[1, 2, 2]], 5)
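Once training and enrollment have run, the enrolled machine can be scored directly against probe statistics via log_likelihood, as the JFAMachine example above shows. The probe values in this short usage sketch are made up purely for illustration.

probe = GMMStats(2, 3)
probe.n = numpy.array([0.3, 0.7], 'float64')             # made-up zeroth-order stats
probe.sum_px = numpy.array([[1., 2., 3.],
                            [4., 5., 6.]], 'float64')    # made-up first-order stats
score = m.log_likelihood(probe)                          # higher means a better match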