Example #1
def test_gmm_test():

  # Tests a GMMMachine by computing scores against a model and comparing
  # them to an old reference

  ar = bob.io.base.load(datafile('dataforMAP.hdf5', __name__, path="../data/"))

  # Initialize GMMMachine
  n_gaussians = 5
  n_inputs = 45
  gmm = GMMMachine(n_gaussians, n_inputs)
  gmm.means = bob.io.base.load(datafile('meansAfterML.hdf5', __name__, path="../data/"))
  gmm.variances = bob.io.base.load(datafile('variancesAfterML.hdf5', __name__, path="../data/"))
  gmm.weights = bob.io.base.load(datafile('weightsAfterML.hdf5', __name__, path="../data/"))

  threshold = 0.001
  gmm.set_variance_thresholds(threshold)

  # Test against the model
  score_mean_ref = -1.50379e+06
  score = 0.
  for v in ar: score += gmm(v)
  score /= len(ar)

  # Compare current results to torch3vision
  assert abs(score-score_mean_ref)/abs(score_mean_ref)<1e-4
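
These examples assume the usual preamble of the bob test modules, which the listing omits. A minimal sketch of it (the equals helper is defined inside the test modules rather than in the library, so its body below is a best guess consistent with how it is called later):

import numpy
import bob.io.base
import bob.learn.em
from bob.io.base.test_utils import datafile
from bob.learn.em import (GMMMachine, GMMStats, KMeansMachine, KMeansTrainer,
                          ML_GMMTrainer, MAP_GMMTrainer)

def equals(x, y, epsilon):
    # element-wise closeness check used by many of the tests below
    return (abs(x - y) < epsilon).all()
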
Example #2
def test_gmm_MAP_2():

  # Train a GMMMachine with MAP_GMMTrainer and compare with matlab reference

  data = bob.io.base.load(datafile('data.hdf5', __name__, path="../data/"))
  data = data.reshape((1, data.shape[0])) # make a 2D array out of it
  means = bob.io.base.load(datafile('means.hdf5', __name__, path="../data/"))
  variances = bob.io.base.load(datafile('variances.hdf5', __name__, path="../data/"))
  weights = bob.io.base.load(datafile('weights.hdf5', __name__, path="../data/"))

  gmm = GMMMachine(2,50)
  gmm.means = means
  gmm.variances = variances
  gmm.weights = weights

  map_adapt = MAP_GMMTrainer(update_means=True, update_variances=False, update_weights=False, mean_var_update_responsibilities_threshold=0.,prior_gmm=gmm, relevance_factor=4.)

  gmm_adapted = GMMMachine(2,50)
  gmm_adapted.means = means
  gmm_adapted.variances = variances
  gmm_adapted.weights = weights

  #map_adapt.max_iterations = 1
  #map_adapt.train(gmm_adapted, data)
  bob.learn.em.train(map_adapt, gmm_adapted, data, max_iterations = 1)

  new_means = bob.io.base.load(datafile('new_adapted_mean.hdf5', __name__, path="../data/"))

 # print new_means[0,:]
 # print gmm_adapted.means[:,0]

  # Compare to matlab reference
  assert equals(new_means[0,:], gmm_adapted.means[:,0], 1e-4)
  assert equals(new_means[1,:], gmm_adapted.means[:,1], 1e-4)
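
For context, mean-only MAP adaptation with a relevance factor is the classic Reynolds update. A minimal numpy sketch of the rule (standard math, not bob's actual implementation):

import numpy as np

def map_adapt_means_sketch(n, sum_px, prior_means, relevance_factor=4.0):
    # Reynolds-style MAP update of the means only, from accumulated
    # statistics: n[i] is the summed responsibility of Gaussian i and
    # sum_px[i] the responsibility-weighted sum of the data
    alpha = n / (n + relevance_factor)    # per-Gaussian adaptation weight
    ml_means = sum_px / n[:, None]        # ML mean estimate from the data
    return alpha[:, None] * ml_means + (1.0 - alpha[:, None]) * prior_means
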
Example #3
def test_gmm_test():

    # Tests a GMMMachine by computing scores against a model and comparing
    # them to an old reference

    ar = bob.io.base.load(
        datafile('dataforMAP.hdf5', __name__, path="../data/"))

    # Initialize GMMMachine
    n_gaussians = 5
    n_inputs = 45
    gmm = GMMMachine(n_gaussians, n_inputs)
    gmm.means = bob.io.base.load(
        datafile('meansAfterML.hdf5', __name__, path="../data/"))
    gmm.variances = bob.io.base.load(
        datafile('variancesAfterML.hdf5', __name__, path="../data/"))
    gmm.weights = bob.io.base.load(
        datafile('weightsAfterML.hdf5', __name__, path="../data/"))

    threshold = 0.001
    gmm.set_variance_thresholds(threshold)

    # Test against the model
    score_mean_ref = -1.50379e+06
    score = 0.
    for v in ar:
        score += gmm(v)
    score /= len(ar)

    # Compare current results to torch3vision
    assert abs(score - score_mean_ref) / abs(score_mean_ref) < 1e-4
Example #4
def test_netpbm():

  transcode(test_utils.datafile('test.pbm', __name__))  # indexed, works fine
  transcode(test_utils.datafile('test.pgm', __name__))  # indexed, works fine
  transcode(test_utils.datafile('test.ppm', __name__))  # indexed, works fine
  transcode(test_utils.datafile('test_2.pgm', __name__))  # indexed, works fine
  transcode(test_utils.datafile('test_2.ppm', __name__))  # indexed, works fine
Example #5
def test_matlab_baseline():
    """

    Tests based on this matlab baseline

    http://www-scf.usc.edu/~boqinggo/domainadaptation.html#intro

    """
    import numpy
    numpy.random.seed(10)

    source_webcam = bob.io.matlab.read_matrix(datafile("webcam.mat", __name__))
    webcam_labels = bob.io.matlab.read_matrix(
        datafile("webcam_labels.mat", __name__))

    target_dslr = bob.io.matlab.read_matrix(datafile("dslr.mat", __name__))
    dslr_labels = bob.io.matlab.read_matrix(
        datafile("dslr_labels.mat", __name__))

    gfk_trainer = GFKTrainer(10,
                             subspace_dim_source=140,
                             subspace_dim_target=140)
    gfk_machine = gfk_trainer.train(source_webcam, target_dslr)

    accuracy = compute_accuracy(gfk_machine.G, source_webcam, webcam_labels,
                                target_dslr, dslr_labels) * 100
    assert accuracy > 70
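
compute_accuracy is not shown in this listing. A plausible sketch, treating gfk_machine.G as a kernel matrix and classifying each target sample by its nearest source neighbour under the induced distance (the name and the data layout here are assumptions):

import numpy as np

def compute_accuracy_sketch(G, Xs, Ys, Xt, Yt):
    # hypothetical helper: squared distance under the GFK metric,
    # d(x, z) = x G x' - 2 x G z' + z G z', followed by 1-NN classification
    ss = np.einsum('ij,jk,ik->i', Xs, G, Xs)    # x G x' per source sample
    tt = np.einsum('ij,jk,ik->i', Xt, G, Xt)    # z G z' per target sample
    st = Xs @ G @ Xt.T                          # cross terms, (n_s, n_t)
    dist = ss[:, None] - 2.0 * st + tt[None, :]
    predictions = Ys[dist.argmin(axis=0)]
    return (predictions == Yt).mean()
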
Example #6
def test_cglogreg_norm_slow():

    pos1 = bob.io.base.load(datafile('positives_isv.hdf5', __name__))
    neg1 = bob.io.base.load(datafile('negatives_isv.hdf5', __name__))

    pos2 = bob.io.base.load(datafile('positives_lda.hdf5', __name__))
    neg2 = bob.io.base.load(datafile('negatives_lda.hdf5', __name__))

    negatives = numpy.vstack((neg1, neg2)).T
    positives = numpy.vstack((pos1, pos2)).T

    T = CGLogRegTrainer(0.5, 1e-10, 10000, mean_std_norm=True)
    machine = T.train(negatives, positives)

    # apply it to test data
    test1 = [1., -50.]
    test2 = [0.5, -86.]

    res1 = machine(test1)
    res2 = machine(test2)

    # try the training without normalization: normalize the data by hand instead
    mean = numpy.mean(numpy.vstack((positives, negatives)), 0)
    std = numpy.std(numpy.vstack((positives, negatives)), 0)
    pos = numpy.vstack([(positives[i] - mean) / std for i in range(len(positives))])
    neg = numpy.vstack([(negatives[i] - mean) / std for i in range(len(negatives))])
    T.mean_std_norm = False
    machine = T.train(neg, pos)
    machine.input_subtract = mean
    machine.input_divide = std

    # check that the results are at least approximately equal
    # Note: lower values for epsilon and a higher number of iterations improve the stability
    assert abs(machine(test1) - res1) < 1e-3
    assert abs(machine(test2) - res2) < 1e-3
Example #7
def test_uint8_histoPython():

  # Compute the histogram of a uint8 image
  input_image = bob.io.base.load(datafile('image.hdf5', "bob.ip.base"))


  histo1 = bob.ip.base.histogram(input_image)
  histo2 = bob.ip.base.histogram(input_image, 256)
  histo3 = bob.ip.base.histogram(input_image, (0, 255), 256)

  histo4 = numpy.ndarray((256,), numpy.uint64)
  histo5 = numpy.ndarray((256,), numpy.uint64)

  bob.ip.base.histogram(input_image, histo4)
  bob.ip.base.histogram(input_image, (0, 255), histo5)

  # Save the computed data
  #bob.io.base.save(histo1, datafile('image_histo.hdf5', 'bob.ip.base', 'data/histo'))

  histo_ref = bob.io.base.load(datafile('image_histo.hdf5', 'bob.ip.base', 'data/histo'))

  assert input_image.size == histo1.sum()
  assert input_image.size == histo2.sum()
  assert input_image.size == histo3.sum()
  assert input_image.size == histo4.sum()
  assert input_image.size == histo5.sum()
  assert (histo_ref == histo1).all()
  assert (histo_ref == histo2).all()
  assert (histo_ref == histo3).all()
  assert (histo_ref == histo4).all()
  assert (histo_ref == histo5).all()
Example #8
def test_uint8_histoPython():

    # Compute the histogram of a uint8 image
    input_image = bob.io.base.load(datafile('image.hdf5', "bob.ip.base"))

    histo1 = bob.ip.base.histogram(input_image)
    histo2 = bob.ip.base.histogram(input_image, 256)
    histo3 = bob.ip.base.histogram(input_image, (0, 255), 256)

    histo4 = numpy.ndarray((256, ), numpy.uint64)
    histo5 = numpy.ndarray((256, ), numpy.uint64)

    bob.ip.base.histogram(input_image, histo4)
    bob.ip.base.histogram(input_image, (0, 255), histo5)

    # Save the computed data
    #bob.io.base.save(histo1, datafile('image_histo.hdf5', 'bob.ip.base', 'data/histo'))

    histo_ref = bob.io.base.load(
        datafile('image_histo.hdf5', 'bob.ip.base', 'data/histo'))

    assert input_image.size == histo1.sum()
    assert input_image.size == histo2.sum()
    assert input_image.size == histo3.sum()
    assert input_image.size == histo4.sum()
    assert input_image.size == histo5.sum()
    assert (histo_ref == histo1).all()
    assert (histo_ref == histo2).all()
    assert (histo_ref == histo3).all()
    assert (histo_ref == histo4).all()
    assert (histo_ref == histo5).all()
Example #9
def test_ztnorm_big():
    my_A = bob.io.base.load(
        datafile("ztnorm_eval_eval.hdf5", __name__, path="../data/"))
    my_B = bob.io.base.load(
        datafile("ztnorm_znorm_eval.hdf5", __name__, path="../data/"))
    my_C = bob.io.base.load(
        datafile("ztnorm_eval_tnorm.hdf5", __name__, path="../data/"))
    my_D = bob.io.base.load(
        datafile("ztnorm_znorm_tnorm.hdf5", __name__, path="../data/"))

    # ZT-Norm
    ref_scores = bob.io.base.load(
        datafile("ztnorm_result.hdf5", __name__, path="../data/"))
    scores = bob.learn.em.ztnorm(my_A, my_B, my_C, my_D)
    assert (abs(scores - ref_scores) < 1e-7).all()

    # T-Norm
    scores = bob.learn.em.tnorm(my_A, my_C)
    scores_py = tnorm(my_A, my_C)
    assert (abs(scores - scores_py) < 1e-7).all()

    # Z-Norm
    scores = bob.learn.em.znorm(my_A, my_B)
    scores_py = znorm(my_A, my_B)
    assert (abs(scores - scores_py) < 1e-7).all()
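
The znorm/tnorm helpers compared against above boil down to score standardization. A minimal numpy sketch, assuming score matrices are laid out as (models x probes):

import numpy as np

def znorm_sketch(raw_scores, z_scores):
    # Z-Norm: standardize each model's scores (a row) using that model's
    # score distribution against the impostor z-probes
    mu = np.mean(z_scores, axis=1, keepdims=True)
    sigma = np.std(z_scores, axis=1, keepdims=True)
    return (raw_scores - mu) / sigma

def tnorm_sketch(raw_scores, t_scores):
    # T-Norm: standardize each probe's scores (a column) using that probe's
    # score distribution against the cohort t-models
    mu = np.mean(t_scores, axis=0, keepdims=True)
    sigma = np.std(t_scores, axis=0, keepdims=True)
    return (raw_scores - mu) / sigma
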
Example #10
def test_video_like_container():
    path = datafile("testvideo.avi", "bob.bio.video.test")

    video = bob.bio.video.VideoAsArray(path,
                                       selection_style="spread",
                                       max_number_of_frames=3)
    container = bob.bio.video.VideoLikeContainer(video, video.indices)

    container_path = datafile("video_like.hdf5", "bob.bio.video.test")

    if regenerate_refs:
        container.save(container_path)

    loaded_container = bob.bio.video.VideoLikeContainer.load(container_path)
    if platform.machine() == "arm64" and platform.system() == "Darwin":
        raise nose.SkipTest("Skipping test on arm64 macos")
    np.testing.assert_allclose(loaded_container.indices, container.indices)
    np.testing.assert_allclose(loaded_container.data, container.data)
    assert container == loaded_container

    # test saving and loading None arrays
    with tempfile.NamedTemporaryFile(suffix=".pkl") as f:
        data = [None] * 10 + [1]
        indices = range(11)
        frame_container = bob.bio.video.VideoLikeContainer(data, indices)
        frame_container.save(f.name)

        loaded = bob.bio.video.VideoLikeContainer.load(f.name)
        np.testing.assert_equal(loaded.indices, frame_container.indices)
        np.testing.assert_equal(loaded.data, frame_container.data)
        assert loaded == frame_container
Example #11
def test_cglogreg_norm_slow():

  pos1 = bob.io.base.load(datafile('positives_isv.hdf5', __name__))
  neg1 = bob.io.base.load(datafile('negatives_isv.hdf5', __name__))

  pos2 = bob.io.base.load(datafile('positives_lda.hdf5', __name__))
  neg2 = bob.io.base.load(datafile('negatives_lda.hdf5', __name__))

  negatives = numpy.vstack((neg1, neg2)).T
  positives = numpy.vstack((pos1, pos2)).T

  T = CGLogRegTrainer(0.5, 1e-10, 10000, mean_std_norm=True)
  machine = T.train(negatives, positives)

  # apply it to test data
  test1 = [1., -50.]
  test2 = [0.5, -86.]

  res1 = machine(test1)
  res2 = machine(test2)

  # try the training without normalization: normalize the data by hand instead
  mean = numpy.mean(numpy.vstack((positives, negatives)), 0)
  std = numpy.std(numpy.vstack((positives, negatives)), 0)
  pos = numpy.vstack([(positives[i] - mean) / std for i in range(len(positives))])
  neg = numpy.vstack([(negatives[i] - mean) / std for i in range(len(negatives))])
  T.mean_std_norm = False
  machine = T.train(neg, pos)
  machine.input_subtract = mean
  machine.input_divide = std

  # check that the results are at least approximately equal
  # Note: lower values for epsilon and a higher number of iterations improve the stability
  assert abs(machine(test1) - res1) < 1e-3
  assert abs(machine(test2) - res2) < 1e-3
Example #12
def test_netpbm():

    transcode(test_utils.datafile('test.pbm', __name__))  # indexed, works fine
    transcode(test_utils.datafile('test.pgm', __name__))  # indexed, works fine
    transcode(test_utils.datafile('test.ppm', __name__))  # indexed, works fine
    transcode(test_utils.datafile('test_2.pgm',
                                  __name__))  # indexed, works fine
    transcode(test_utils.datafile('test_2.ppm',
                                  __name__))  # indexed, works fine
Example #13
def loadGMM():
  gmm = GMMMachine(2, 2)

  gmm.weights = bob.io.base.load(datafile('gmm.init_weights.hdf5', __name__, path="../data/"))
  gmm.means = bob.io.base.load(datafile('gmm.init_means.hdf5', __name__, path="../data/"))
  gmm.variances = bob.io.base.load(datafile('gmm.init_variances.hdf5', __name__, path="../data/"))
  #gmm.variance_thresholds = numpy.array([[0.001, 0.001],[0.001, 0.001]], 'float64')

  return gmm
Example #14
def loadGMM():
    gmm = GMMMachine(2, 2)

    gmm.weights = bob.io.base.load(
        datafile('gmm.init_weights.hdf5', __name__, path="../data/"))
    gmm.means = bob.io.base.load(
        datafile('gmm.init_means.hdf5', __name__, path="../data/"))
    gmm.variances = bob.io.base.load(
        datafile('gmm.init_variances.hdf5', __name__, path="../data/"))
    #gmm.variance_thresholds = numpy.array([[0.001, 0.001],[0.001, 0.001]], 'float64')

    return gmm
Example #15
def test_GMMMachine_3():
  # Test a GMMMachine (log-likelihood computation)

  data = bob.io.base.load(datafile('data.hdf5', __name__, path="../data/"))
  gmm = GMMMachine(2, 50)
  gmm.weights   = bob.io.base.load(datafile('weights.hdf5', __name__, path="../data/"))
  gmm.means     = bob.io.base.load(datafile('means.hdf5', __name__, path="../data/"))
  gmm.variances = bob.io.base.load(datafile('variances.hdf5', __name__, path="../data/"))

  # Compare the log-likelihood with the one obtained using Chris Matlab
  # implementation
  matlab_ll_ref = -2.361583051672024e+02
  assert abs(gmm(data) - matlab_ll_ref) < 1e-10
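
Calling a GMMMachine on a sample returns its log-likelihood under the diagonal-covariance mixture. A reference sketch of that computation (standard GMM math, not bob's C++ code):

import numpy as np

def gmm_log_likelihood_sketch(x, weights, means, variances):
    # log p(x) = logsumexp_i [ log w_i + log N(x; mu_i, diag(var_i)) ]
    log_norm = -0.5 * np.log(2.0 * np.pi * variances).sum(axis=1)
    log_gauss = log_norm - 0.5 * (((x - means) ** 2) / variances).sum(axis=1)
    log_weighted = np.log(weights) + log_gauss
    m = log_weighted.max()
    return m + np.log(np.exp(log_weighted - m).sum())  # stable logsumexp
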
Example #16
def test_kmeans_a():

  # Trains a KMeansMachine
  # This file contains draws from two 1D Gaussian distributions:
  #   * 100 samples from N(-10,1)
  #   * 100 samples from N(10,1)
  data = bob.io.base.load(datafile("samplesFrom2G_f64.hdf5", __name__, path="../data/"))

  machine = KMeansMachine(2, 1)

  trainer = KMeansTrainer()
  #trainer.train(machine, data)
  bob.learn.em.train(trainer,machine,data)

  [variances, weights] = machine.get_variances_and_weights_for_each_cluster(data)
  variances_b = numpy.ndarray(shape=(2,1), dtype=numpy.float64)
  weights_b = numpy.ndarray(shape=(2,), dtype=numpy.float64)
  machine.__get_variances_and_weights_for_each_cluster_init__(variances_b, weights_b)
  machine.__get_variances_and_weights_for_each_cluster_acc__(data, variances_b, weights_b)
  machine.__get_variances_and_weights_for_each_cluster_fin__(variances_b, weights_b)
  m1 = machine.get_mean(0)
  m2 = machine.get_mean(1)

  ## Check means [-10,10] / variances [1,1] / weights [0.5,0.5]
  if(m1<m2): means=numpy.array(([m1[0],m2[0]]), 'float64')
  else: means=numpy.array(([m2[0],m1[0]]), 'float64')
  assert equals(means, numpy.array([-10.,10.]), 2e-1)
  assert equals(variances, numpy.array([1.,1.]), 2e-1)
  assert equals(weights, numpy.array([0.5,0.5]), 1e-3)

  assert equals(variances, variances_b, 1e-8)
  assert equals(weights, weights_b, 1e-8)
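
get_variances_and_weights_for_each_cluster hard-assigns each sample to its closest mean and measures the per-cluster spread. A hypothetical numpy re-implementation of the idea (not bob's code):

import numpy as np

def variances_and_weights_sketch(means, data):
    # assign each sample to the nearest mean, then compute each cluster's
    # fraction of the data and its per-dimension variance
    dists = ((data[:, None, :] - means[None, :, :]) ** 2).sum(axis=2)
    labels = dists.argmin(axis=1)
    k = means.shape[0]
    weights = np.array([(labels == i).mean() for i in range(k)])
    variances = np.array([data[labels == i].var(axis=0) for i in range(k)])
    return variances, weights
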
Example #17
def test_GMMMachine_3():
    # Test a GMMMachine (log-likelihood computation)

    data = bob.io.base.load(datafile('data.hdf5', __name__, path="../data/"))
    gmm = GMMMachine(2, 50)
    gmm.weights = bob.io.base.load(
        datafile('weights.hdf5', __name__, path="../data/"))
    gmm.means = bob.io.base.load(
        datafile('means.hdf5', __name__, path="../data/"))
    gmm.variances = bob.io.base.load(
        datafile('variances.hdf5', __name__, path="../data/"))

    # Compare the log-likelihood with the one obtained using Chris Matlab
    # implementation
    matlab_ll_ref = -2.361583051672024e+02
    assert abs(gmm(data) - matlab_ll_ref) < 1e-10
Example #18
def wrapper(*args, **kwargs):
    dbfile = datafile("db.sql3", __name__, None)
    if os.path.exists(dbfile):
        return test(*args, **kwargs)
    else:
        raise SkipTest("The database file '%s' is not available; did you forget to run 'bob_dbmanage.py %s create' ?" % (
            dbfile, 'replaymobile'))
Example #19
def test_get_skin_pixels():
  """
  Test the skin colored pixels detection
  """

  # to run face detection
  import bob.ip.facedetect
  
  # load face image
  face = load(datafile('001.jpg', 'bob.rppg.base'))

  from bob.rppg.ssr.ssr_utils import get_skin_pixels
  
  # zero threshold -> the number of skin pixels is the number of pixels in the cropped face
  skin_pixels = get_skin_pixels(face, 0, True, 0.0)
  bbox, quality = bob.ip.facedetect.detect_single_face(face)
  assert skin_pixels.shape[1] == (bbox.size[0] - 1) * bbox.size[1] # -1 because of the cropping

  # same as before, but with provided bbox
  bounding_boxes = [bbox] 
  skin_pixels = get_skin_pixels(face, 0, True, 0, bounding_boxes)
  assert skin_pixels.shape[1] == (bbox.size[0] - 1) * bbox.size[1] # -1 because of the cropping

  # threshold of 1.0 -> zero skin pixels
  skin_pixels = get_skin_pixels(face, 0, True, 1, bounding_boxes)
  assert skin_pixels.shape[1] == 0
Example #20
def test_kmeans_a():
    # Trains a KMeansMachine
    # This file contains draws from two 1D Gaussian distributions:
    #   * 100 samples from N(-10,1)
    #   * 100 samples from N(10,1)
    data = bob.io.base.load(datafile("samplesFrom2G_f64.hdf5", __name__, path="../data/"))

    machine = KMeansMachine(2, 1)

    trainer = KMeansTrainer()
    # trainer.train(machine, data)
    bob.learn.em.train(trainer, machine, data)

    [variances, weights] = machine.get_variances_and_weights_for_each_cluster(data)
    variances_b = numpy.ndarray(shape=(2, 1), dtype=numpy.float64)
    weights_b = numpy.ndarray(shape=(2,), dtype=numpy.float64)
    machine.__get_variances_and_weights_for_each_cluster_init__(variances_b, weights_b)
    machine.__get_variances_and_weights_for_each_cluster_acc__(data, variances_b, weights_b)
    machine.__get_variances_and_weights_for_each_cluster_fin__(variances_b, weights_b)
    m1 = machine.get_mean(0)
    m2 = machine.get_mean(1)

    ## Check means [-10,10] / variances [1,1] / weights [0.5,0.5]
    if (m1 < m2):
        means = numpy.array(([m1[0], m2[0]]), 'float64')
    else:
        means = numpy.array(([m2[0], m1[0]]), 'float64')
    assert equals(means, numpy.array([-10., 10.]), 2e-1)
    assert equals(variances, numpy.array([1., 1.]), 2e-1)
    assert equals(weights, numpy.array([0.5, 0.5]), 1e-3)

    assert equals(variances, variances_b, 1e-8)
    assert equals(weights, weights_b, 1e-8)
Example #21
def test_cglogreg_norm_keyword():

    # read some real test data;
    # for toy examples the results are quite different...

    pos1 = bob.io.base.load(datafile('positives_isv.hdf5', __name__))
    neg1 = bob.io.base.load(datafile('negatives_isv.hdf5', __name__))

    pos2 = bob.io.base.load(datafile('positives_lda.hdf5', __name__))
    neg2 = bob.io.base.load(datafile('negatives_lda.hdf5', __name__))

    negatives = numpy.vstack((neg1, neg2)).T
    positives = numpy.vstack((pos1, pos2)).T

    # Train the machine after mean-std norm
    T = CGLogRegTrainer(0.5, 1e-10, 10000, reg=0.0001, mean_std_norm=True)
    machine = T.train(negatives, positives)

    # assert that mean and variance are correct
    mean = numpy.mean(numpy.vstack((positives, negatives)), 0)
    std = numpy.std(numpy.vstack((positives, negatives)), 0)

    assert (abs(machine.input_subtract - mean) < 1e-10).all()
    assert (abs(machine.input_divide - std) < 1e-10).all()

    # apply it to test data
    test1 = [1., -50.]
    test2 = [0.5, -86.]

    res1 = machine(test1)
    res2 = machine(test2)

    # normalize training data
    pos = numpy.vstack([(positives[i] - mean) / std
                        for i in range(len(positives))])
    neg = numpy.vstack([(negatives[i] - mean) / std
                        for i in range(len(negatives))])

    # re-train the machine; should give identical results
    T.mean_std_norm = False
    machine = T.train(neg, pos)
    machine.input_subtract = mean
    machine.input_divide = std

    # assert that the result is the same
    assert abs(machine(test1) - res1) < 1e-10
    assert abs(machine(test2) - res2) < 1e-10
Example #22
def test_gmm_MAP_1():

  # Train a GMMMachine with MAP_GMMTrainer

  ar = bob.io.base.load(datafile('faithful.torch3_f64.hdf5', __name__, path="../data/"))

  gmm = GMMMachine(bob.io.base.HDF5File(datafile("gmm_ML.hdf5", __name__, path="../data/")))
  gmmprior = GMMMachine(bob.io.base.HDF5File(datafile("gmm_ML.hdf5", __name__, path="../data/")))

  map_gmmtrainer = MAP_GMMTrainer(update_means=True, update_variances=False, update_weights=False, prior_gmm=gmmprior, relevance_factor=4.)

  #map_gmmtrainer.train(gmm, ar)
  bob.learn.em.train(map_gmmtrainer, gmm, ar)

  gmm_ref = GMMMachine(bob.io.base.HDF5File(datafile('gmm_MAP.hdf5', __name__, path="../data/")))

  assert (equals(gmm.means,gmm_ref.means,1e-3) and equals(gmm.variances,gmm_ref.variances,1e-3) and equals(gmm.weights,gmm_ref.weights,1e-3))
Example #23
def wrapper(*args, **kwargs):
    dbfile = datafile("db.sql3", __name__, None)
    if os.path.exists(dbfile):
        return test(*args, **kwargs)
    else:
        raise SkipTest(
            "The database file '%s' is not available; did you forget to run 'bob_dbmanage.py %s create' ?" % (
            dbfile, 'avspoof'))
Example #24
def wrapper(*args, **kwargs):
    dbfile = datafile("db.sql3", __name__, None)
    if os.path.exists(dbfile):
        return test(*args, **kwargs)
    else:
        raise SkipTest(
            "The interface SQL file (%s) is not available; did you forget to run 'bob_dbmanage.py %s create' ?"
            % (dbfile, 'vera'))
Example #25
def test_cglogreg_norm_keyword():

  # read some real test data;
  # for toy examples the results are quite different...

  pos1 = bob.io.base.load(datafile('positives_isv.hdf5', __name__))
  neg1 = bob.io.base.load(datafile('negatives_isv.hdf5', __name__))

  pos2 = bob.io.base.load(datafile('positives_lda.hdf5', __name__))
  neg2 = bob.io.base.load(datafile('negatives_lda.hdf5', __name__))

  negatives = numpy.vstack((neg1, neg2)).T
  positives = numpy.vstack((pos1, pos2)).T

  # Train the machine after mean-std norm
  T = CGLogRegTrainer(0.5, 1e-10, 10000, reg=0.0001, mean_std_norm=True)
  machine = T.train(negatives,positives)

  # assert that mean and variance are correct
  mean = numpy.mean(numpy.vstack((positives, negatives)), 0)
  std = numpy.std(numpy.vstack((positives, negatives)), 0)

  assert (abs(machine.input_subtract - mean) < 1e-10).all()
  assert (abs(machine.input_divide - std) < 1e-10).all()

  # apply it to test data
  test1 = [1., -50.]
  test2 = [0.5, -86.]

  res1 = machine(test1)
  res2 = machine(test2)

  # normalize training data
  pos = numpy.vstack([(positives[i] - mean) / std for i in range(len(positives))])
  neg = numpy.vstack([(negatives[i] - mean) / std for i in range(len(negatives))])

  # re-train the machine; should give identical results
  T.mean_std_norm = False
  machine = T.train(neg, pos)
  machine.input_subtract = mean
  machine.input_divide = std

  # assert that the result is the same
  assert abs(machine(test1) - res1) < 1e-10
  assert abs(machine(test2) - res2) < 1e-10
Example #26
def test_GMMMachine_4():

  import numpy
  numpy.random.seed(3) # FIXING A SEED

  data = numpy.random.rand(100,50) # Doesn't matter if it is random. Averaging the per-row (1D) log-likelihoods in Python MUST match the C++ result on the full 2D array
  
  gmm = GMMMachine(2, 50)
  gmm.weights   = bob.io.base.load(datafile('weights.hdf5', __name__, path="../data/"))
  gmm.means     = bob.io.base.load(datafile('means.hdf5', __name__, path="../data/"))
  gmm.variances = bob.io.base.load(datafile('variances.hdf5', __name__, path="../data/"))


  ll = 0
  for i in range(data.shape[0]):
    ll += gmm(data[i,:])
  ll /= data.shape[0]
  
  assert ll==gmm(data)
Example #27
def test_gmm_ML_1():

  # Trains a GMMMachine with ML_GMMTrainer

  ar = bob.io.base.load(datafile("faithful.torch3_f64.hdf5", __name__, path="../data/"))
  gmm = loadGMM()

  ml_gmmtrainer = ML_GMMTrainer(True, True, True)
  #ml_gmmtrainer.train(gmm, ar)
  bob.learn.em.train(ml_gmmtrainer, gmm, ar, convergence_threshold=0.001)

  #config = bob.io.base.HDF5File(datafile('gmm_ML.hdf5", __name__), 'w')
  #gmm.save(config)

  gmm_ref = GMMMachine(bob.io.base.HDF5File(datafile('gmm_ML.hdf5', __name__, path="../data/")))
  gmm_ref_32bit_debug = GMMMachine(bob.io.base.HDF5File(datafile('gmm_ML_32bit_debug.hdf5', __name__, path="../data/")))
  gmm_ref_32bit_release = GMMMachine(bob.io.base.HDF5File(datafile('gmm_ML_32bit_release.hdf5', __name__, path="../data/")))

  assert (gmm == gmm_ref) or (gmm == gmm_ref_32bit_debug) or (gmm == gmm_ref_32bit_release)
Example #28
def test_io():

    raise SkipTest("TODO: Not fully implemented yet")

    # Checks that the IO functionality of LBP works
    test_file = datafile("LBP.hdf5", __name__)
    temp_file = temporary_filename()

    # create file
    lbp1 = bob.ip.base.LBP(8, (2, 3),
                           elbp_type="transitional",
                           to_average=True,
                           add_average_bit=True)
    lbp2 = bob.ip.base.LBP(16,
                           4.,
                           2.,
                           uniform=True,
                           rotation_invariant=True,
                           circular=True)

    # re-generate the reference file, if wanted
    f = bob.io.base.HDF5File(temp_file, 'w')
    f.create_group("LBP1")
    f.create_group("LBP2")
    f.cd("/LBP1")
    lbp1.save(f)
    f.cd("/LBP2")
    lbp2.save(f)
    del f

    # load the file again
    f = bob.io.base.HDF5File(temp_file)
    f.cd("/LBP1")
    read1 = bob.ip.base.LBP(f)
    f.cd("/LBP2")
    read2 = bob.ip.base.LBP(f)
    del f

    # assert that the created and the read object are identical
    assert lbp1 == read1
    assert lbp2 == read2

    # load the reference file
    f = bob.io.base.HDF5File(test_file)
    f.cd("/LBP1")
    ref1 = bob.ip.base.LBP(f)
    f.cd("/LBP2")
    ref2 = bob.ip.base.LBP(f)
    del f

    # assert that the lbp objects and the reference ones are identical
    assert lbp1 == ref1
    assert lbp2 == ref2
    assert read1 == ref1
    assert read2 == ref2
Example #29
def test_gmm_MAP_2():

    # Train a GMMMachine with MAP_GMMTrainer and compare with matlab reference

    data = bob.io.base.load(datafile('data.hdf5', __name__, path="../data/"))
    data = data.reshape((1, data.shape[0]))  # make a 2D array out of it
    means = bob.io.base.load(datafile('means.hdf5', __name__, path="../data/"))
    variances = bob.io.base.load(
        datafile('variances.hdf5', __name__, path="../data/"))
    weights = bob.io.base.load(
        datafile('weights.hdf5', __name__, path="../data/"))

    gmm = GMMMachine(2, 50)
    gmm.means = means
    gmm.variances = variances
    gmm.weights = weights

    map_adapt = MAP_GMMTrainer(update_means=True,
                               update_variances=False,
                               update_weights=False,
                               mean_var_update_responsibilities_threshold=0.,
                               prior_gmm=gmm,
                               relevance_factor=4.)

    gmm_adapted = GMMMachine(2, 50)
    gmm_adapted.means = means
    gmm_adapted.variances = variances
    gmm_adapted.weights = weights

    #map_adapt.max_iterations = 1
    #map_adapt.train(gmm_adapted, data)
    bob.learn.em.train(map_adapt, gmm_adapted, data, max_iterations=1)

    new_means = bob.io.base.load(
        datafile('new_adapted_mean.hdf5', __name__, path="../data/"))

    # print new_means[0,:]
    # print gmm_adapted.means[:,0]

    # Compare to matlab reference
    assert equals(new_means[0, :], gmm_adapted.means[:, 0], 1e-4)
    assert equals(new_means[1, :], gmm_adapted.means[:, 1], 1e-4)
Example #30
def test_processing():
  # Processing tests
  A = bob.io.base.load(datafile("vlimg_ref.hdf5", 'bob.ip.base', 'data/sift'))
  No = 3
  Ns = 3
  sigma0 = 1.6
  sigma_n = 0.5
  cont_t = 0.03
  edge_t = 10.
  norm_t = 0.2
  f=4.
  op = bob.ip.base.SIFT(A.shape,Ns,No,0,sigma_n,sigma0,cont_t,edge_t,norm_t,f,bob.sp.BorderType.NearestNeighbour)
  kp=[bob.ip.base.GSSKeypoint(1.6,(326,270))]
  B = numpy.ndarray(op.output_shape(1), numpy.float64)
  op.compute_descriptor(A,kp,B)
  C=B[0]
  #bob.io.base.save(C, datafile(os.path.join("sift","vlimg_ref_cmp.hdf5"), __name__)) # Generated using initial bob version
  C_ref = bob.io.base.load(datafile("vlimg_ref_cmp.hdf5", 'bob.ip.base', 'data/sift'))
  assert numpy.allclose(C, C_ref, 1e-5, 1e-5)
  """
Example #31
def test_ztnorm_big():
  my_A = bob.io.base.load(datafile("ztnorm_eval_eval.hdf5", __name__, path="../data/"))
  my_B = bob.io.base.load(datafile("ztnorm_znorm_eval.hdf5", __name__, path="../data/"))
  my_C = bob.io.base.load(datafile("ztnorm_eval_tnorm.hdf5", __name__, path="../data/"))
  my_D = bob.io.base.load(datafile("ztnorm_znorm_tnorm.hdf5", __name__, path="../data/"))

  # ZT-Norm
  ref_scores = bob.io.base.load(datafile("ztnorm_result.hdf5", __name__, path="../data/"))
  scores = bob.learn.em.ztnorm(my_A, my_B, my_C, my_D)
  assert (abs(scores - ref_scores) < 1e-7).all()

  # T-Norm
  scores = bob.learn.em.tnorm(my_A, my_C)
  scores_py = tnorm(my_A, my_C)
  assert (abs(scores - scores_py) < 1e-7).all()

  # Z-Norm
  scores = bob.learn.em.znorm(my_A, my_B)
  scores_py = znorm(my_A, my_B)
  assert (abs(scores - scores_py) < 1e-7).all()
Example #32
def test_GMMMachine_2():
  # Test a GMMMachine (statistics)

  arrayset = bob.io.base.load(datafile("faithful.torch3_f64.hdf5", __name__, path="../data/"))
  gmm = GMMMachine(2, 2)
  gmm.weights   = numpy.array([0.5, 0.5], 'float64')
  gmm.means     = numpy.array([[3, 70], [4, 72]], 'float64')
  gmm.variances = numpy.array([[1, 10], [2, 5]], 'float64')
  gmm.variance_thresholds = numpy.array([[0, 0], [0, 0]], 'float64')

  stats = GMMStats(2, 2)
  gmm.acc_statistics(arrayset, stats)

  stats_ref = GMMStats(bob.io.base.HDF5File(datafile("stats.hdf5",__name__, path="../data/")))

  assert stats.t == stats_ref.t
  assert numpy.allclose(stats.n, stats_ref.n, atol=1e-10)
  #assert numpy.array_equal(stats.sumPx, stats_ref.sumPx)
  #Note AA: precision error above
  assert numpy.allclose(stats.sum_px, stats_ref.sum_px, atol=1e-10)
  assert numpy.allclose(stats.sum_pxx, stats_ref.sum_pxx, atol=1e-10)
Example #33
def test_int32_histoPython():
  # Compute the histogram of a int32 random array
  # Generate random int32 array
  #input_array = numpy.ndarray((50, 70), 'int32')
  #random_int(input_array, -20,20)
  #bob.io.base.save(input_array,os.path.join('histo','input_int32.hdf5'))

  input_array = bob.io.base.load(datafile('input_int32.hdf5', 'bob.ip.base', 'data/histo'))
  histo2 = numpy.ndarray((41,), numpy.uint64)

  histo1 = bob.ip.base.histogram(input_array, (-20, 20), 41)
  bob.ip.base.histogram(input_array, (-20, 20), histo2)

  # Save computed data
  #bob.io.base.save(histo, os.path.join('histo','input_int32.histo.hdf5'))
  histo_ref = bob.io.base.load(datafile('input_int32.histo.hdf5', 'bob.ip.base', 'data/histo'))

  assert input_array.size == histo1.sum()
  assert input_array.size == histo2.sum()
  assert (histo_ref == histo1).all()
  assert (histo_ref == histo2).all()
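
bob.ip.base.histogram bins values into n_bins equally wide bins covering the given inclusive range. A hypothetical numpy analogue of the integer-valued call used here (an assumption about the exact binning, not bob's implementation):

import numpy as np

def histogram_sketch(array, value_range, n_bins):
    # map each value to a bin index over the inclusive integer range,
    # then count; mirrors histogram(array, (min, max), n_bins)
    lo, hi = value_range
    idx = ((array.astype(np.float64) - lo) * n_bins / (hi - lo + 1)).astype(np.int64)
    return np.bincount(idx.ravel().clip(0, n_bins - 1), minlength=n_bins).astype(np.uint64)
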
Example #34
def test_processing():
    # Processing tests
    A = bob.io.base.load(datafile("vlimg_ref.hdf5", "bob.ip.base",
                                  "data/sift"))
    No = 3
    Ns = 3
    sigma0 = 1.6
    sigma_n = 0.5
    f = 4.
    op = bob.ip.base.GaussianScaleSpace(A.shape, Ns, No, 0, sigma_n, sigma0, f)
    pyr = op(A)

    import math
    # Assumes that octave_min = 0
    dsigma0 = sigma0 * math.sqrt(1. - math.pow(2, -2. / float(Ns)))
    Aa = A
    for o in range(No):
        for s in range(-1, Ns + 2):
            # Get Gaussian for this scale
            g_pyr = op.get_gaussian(s + 1)

            # Filtering step
            if s != -1 or (o == 0 and s == -1):
                # Compute scale and radius
                if (o == 0 and s == -1):
                    sa = sigma0  #* math.pow(2.,s/float(Ns))
                    sb = sigma_n
                    sigma = math.sqrt(sa * sa - sb * sb)
                else:
                    sigma = dsigma0 * math.pow(2, s / float(Ns))
                radius = int(math.ceil(f * sigma))
                # Check values
                assert abs(sigma - g_pyr.sigma[0]) < eps
                assert abs(sigma - g_pyr.sigma[1]) < eps
                assert abs(radius - g_pyr.radius[0]) < eps
                assert abs(radius - g_pyr.radius[1]) < eps

                g = bob.ip.base.Gaussian((sigma, sigma), (radius, radius))
                B = g(Aa)
            # Downsampling step
            else:
                # Select image as by VLfeat (seems wrong to me)
                Aa = pyr[o - 1][Ns, :, :]
                # Downsample using a trick to make sure that if the length is l=2p+1,
                # the new one is p and not p+1.
                B = Aa[:2 * (int(Aa.shape[0] / 2)):2, :2 *
                       (int(Aa.shape[1] / 2)):2]

            # Compare image of the pyramids (Python implementation vs. C++)
            Bpyr = pyr[o][s + 1, :, :]
            assert numpy.allclose(B, Bpyr, eps)
            Aa = B
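
The dsigma0 formula above follows from the Gaussian semigroup property: cascading two Gaussian filters is again a Gaussian filter, $G_{\sigma_b} * G_{\sigma_a} = G_{\sqrt{\sigma_a^2 + \sigma_b^2}}$. To reach scale $\sigma_s = \sigma_0\, 2^{s/N_s}$ from the previous scale, the loop therefore filters with

$\Delta\sigma_s = \sigma_0 \sqrt{2^{2s/N_s} - 2^{2(s-1)/N_s}} = \big(\sigma_0 \sqrt{1 - 2^{-2/N_s}}\big)\, 2^{s/N_s}$,

where the parenthesized factor is dsigma0; the special case at o == 0, s == -1 instead removes the assumed pre-smoothing of the input via $\sigma = \sqrt{\sigma_0^2 - \sigma_n^2}$.
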
Example #35
def test_matlab_baseline():
    """

    Tests based on this matlab baseline

    http://www-scf.usc.edu/~boqinggo/domainadaptation.html#intro

    """
    import numpy
    numpy.random.seed(10)

    source_webcam = bob.io.matlab.read_matrix(datafile("webcam.mat", __name__))
    webcam_labels = bob.io.matlab.read_matrix(datafile("webcam_labels.mat", __name__))

    target_dslr = bob.io.matlab.read_matrix(datafile("dslr.mat", __name__))
    dslr_labels = bob.io.matlab.read_matrix(datafile("dslr_labels.mat", __name__))

    gfk_trainer = GFKTrainer(10, subspace_dim_source=140, subspace_dim_target=140)
    gfk_machine = gfk_trainer.train(source_webcam, target_dslr)

    accuracy = compute_accuracy(gfk_machine.G, source_webcam, webcam_labels, target_dslr, dslr_labels) * 100
    assert accuracy > 70
Example #36
def test_custom_trainer():

  # Custom python trainer

  ar = bob.io.base.load(datafile("faithful.torch3_f64.hdf5", __name__, path="../data/"))

  mytrainer = MyTrainer1()

  machine = KMeansMachine(2, 2)
  mytrainer.train(machine, ar)

  for i in range(0, 2):
    assert (ar[i+1] == machine.means[i, :]).all()
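
MyTrainer1 is defined elsewhere in the test module. A minimal stand-in consistent with the assertion above (an assumption, not bob's code) simply copies two data rows into the means:

import numpy

class MyTrainer1(object):
    # toy trainer: copy the second and third data rows into the machine's
    # means, which is exactly what test_custom_trainer asserts
    def train(self, machine, data):
        means = numpy.ndarray((2, 2), 'float64')
        means[0, :] = data[1]
        means[1, :] = data[2]
        machine.means = means
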
Example #37
def test_float_histoPython():
  # Compute the histogram of a float random array
  # Generate random float32 array
  #input_array = numpy.ndarray((50, 70), 'float32')
  #random_float(input_array, 0, 1)
  #bob.io.base.save(input_array, os.path.join('histo','input_float.hdf5'))

  input_array = bob.io.base.load(datafile('input_float.hdf5', 'bob.ip.base', 'data/histo'))
  histo2 = numpy.ndarray((10,), numpy.uint64)

  histo1 = bob.ip.base.histogram(input_array, (0, 1), 10)
  bob.ip.base.histogram(input_array, (0, 1), histo2)

  # Save computed data
  #bob.io.base.save(histo1,os.path.join('histo','input_float.histo.hdf5'))

  histo_ref = bob.io.base.load(datafile('input_float.histo.hdf5', 'bob.ip.base', 'data/histo'))

  assert input_array.size == histo1.sum()
  assert input_array.size == histo2.sum()
  assert (histo_ref == histo1).all()
  assert (histo_ref == histo2).all()
Example #38
def test_kmeans_b():

  # Trains a KMeansMachine
  (arStd,std) = NormalizeStdArray(datafile("faithful.torch3.hdf5", __name__, path="../data/"))

  machine = KMeansMachine(2, 2)

  trainer = KMeansTrainer()
  #trainer.seed = 1337
  bob.learn.em.train(trainer,machine, arStd, convergence_threshold=0.001)

  [variances, weights] = machine.get_variances_and_weights_for_each_cluster(arStd)

  means = numpy.array(machine.means)
  variances = numpy.array(variances)

  multiplyVectorsByFactors(means, std)
  multiplyVectorsByFactors(variances, std ** 2)

  gmmWeights = bob.io.base.load(datafile('gmm.init_weights.hdf5', __name__, path="../data/"))
  gmmMeans = bob.io.base.load(datafile('gmm.init_means.hdf5', __name__, path="../data/"))
  gmmVariances = bob.io.base.load(datafile('gmm.init_variances.hdf5', __name__, path="../data/"))

  if (means[0, 0] < means[1, 0]):
    means = flipRows(means)
    variances = flipRows(variances)
    weights = flipRows(weights)

  assert equals(means, gmmMeans, 1e-3)
  assert equals(weights, gmmWeights, 1e-3)
  assert equals(variances, gmmVariances, 1e-3)

  # Check that there is no duplicate means during initialization
  machine = KMeansMachine(2, 1)
  trainer = KMeansTrainer()
  trainer.initialization_method = 'RANDOM_NO_DUPLICATE'
  data = numpy.array([[1.], [1.], [1.], [1.], [1.], [1.], [2.], [3.]])
  bob.learn.em.train(trainer, machine, data)
  assert not numpy.isnan(machine.means).any()
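
NormalizeStdArray, multiplyVectorsByFactors and flipRows come from the test module, not from bob itself. Plausible minimal versions, inferred from how they are used above (assumptions):

import numpy
import bob.io.base

def NormalizeStdArray(path):
    # load an array and scale each column to unit standard deviation
    array = bob.io.base.load(path).astype('float64')
    std = array.std(axis=0)
    return (array / std, std)

def multiplyVectorsByFactors(matrix, vector):
    # in-place row-wise rescaling, used to undo the normalization
    matrix *= vector

def flipRows(array):
    # swap the two rows/entries to get clusters into a canonical order
    if array.ndim == 1:
        return numpy.array([array[1], array[0]], 'float64')
    return numpy.array([array[1, :], array[0, :]], 'float64')
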
Example #39
def test_kmeans_b():
    # Trains a KMeansMachine
    (arStd, std) = NormalizeStdArray(datafile("faithful.torch3.hdf5", __name__, path="../data/"))

    machine = KMeansMachine(2, 2)

    trainer = KMeansTrainer()
    # trainer.seed = 1337
    bob.learn.em.train(trainer, machine, arStd, convergence_threshold=0.001)

    [variances, weights] = machine.get_variances_and_weights_for_each_cluster(arStd)

    means = numpy.array(machine.means)
    variances = numpy.array(variances)

    multiplyVectorsByFactors(means, std)
    multiplyVectorsByFactors(variances, std ** 2)

    gmmWeights = bob.io.base.load(datafile('gmm.init_weights.hdf5', __name__, path="../data/"))
    gmmMeans = bob.io.base.load(datafile('gmm.init_means.hdf5', __name__, path="../data/"))
    gmmVariances = bob.io.base.load(datafile('gmm.init_variances.hdf5', __name__, path="../data/"))

    if (means[0, 0] < means[1, 0]):
        means = flipRows(means)
        variances = flipRows(variances)
        weights = flipRows(weights)

    assert equals(means, gmmMeans, 1e-3)
    assert equals(weights, gmmWeights, 1e-3)
    assert equals(variances, gmmVariances, 1e-3)

    # Check that there is no duplicate means during initialization
    machine = KMeansMachine(2, 1)
    trainer = KMeansTrainer()
    trainer.initialization_method = 'RANDOM_NO_DUPLICATE'
    data = numpy.array([[1.], [1.], [1.], [1.], [1.], [1.], [2.], [3.]])
    bob.learn.em.train(trainer, machine, data)
    assert not numpy.isnan(machine.means).any()
Example #40
def test_uint16_histoPython():

    # Compute the histogram of a uint16 random array

    # Generate random uint16 array
    #input_array = numpy.ndarray((50, 70), 'uint16')
    #random_int(input_array, 0, 65535)
    #bob.io.base.save(input_array, os.path.join('histo','input_uint16.hdf5'))

    input_array = bob.io.base.load(
        datafile('input_uint16.hdf5', 'bob.ip.base', 'data/histo'))

    histo1 = bob.ip.base.histogram(input_array)
    histo2 = bob.ip.base.histogram(input_array, 65536)
    histo3 = bob.ip.base.histogram(input_array, (0, 65535), 65536)

    histo4 = numpy.ndarray((65536, ), numpy.uint64)
    histo5 = numpy.ndarray((65536, ), numpy.uint64)

    bob.ip.base.histogram(input_array, histo4)
    bob.ip.base.histogram(input_array, (0, 65535), histo5)

    # Save computed data
    #bob.io.base.save(histo1, os.path.join('histo','input_uint16.histo.hdf5'))

    histo_ref = bob.io.base.load(
        datafile('input_uint16.histo.hdf5', 'bob.ip.base', 'data/histo'))

    assert input_array.size == histo1.sum()
    assert input_array.size == histo2.sum()
    assert input_array.size == histo3.sum()
    assert input_array.size == histo4.sum()
    assert input_array.size == histo5.sum()
    assert (histo_ref == histo1).all()
    assert (histo_ref == histo2).all()
    assert (histo_ref == histo3).all()
    assert (histo_ref == histo4).all()
    assert (histo_ref == histo5).all()
Example #41
def test_processing():
  # Processing tests
  A = bob.io.base.load(datafile("vlimg_ref.hdf5", "bob.ip.base", "data/sift"))
  No = 3
  Ns = 3
  sigma0 = 1.6
  sigma_n = 0.5
  f=4.
  op = bob.ip.base.GaussianScaleSpace(A.shape,Ns,No,0,sigma_n,sigma0,f)
  pyr = op(A)

  import math
  # Assumes that octave_min = 0
  dsigma0 = sigma0 * math.sqrt(1.-math.pow(2,-2./float(Ns)))
  Aa = A
  for o in range(No):
    for s in range(-1,Ns+2):
      # Get Gaussian for this scale
      g_pyr = op.get_gaussian(s+1)

      # Filtering step
      if s!=-1 or (o==0 and s==-1):
        # Compute scale and radius
        if(o==0 and s==-1):
          sa = sigma0 #* math.pow(2.,s/float(Ns))
          sb = sigma_n
          sigma = math.sqrt(sa*sa - sb*sb)
        else:
          sigma = dsigma0 * math.pow(2,s/float(Ns))
        radius = int(math.ceil(f*sigma))
        # Check values
        assert abs(sigma - g_pyr.sigma[0]) < eps
        assert abs(sigma - g_pyr.sigma[1]) < eps
        assert abs(radius - g_pyr.radius[0]) < eps
        assert abs(radius - g_pyr.radius[1]) < eps

        g = bob.ip.base.Gaussian((sigma, sigma), (radius, radius))
        B = g(Aa)
      # Downsampling step
      else:
        # Select image as by VLfeat (seems wrong to me)
        Aa = pyr[o-1][Ns,:,:]
        # Downsample using a trick to make sure that if the length is l=2p+1,
        # the new one is p and not p+1.
        B = Aa[:2*(int(Aa.shape[0]/2)):2,:2*(int(Aa.shape[1]/2)):2]

      # Compare image of the pyramids (Python implementation vs. C++)
      Bpyr = pyr[o][s+1,:,:]
      assert numpy.allclose(B, Bpyr, eps)
      Aa = B
Example #42
def test_int32_histoPython():
    # Compute the histogram of a int32 random array
    # Generate random int32 array
    #input_array = numpy.ndarray((50, 70), 'int32')
    #random_int(input_array, -20,20)
    #bob.io.base.save(input_array,os.path.join('histo','input_int32.hdf5'))

    input_array = bob.io.base.load(
        datafile('input_int32.hdf5', 'bob.ip.base', 'data/histo'))
    histo2 = numpy.ndarray((41, ), numpy.uint64)

    histo1 = bob.ip.base.histogram(input_array, (-20, 20), 41)
    bob.ip.base.histogram(input_array, (-20, 20), histo2)

    # Save computed data
    #bob.io.base.save(histo, os.path.join('histo','input_int32.histo.hdf5'))
    histo_ref = bob.io.base.load(
        datafile('input_int32.histo.hdf5', 'bob.ip.base', 'data/histo'))

    assert input_array.size == histo1.sum()
    assert input_array.size == histo2.sum()
    assert (histo_ref == histo1).all()
    assert (histo_ref == histo2).all()
Example #43
def test_custom_trainer():

    # Custom python trainer

    ar = bob.io.base.load(
        datafile("faithful.torch3_f64.hdf5", __name__, path="../data/"))

    mytrainer = MyTrainer1()

    machine = KMeansMachine(2, 2)
    mytrainer.train(machine, ar)

    for i in range(0, 2):
        assert (ar[i + 1] == machine.means[i, :]).all()
Example #44
def test_GMMMachine_4():

    import numpy
    numpy.random.seed(3)  # FIXING A SEED

    data = numpy.random.rand(
        100, 50
    )  # Doesn't matter if it is random. Averaging the per-row (1D) log-likelihoods in Python MUST match the C++ result on the full 2D array

    gmm = GMMMachine(2, 50)
    gmm.weights = bob.io.base.load(
        datafile('weights.hdf5', __name__, path="../data/"))
    gmm.means = bob.io.base.load(
        datafile('means.hdf5', __name__, path="../data/"))
    gmm.variances = bob.io.base.load(
        datafile('variances.hdf5', __name__, path="../data/"))

    ll = 0
    for i in range(data.shape[0]):
        ll += gmm(data[i, :])
    ll /= data.shape[0]

    assert ll == gmm(data)
Example #45
def test_processing():
    # Processing tests
    A = bob.io.base.load(datafile("vlimg_ref.hdf5", 'bob.ip.base',
                                  'data/sift'))
    No = 3
    Ns = 3
    sigma0 = 1.6
    sigma_n = 0.5
    cont_t = 0.03
    edge_t = 10.
    norm_t = 0.2
    f = 4.
    op = bob.ip.base.SIFT(A.shape, Ns, No, 0, sigma_n, sigma0, cont_t, edge_t,
                          norm_t, f, bob.sp.BorderType.NearestNeighbour)
    kp = [bob.ip.base.GSSKeypoint(1.6, (326, 270))]
    B = numpy.ndarray(op.output_shape(1), numpy.float64)
    op.compute_descriptor(A, kp, B)
    C = B[0]
    #bob.io.base.save(C, datafile(os.path.join("sift","vlimg_ref_cmp.hdf5"), __name__)) # Generated using initial bob version
    C_ref = bob.io.base.load(
        datafile("vlimg_ref_cmp.hdf5", 'bob.ip.base', 'data/sift'))
    assert numpy.allclose(C, C_ref, 1e-5, 1e-5)
    """
Example #46
def test_gmm_ML_1():

    # Trains a GMMMachine with ML_GMMTrainer

    ar = bob.io.base.load(
        datafile("faithful.torch3_f64.hdf5", __name__, path="../data/"))
    gmm = loadGMM()

    # test rng handling
    ml_gmmtrainer = ML_GMMTrainer(True, True, True)
    rng = bob.core.random.mt19937(12345)
    bob.learn.em.train(ml_gmmtrainer,
                       gmm,
                       ar,
                       convergence_threshold=0.001,
                       rng=rng)

    gmm = loadGMM()
    ml_gmmtrainer = ML_GMMTrainer(True, True, True)
    #ml_gmmtrainer.train(gmm, ar)
    bob.learn.em.train(ml_gmmtrainer, gmm, ar, convergence_threshold=0.001)

    #config = bob.io.base.HDF5File(datafile('gmm_ML.hdf5", __name__), 'w')
    #gmm.save(config)

    gmm_ref = GMMMachine(
        bob.io.base.HDF5File(datafile('gmm_ML.hdf5', __name__,
                                      path="../data/")))
    gmm_ref_32bit_debug = GMMMachine(
        bob.io.base.HDF5File(
            datafile('gmm_ML_32bit_debug.hdf5', __name__, path="../data/")))
    gmm_ref_32bit_release = GMMMachine(
        bob.io.base.HDF5File(
            datafile('gmm_ML_32bit_release.hdf5', __name__, path="../data/")))

    assert (gmm == gmm_ref) or (gmm == gmm_ref_32bit_debug) or (
        gmm == gmm_ref_32bit_release)
Example #47
def test_uint16_histoPython():

  # Compute the histogram of a uint16 random array

  # Generate random uint16 array
  #input_array = numpy.ndarray((50, 70), 'uint16')
  #random_int(input_array, 0, 65535)
  #bob.io.base.save(input_array, os.path.join('histo','input_uint16.hdf5'))

  input_array = bob.io.base.load(datafile('input_uint16.hdf5', 'bob.ip.base', 'data/histo'))

  histo1 = bob.ip.base.histogram(input_array)
  histo2 = bob.ip.base.histogram(input_array, 65536)
  histo3 = bob.ip.base.histogram(input_array, (0, 65535), 65536)

  histo4 = numpy.ndarray((65536,), numpy.uint64)
  histo5 = numpy.ndarray((65536,), numpy.uint64)

  bob.ip.base.histogram(input_array, histo4)
  bob.ip.base.histogram(input_array, (0, 65535), histo5)

  # Save computed data
  #bob.io.base.save(histo1, os.path.join('histo','input_uint16.histo.hdf5'))

  histo_ref = bob.io.base.load(datafile('input_uint16.histo.hdf5', 'bob.ip.base', 'data/histo'))

  assert input_array.size == histo1.sum()
  assert input_array.size == histo2.sum()
  assert input_array.size == histo3.sum()
  assert input_array.size == histo4.sum()
  assert input_array.size == histo5.sum()
  assert (histo_ref == histo1).all()
  assert (histo_ref == histo2).all()
  assert (histo_ref == histo3).all()
  assert (histo_ref == histo4).all()
  assert (histo_ref == histo5).all()
Example #48
def test_GMMMachine_2():
    # Test a GMMMachine (statistics)

    arrayset = bob.io.base.load(
        datafile("faithful.torch3_f64.hdf5", __name__, path="../data/"))
    gmm = GMMMachine(2, 2)
    gmm.weights = numpy.array([0.5, 0.5], 'float64')
    gmm.means = numpy.array([[3, 70], [4, 72]], 'float64')
    gmm.variances = numpy.array([[1, 10], [2, 5]], 'float64')
    gmm.variance_thresholds = numpy.array([[0, 0], [0, 0]], 'float64')

    stats = GMMStats(2, 2)
    gmm.acc_statistics(arrayset, stats)

    stats_ref = GMMStats(
        bob.io.base.HDF5File(datafile("stats.hdf5", __name__,
                                      path="../data/")))

    assert stats.t == stats_ref.t
    assert numpy.allclose(stats.n, stats_ref.n, atol=1e-10)
    #assert numpy.array_equal(stats.sumPx, stats_ref.sumPx)
    #Note AA: precision error above
    assert numpy.allclose(stats.sum_px, stats_ref.sum_px, atol=1e-10)
    assert numpy.allclose(stats.sum_pxx, stats_ref.sum_pxx, atol=1e-10)
Example #49
def test_float_histoPython():
    # Compute the histogram of a float random array
    # Generate random float32 array
    #input_array = numpy.ndarray((50, 70), 'float32')
    #random_float(input_array, 0, 1)
    #bob.io.base.save(input_array, os.path.join('histo','input_float.hdf5'))

    input_array = bob.io.base.load(
        datafile('input_float.hdf5', 'bob.ip.base', 'data/histo'))
    histo2 = numpy.ndarray((10, ), numpy.uint64)

    histo1 = bob.ip.base.histogram(input_array, (0, 1), 10)
    bob.ip.base.histogram(input_array, (0, 1), histo2)

    # Save computed data
    #bob.io.base.save(histo1,os.path.join('histo','input_float.histo.hdf5'))

    histo_ref = bob.io.base.load(
        datafile('input_float.histo.hdf5', 'bob.ip.base', 'data/histo'))

    assert input_array.size == histo1.sum()
    assert input_array.size == histo2.sum()
    assert (histo_ref == histo1).all()
    assert (histo_ref == histo2).all()
Example #50
def test_all():

    # array writing tests
    a1 = numpy.random.normal(size=(2, 3)).astype('float32')
    a2 = numpy.random.normal(size=(2, 3, 4)).astype('float64')
    a3 = numpy.random.normal(size=(2, 3, 4, 5)).astype('complex128')
    a4 = (10 * numpy.random.normal(size=(3, 3))).astype('uint64')

    array_readwrite('.mat', a1)
    array_readwrite(".mat", a2)
    array_readwrite('.mat', a3)
    array_readwrite(".mat", a4)

    # arrayset writing tests
    a1 = []
    a2 = []
    a3 = []
    a4 = []
    for k in range(10):
        a1.append(numpy.random.normal(size=(2, 3)).astype('float32'))
        a2.append(numpy.random.normal(size=(2, 3, 4)).astype('float64'))
        a3.append(numpy.random.normal(size=(2, 3, 4, 5)).astype('complex128'))
        a4.append((10 * numpy.random.normal(size=(3, 3))).astype('uint64'))

    arrayset_readwrite('.mat', a1)
    arrayset_readwrite(".mat", a2)
    arrayset_readwrite('.mat', a3)
    arrayset_readwrite(".mat", a4)

    # complete transcoding tests
    transcode(test_utils.datafile(
        'test_1d.mat', __name__))  #pseudo 1D - matlab does not support true 1D
    transcode(test_utils.datafile('test_2d.mat', __name__))
    transcode(test_utils.datafile('test_3d.mat', __name__))
    transcode(test_utils.datafile('test_4d.mat', __name__))
    transcode(test_utils.datafile(
        'test_1d_cplx.mat', __name__))  #pseudo 1D - matlab does not support 1D
    transcode(test_utils.datafile('test_2d_cplx.mat', __name__))
    transcode(test_utils.datafile('test_3d_cplx.mat', __name__))
    transcode(test_utils.datafile('test_4d_cplx.mat', __name__))
    transcode(test_utils.datafile('test.mat', __name__))  #3D complex, large
Example #51
def test_interface():

  # test that we can read the 'x' variable in the test file
  cell_file = test_utils.datafile('test_2d.mat', __name__)
  assert sorted(['x', 'y']) == sorted(read_varnames(cell_file))

  # read x matrix
  x = read_matrix(cell_file, 'x')
  assert x.shape == (2,3)
  y = read_matrix(cell_file, 'y')
  assert y.shape == (3,2)

  for i in range(2):
    for j in range(3):
      assert x[i,j] == float(j*2+i+1)
      assert y[j,i] == float(j*2+i+1)
Example #52
def test_gmm_ML_2():

    # Trains a GMMMachine with ML_GMMTrainer; compares to an old reference

    ar = bob.io.base.load(
        datafile('dataNormalized.hdf5', __name__, path="../data/"))

    # Initialize GMMMachine
    gmm = GMMMachine(5, 45)
    gmm.means = bob.io.base.load(
        datafile('meansAfterKMeans.hdf5', __name__,
                 path="../data/")).astype('float64')
    gmm.variances = bob.io.base.load(
        datafile('variancesAfterKMeans.hdf5', __name__,
                 path="../data/")).astype('float64')
    gmm.weights = numpy.exp(
        bob.io.base.load(
            datafile('weightsAfterKMeans.hdf5', __name__,
                     path="../data/")).astype('float64'))

    threshold = 0.001
    gmm.set_variance_thresholds(threshold)

    # Initialize ML Trainer
    prior = 0.001
    max_iter_gmm = 25
    accuracy = 0.00001
    ml_gmmtrainer = ML_GMMTrainer(True, True, True, prior)

    # Run ML
    #ml_gmmtrainer.train(gmm, ar)
    bob.learn.em.train(ml_gmmtrainer,
                       gmm,
                       ar,
                       max_iterations=max_iter_gmm,
                       convergence_threshold=accuracy)

    # Test results
    # Load torch3vision reference
    meansML_ref = bob.io.base.load(
        datafile('meansAfterML.hdf5', __name__, path="../data/"))
    variancesML_ref = bob.io.base.load(
        datafile('variancesAfterML.hdf5', __name__, path="../data/"))
    weightsML_ref = bob.io.base.load(
        datafile('weightsAfterML.hdf5', __name__, path="../data/"))

    # Compare to current results
    assert equals(gmm.means, meansML_ref, 3e-3)
    assert equals(gmm.variances, variancesML_ref, 3e-3)
    assert equals(gmm.weights, weightsML_ref, 1e-4)
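
Each ML_GMMTrainer iteration is one EM step. The M-step from GMMStats-style accumulators, as a sketch (standard EM equations, not bob's exact code):

import numpy as np

def ml_m_step_sketch(n, sum_px, sum_pxx, n_samples):
    # n[i]:       summed responsibilities of Gaussian i
    # sum_px[i]:  responsibility-weighted sum of the samples
    # sum_pxx[i]: responsibility-weighted sum of the squared samples
    weights = n / n_samples
    means = sum_px / n[:, None]
    variances = sum_pxx / n[:, None] - means ** 2
    return weights, means, variances
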
Example #53
def test_interface():

    # test that we can read the 'x' variable in the test file
    cell_file = test_utils.datafile('test_2d.mat', __name__)
    assert sorted(['x', 'y']) == sorted(read_varnames(cell_file))

    # read x matrix
    x = read_matrix(cell_file, 'x')
    assert x.shape == (2, 3)
    y = read_matrix(cell_file, 'y')
    assert y.shape == (3, 2)

    for i in range(2):
        for j in range(3):
            assert x[i, j] == float(j * 2 + i + 1)
            assert y[j, i] == float(j * 2 + i + 1)
Example #54
def test_io():

  raise SkipTest("TODO: Not fully implemented yet")

  # Checks that the IO functionality of LBP works
  test_file = datafile("LBP.hdf5", __name__)
  temp_file = temporary_filename()

  # create file
  lbp1 = bob.ip.base.LBP(8, (2,3), elbp_type="transitional", to_average=True, add_average_bit=True)
  lbp2 = bob.ip.base.LBP(16, 4., 2., uniform=True, rotation_invariant=True, circular=True)

  # re-generate the reference file, if wanted
  f = bob.io.base.HDF5File(temp_file, 'w')
  f.create_group("LBP1")
  f.create_group("LBP2")
  f.cd("/LBP1")
  lbp1.save(f)
  f.cd("/LBP2")
  lbp2.save(f)
  del f

  # load the file again
  f = bob.io.base.HDF5File(temp_file)
  f.cd("/LBP1")
  read1 = bob.ip.base.LBP(f)
  f.cd("/LBP2")
  read2 = bob.ip.base.LBP(f)
  del f

  # assert that the created and the read object are identical
  assert lbp1 == read1
  assert lbp2 == read2

  # load the reference file
  f = bob.io.base.HDF5File(test_file)
  f.cd("/LBP1")
  ref1 = bob.ip.base.LBP(f)
  f.cd("/LBP2")
  ref2 = bob.ip.base.LBP(f)
  del f

  # assert that the lbp objects and the reference ones are identical
  assert lbp1 == ref1
  assert lbp2 == ref2
  assert read1 == ref1
  assert read2 == ref2
Example #55
def test_all():

  # array writing tests
  a1 = numpy.random.normal(size=(2,3)).astype('float32')
  a2 = numpy.random.normal(size=(2,3,4)).astype('float64')
  a3 = numpy.random.normal(size=(2,3,4,5)).astype('complex128')
  a4 = (10 * numpy.random.normal(size=(3,3))).astype('uint64')

  array_readwrite('.mat', a1)
  array_readwrite(".mat", a2)
  array_readwrite('.mat', a3)
  array_readwrite(".mat", a4)

  # arrayset writing tests
  a1 = []
  a2 = []
  a3 = []
  a4 = []
  for k in range(10):
    a1.append(numpy.random.normal(size=(2,3)).astype('float32'))
    a2.append(numpy.random.normal(size=(2,3,4)).astype('float64'))
    a3.append(numpy.random.normal(size=(2,3,4,5)).astype('complex128'))
    a4.append((10*numpy.random.normal(size=(3,3))).astype('uint64'))

  arrayset_readwrite('.mat', a1)
  arrayset_readwrite(".mat", a2)
  arrayset_readwrite('.mat', a3)
  arrayset_readwrite(".mat", a4)

  # complete transcoding tests
  transcode(test_utils.datafile('test_1d.mat', __name__)) #pseudo 1D - matlab does not support true 1D
  transcode(test_utils.datafile('test_2d.mat', __name__))
  transcode(test_utils.datafile('test_3d.mat', __name__))
  transcode(test_utils.datafile('test_4d.mat', __name__))
  transcode(test_utils.datafile('test_1d_cplx.mat', __name__)) #pseudo 1D - matlab does not support 1D
  transcode(test_utils.datafile('test_2d_cplx.mat', __name__))
  transcode(test_utils.datafile('test_3d_cplx.mat', __name__))
  transcode(test_utils.datafile('test_4d_cplx.mat', __name__))
  transcode(test_utils.datafile('test.mat', __name__)) #3D complex, large
Example #56
def test_image_load():
  # test that the generic bob.io.image.load function works as expected
  for filename in ('test.jpg', 'cmyk.jpg', 'test.pbm', 'test_corrupted.pbm',
      'test.pgm', 'test_corrupted.pgm', 'test_spaces.pgm', 'test.ppm',
      'test_corrupted.ppm', 'img_rgba_color.png', 'test.gif'):
    full_file = test_utils.datafile(filename, __name__)
    # load with just image name
    i1 = bob.io.image.load(full_file)
    # load with image name and extension
    i2 = bob.io.image.load(full_file, os.path.splitext(full_file)[1])
    assert numpy.array_equal(i1,i2)
    # load with image name and automatically estimated extension
    i3 = bob.io.image.load(full_file, 'auto')
    assert numpy.array_equal(i1,i3)

    # assert that unknown extensions raise exceptions
    nose.tools.assert_raises(RuntimeError, lambda x: bob.io.image.load(x, ".unknown"), full_file)
Example #57
def test_gmm_MAP_3():

  # Train a GMMMachine with MAP_GMMTrainer; compares to old reference

  ar = bob.io.base.load(datafile('dataforMAP.hdf5', __name__, path="../data/"))

  # Initialize GMMMachine
  n_gaussians = 5
  n_inputs = 45
  prior_gmm = GMMMachine(n_gaussians, n_inputs)
  prior_gmm.means = bob.io.base.load(datafile('meansAfterML.hdf5', __name__, path="../data/"))
  prior_gmm.variances = bob.io.base.load(datafile('variancesAfterML.hdf5', __name__, path="../data/"))
  prior_gmm.weights = bob.io.base.load(datafile('weightsAfterML.hdf5', __name__, path="../data/"))

  threshold = 0.001
  prior_gmm.set_variance_thresholds(threshold)

  # Initialize MAP Trainer
  relevance_factor = 0.1
  prior = 0.001
  max_iter_gmm = 1
  accuracy = 0.00001
  map_factor = 0.5
  map_gmmtrainer = MAP_GMMTrainer(prior_gmm, alpha=map_factor, update_means=True, update_variances=False, update_weights=False, mean_var_update_responsibilities_threshold=accuracy)
  #map_gmmtrainer.max_iterations = max_iter_gmm
  #map_gmmtrainer.convergence_threshold = accuracy

  gmm = GMMMachine(n_gaussians, n_inputs)
  gmm.set_variance_thresholds(threshold)

  # Train
  #map_gmmtrainer.train(gmm, ar)
  bob.learn.em.train(map_gmmtrainer, gmm, ar, max_iterations = max_iter_gmm, convergence_threshold=prior)

  # Test results
  # Load torch3vision reference
  meansMAP_ref = bob.io.base.load(datafile('meansAfterMAP.hdf5', __name__, path="../data/"))
  variancesMAP_ref = bob.io.base.load(datafile('variancesAfterMAP.hdf5', __name__, path="../data/"))
  weightsMAP_ref = bob.io.base.load(datafile('weightsAfterMAP.hdf5', __name__, path="../data/"))

  # Compare to current results
  # Gaps are quite large. This might be explained by the fact that there is no
  # adaptation of a given Gaussian in torch3 when the corresponding responsibilities
  # are below the responsibilities threshold
  assert equals(gmm.means, meansMAP_ref, 2e-1)
  assert equals(gmm.variances, variancesMAP_ref, 1e-4)
  assert equals(gmm.weights, weightsMAP_ref, 1e-4)
Example #58
def test_video_as_array_vs_dask():
    import dask

    path = datafile("testvideo.avi", "bob.bio.video.test")
    start = time.time()
    video = bob.bio.video.VideoAsArray(path, selection_style="all")
    video = dask.array.from_array(video, (20, 1, 480, 640))
    video = video.compute(scheduler="single-threaded")
    load_time = time.time() - start

    start = time.time()
    reference = to_bob(np.array(list((imageio.get_reader(path).iter_data()))))
    load_time2 = time.time() - start
    # Here, we're also chunking each frame, but normally we would only chunk the first axis.
    print(
        f"FYI: It took {load_time:.2f} s to load the video with dask and {load_time2:.2f} s "
        "to load directly. The slower loading with dask is expected.")
    np.testing.assert_allclose(reference, video)
Example #59
def test_gmm_ML_2():

  # Trains a GMMMachine with ML_GMMTrainer; compares to an old reference

  ar = bob.io.base.load(datafile('dataNormalized.hdf5', __name__, path="../data/"))

  # Initialize GMMMachine
  gmm = GMMMachine(5, 45)
  gmm.means = bob.io.base.load(datafile('meansAfterKMeans.hdf5', __name__, path="../data/")).astype('float64')
  gmm.variances = bob.io.base.load(datafile('variancesAfterKMeans.hdf5', __name__, path="../data/")).astype('float64')
  gmm.weights = numpy.exp(bob.io.base.load(datafile('weightsAfterKMeans.hdf5', __name__, path="../data/")).astype('float64'))

  threshold = 0.001
  gmm.set_variance_thresholds(threshold)

  # Initialize ML Trainer
  prior = 0.001
  max_iter_gmm = 25
  accuracy = 0.00001
  ml_gmmtrainer = ML_GMMTrainer(True, True, True, prior)

  # Run ML
  #ml_gmmtrainer.train(gmm, ar)
  bob.learn.em.train(ml_gmmtrainer, gmm, ar, max_iterations = max_iter_gmm, convergence_threshold=accuracy)

  # Test results
  # Load torch3vision reference
  meansML_ref = bob.io.base.load(datafile('meansAfterML.hdf5', __name__, path="../data/"))
  variancesML_ref = bob.io.base.load(datafile('variancesAfterML.hdf5', __name__, path="../data/"))
  weightsML_ref = bob.io.base.load(datafile('weightsAfterML.hdf5', __name__, path="../data/"))


  # Compare to current results
  assert equals(gmm.means, meansML_ref, 3e-3)
  assert equals(gmm.variances, variancesML_ref, 3e-3)
  assert equals(gmm.weights, weightsML_ref, 1e-4)
Example #60
import numpy
import math
import bob.io.base
import bob.ip.base
from bob.io.base.test_utils import datafile

# load a test image
face_image = bob.io.base.load(datafile('image_r10.hdf5', 'bob.ip.base', 'data/affine'))

# create FaceEyesNorm class
face_eyes_norm = bob.ip.base.FaceEyesNorm(eyes_distance = 65, crop_size = (128, 128), eyes_center = (32, 63.5))

# normalize image
normalized_image = face_eyes_norm( face_image, right_eye = (66, 47), left_eye = (62, 70) )

# plot results, including eye locations in original and normalized image
from matplotlib import pyplot
pyplot.figure(figsize=(8,4))
pyplot.subplot(121)
pyplot.imshow(face_image, cmap='gray')
pyplot.plot([47, 70], [66, 62], 'rx', ms=10, mew=2)
pyplot.axis('tight')
pyplot.title('Original Image')
pyplot.subplot(122)
pyplot.imshow(normalized_image, cmap='gray')
pyplot.plot([31, 96], [32, 32], 'rx', ms=10, mew=2)
pyplot.axis('tight')
pyplot.title('Cropped Image')
pyplot.show()