def test_export_model_to_hdf5(file_stimuli, tmpdir):
    """Exporting a model to HDF5 and reloading it must reproduce its log densities."""
    source_model = pysaliency.UniformModel()
    target_filename = str(tmpdir.join('model.hdf5'))
    export_model_to_hdf5(source_model, file_stimuli, target_filename)

    reloaded_model = pysaliency.HDF5Model(file_stimuli, target_filename)
    for stimulus in file_stimuli:
        np.testing.assert_allclose(source_model.log_density(stimulus),
                                   reloaded_model.log_density(stimulus))
# Example #2
# 0
 def test_model(self):
     """UniformModel log-likelihoods equal -log(stimulus area) for each fixation."""
     stimuli = pysaliency.Stimuli([np.random.randn(40, 40, 3),
                                   np.random.randn(40, 50, 3)])
     uniform = pysaliency.UniformModel()
     # five fixations fall on the 40x40 stimulus, three on the 40x50 one
     expected = [-np.log(40 * 40)] * 5 + [-np.log(40 * 50)] * 3
     np.testing.assert_allclose(uniform.log_likelihoods(stimuli, self.f),
                                expected)
def test_crossval_splits(many_stimuli, crossval_folds, val_folds, test_folds):
    """iterate_crossvalidation yields disjoint folds that together cover all data."""
    if not test_folds and val_folds != 1:
        # this configuration currently raises an implementation error
        return

    fixations = pysaliency.UniformModel().sample(many_stimuli, 100)

    # per-fold collectors
    train_stimuli, train_fixations = [], []
    val_stimuli, val_fixations = [], []
    test_stimuli, test_fixations = [], []

    splits = filter_datasets.iterate_crossvalidation(
        many_stimuli, fixations,
        crossval_folds=crossval_folds, val_folds=val_folds,
        test_folds=test_folds, random=True)

    for fold_train_s, fold_train_f, fold_val_s, fold_val_f, fold_test_s, fold_test_f in splits:
        # training stimuli must not leak into validation or test
        fold_train_ids = set(fold_train_s.stimulus_ids)
        assert not fold_train_ids.intersection(fold_val_s.stimulus_ids)
        assert not fold_train_ids.intersection(fold_test_s.stimulus_ids)

        if not test_folds:  # otherwise test is validation
            assert not set(fold_val_s.stimulus_ids).intersection(fold_test_s.stimulus_ids)

        train_stimuli.append(fold_train_s)
        train_fixations.append(fold_train_f)
        val_stimuli.append(fold_val_s)
        val_fixations.append(fold_val_f)
        test_stimuli.append(fold_test_s)
        test_fixations.append(fold_test_f)

    train_folds = crossval_folds - val_folds - test_folds

    # each stimulus shows up in exactly val_folds / test_folds / train_folds folds
    assert sum(len(s) for s in val_stimuli) == val_folds * len(many_stimuli)
    assert sum(len(s) for s in test_stimuli) == test_folds * len(many_stimuli)
    assert sum(len(s) for s in train_stimuli) == train_folds * len(many_stimuli)

    # and the fixations are partitioned accordingly
    assert sum(len(f.x) for f in val_fixations) == val_folds * len(fixations.x)
    assert sum(len(f.x) for f in test_fixations) == test_folds * len(fixations.x)
    assert sum(len(f.x) for f in train_fixations) == train_folds * len(fixations.x)

    assert len(train_stimuli) == crossval_folds
def test_stratified_crossval_splits_multiple_attributes(
        many_stimuli, crossval_folds, val_folds, test_folds):
    """Stratified crossvalidation keeps attribute proportions within each fold."""
    if not test_folds and val_folds != 1:
        # this configuration currently raises an implementation error
        return

    fixations = pysaliency.UniformModel().sample(many_stimuli, 100)

    # per-fold collectors
    train_stimuli, train_fixations = [], []
    val_stimuli, val_fixations = [], []
    test_stimuli, test_fixations = [], []

    splits = filter_datasets.iterate_crossvalidation(
        many_stimuli, fixations,
        crossval_folds=crossval_folds, val_folds=val_folds,
        test_folds=test_folds, random=True,
        stratified_attributes=['category', 'category2'])

    for fold_train_s, fold_train_f, fold_val_s, fold_val_f, fold_test_s, fold_test_f in splits:
        # training stimuli must not leak into validation or test
        fold_train_ids = set(fold_train_s.stimulus_ids)
        assert not fold_train_ids.intersection(fold_val_s.stimulus_ids)
        assert not fold_train_ids.intersection(fold_test_s.stimulus_ids)

        if not test_folds:  # otherwise test is validation
            assert not set(fold_val_s.stimulus_ids).intersection(fold_test_s.stimulus_ids)

        # category 0 makes up ~10% of the data; stratification should keep
        # that proportion (up to rounding, hence atol=1) in every split
        fold_size = len(many_stimuli) / crossval_folds
        np.testing.assert_allclose(
            np.sum(fold_train_s.attributes['category'] == 0),
            fold_size * (crossval_folds - val_folds - test_folds) * 0.1,
            atol=1)
        np.testing.assert_allclose(
            np.sum(fold_val_s.attributes['category'] == 0),
            fold_size * val_folds * 0.1,
            atol=1)
        np.testing.assert_allclose(
            np.sum(fold_test_s.attributes['category'] == 0),
            fold_size * test_folds * 0.1,
            atol=1)

        train_stimuli.append(fold_train_s)
        train_fixations.append(fold_train_f)
        val_stimuli.append(fold_val_s)
        val_fixations.append(fold_val_f)
        test_stimuli.append(fold_test_s)
        test_fixations.append(fold_test_f)

    train_folds = crossval_folds - val_folds - test_folds

    # each stimulus shows up in exactly val_folds / test_folds / train_folds folds
    assert sum(len(s) for s in val_stimuli) == val_folds * len(many_stimuli)
    assert sum(len(s) for s in test_stimuli) == test_folds * len(many_stimuli)
    assert sum(len(s) for s in train_stimuli) == train_folds * len(many_stimuli)

    # and the fixations are partitioned accordingly
    assert sum(len(f.x) for f in val_fixations) == val_folds * len(fixations.x)
    assert sum(len(f.x) for f in test_fixations) == test_folds * len(fixations.x)
    assert sum(len(f.x) for f in train_fixations) == train_folds * len(fixations.x)

    assert len(train_stimuli) == crossval_folds