Example #1
def kldiv(p, q, distp = None, distq = None, scale_factor = 1):
    """
    Computes the Kullback-Leibler divergence between two distributions.

    Parameters
        p : Matrix
            The first probability distribution
        q : Matrix
            The second probability distribution
        distp : fixmat
            If p is None, distp is used to compute an FDM which is
            then taken as the first probability distribution.
        distq : fixmat
            If q is None, distq is used to compute an FDM which is
            then taken as the second probability distribution.
        scale_factor : double
            Determines the size of the FDM computed from distp or distq.

    """
    assert q is not None or distq is not None, "Either q or distq has to be given"
    assert p is not None or distp is not None, "Either p or distp has to be given"

    try:
        if p is None:
            p = compute_fdm(distp, scale_factor=scale_factor)
        if q is None:
            q = compute_fdm(distq, scale_factor=scale_factor)
    except NoFixations:
        return np.nan

    q += np.finfo(q.dtype).eps
    p += np.finfo(p.dtype).eps 
    kl = np.sum( p * (np.log2(p / q)))
    return kl
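
The core of kldiv is the discrete KL divergence D(p||q) = sum(p * log2(p/q)), with machine epsilon added to avoid log(0). A minimal self-contained sketch of that computation on two toy arrays standing in for FDMs (numpy only, not tied to ocupy):

import numpy as np

p = np.array([[0.4, 0.1], [0.3, 0.2]])      # toy "distribution", sums to 1
q = np.array([[0.25, 0.25], [0.25, 0.25]])  # uniform reference
p = p + np.finfo(p.dtype).eps               # same epsilon trick as kldiv above
q = q + np.finfo(q.dtype).eps
kl = np.sum(p * np.log2(p / q))             # D_KL(p || q) in bits
print(kl)                                   # > 0 here; 0 only when p == q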
Example #2
 def skip_emd(self):
     fm1 = fixmat.TestFactory(params={'image_size': [93, 128]})
     fm2 = fixmat.TestFactory(points=list(
         zip(list(range(10, 50)), list(range(10, 50)))),
                              params={'image_size': [93, 128]})
     self.assertEqual(measures.emd_model(fixmat.compute_fdm(fm1), fm1), 0)
     self.assertTrue(not (
         measures.emd_model(fixmat.compute_fdm(fm1), fm2) == 0))
Example #3
 def test_corners(self):
     """ ``test_corners(self)``
     Tests whether handling of fixations in the corners is correct. 
     It manually generates an fdm with four fixations in the corners of a
     922x1272 array and compares it to the map generated by compute_fdm. 
     The difference between the maps must not be larger than the machine
     precision for floats.
     """
     yvec = [922, 922, 0, 0]
     xvec = [1272, 0, 1272, 0]
     self.fm = fixmat.TestFixmatFactory(points=(xvec, yvec))
     fdm = fixmat.compute_fdm(self.fm)
     # manually calculate the fdm
     fdm_man = np.zeros((922, 1272))
     fdm_man[0][0] = 1
     fdm_man[921][1271] = 1
     fdm_man[0][1271] = 1
     fdm_man[921][0] = 1
     # use default settings for fwhm, pixels_per_degree and scale_factor
     kernel_sigma = 2 * 36 * 1
     kernel_sigma = kernel_sigma / (2 * (2 * np.log(2))**.5)
     fdm_man = gaussian_filter(np.array(fdm_man),
                               kernel_sigma,
                               order=0,
                               mode='constant')
     fdm_man = fdm_man / fdm_man.sum()
     diff = fdm - fdm_man
     self.assertFalse((diff > np.finfo('float').eps).any())
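
The two kernel_sigma lines above are the usual FWHM-to-sigma conversion for a Gaussian, sigma = FWHM / (2 * sqrt(2 * ln 2)), where the FWHM in pixels is, as the comment suggests, fwhm * pixels_per_degree * scale_factor = 2 * 36 * 1. A quick check of that arithmetic, assuming exactly those defaults:

import numpy as np

fwhm_deg, px_per_deg, scale = 2, 36, 1          # defaults assumed by the test
fwhm_px = fwhm_deg * px_per_deg * scale         # 72 pixels
sigma = fwhm_px / (2 * np.sqrt(2 * np.log(2)))  # ~30.57 pixels
print(sigma)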
Example #4
    def test_kldiv(self):
        arr = scipy.random.random((21, 13))
        fm = fixmat.TestFixmatFactory(categories=[1, 2, 3],
                                      filenumbers=[1, 2, 3, 4, 5, 6],
                                      subjectindices=[1, 2, 3, 4, 5, 6],
                                      params={
                                          'pixels_per_degree': 10,
                                          'image_size': [100, 500]
                                      })

        kl = measures.kldiv(arr, arr)
        self.assertEqual(
            kl, 0, "KL Divergence between same distribution should be 0")
        kl = measures.kldiv(None, None, distp=fm, distq=fm, scale_factor=0.25)
        self.assertEqual(
            kl, 0, "KL Divergence between same distribution should be 0")
        fdm = fixmat.compute_fdm(fm)
        kl = measures.kldiv_model(fdm, fm)
        self.assertTrue(
            kl < 10**-13,
            "KL Divergence between same distribution should be almost 0")
        fm.x = np.array([])
        fm.y = np.array([])

        kl = measures.kldiv(None, None, distp=fm, distq=fm, scale_factor=0.25)
        self.assertTrue(np.isnan(kl))
Example #5
 def test_corners(self):    
     """ ``test_corners(self)``
     Tests whether handling of fixations in the corners is correct. 
     It manually generates an fdm with four fixations in the corners of a
     922x1272 array and compares it to the map generated by compute_fdm. 
     The difference between the maps must not be larger than the machine
     precision for floats.
     """    
     yvec = [922, 922, 0, 0]
     xvec = [1272, 0, 1272, 0]
     self.fm = fixmat.TestFixmatFactory(points=(xvec, yvec))
     fdm = fixmat.compute_fdm(self.fm)
     # manually calculate the fdm
     fdm_man = np.zeros((922, 1272))
     fdm_man[0][0] = 1
     fdm_man[921][1271] = 1
     fdm_man[0][1271] = 1
     fdm_man[921][0] = 1        
     # use default settings for fwhm, pixels_per_degree and scale_factor
     kernel_sigma = 2 * 36 * 1
     kernel_sigma = kernel_sigma / (2 * (2 * np.log(2)) ** .5)
     fdm_man = gaussian_filter(np.array(fdm_man), kernel_sigma, order=0,
         mode='constant')
     fdm_man = fdm_man / fdm_man.sum()
     diff = fdm - fdm_man
     self.assertFalse((diff > np.finfo('float').eps).any())
Example #6
 def test_nss(self):
     fm = fixmat.TestFixmatFactory(
         points=list(zip([0, 500, 1000], [1, 10, 10])),
         params={'image_size': [100, 10]})
     fm.SUBJECTINDEX = np.array([1, 1, 1])
     fm.filenumber = np.array([1, 1, 1])
     fm.category = np.array([1, 1, 1])
     fm.x = np.array([0, 50, 1000])
     fm.y = np.array([1, 10, 10])
     fm.fix = np.array([1, 2, 3])
     fm._num_fix = 3
     fdm = fixmat.compute_fdm(fm[(fm.x < 10) & (fm.y < 10)])
     self.assertRaises(IndexError, lambda: measures.nss(fdm, (fm.y, fm.x))) 
Example #7
File: bounds.py Project: eyequant/ocupy
def upper_bound(fm, nr_subs=None, scale_factor=1):
    """
    Compute the inter-subject consistency upper bound for a fixmat.

    Input:
        fm : a fixmat instance
        nr_subs : the number of subjects used for the prediction. Defaults
                  to the total number of subjects in the fixmat minus 1
        scale_factor : the scale factor of the FDMs. Default is 1.
    Returns:
        A list of scores; the list contains one dictionary for each measure.
        Each dictionary contains one key per category, and the corresponding
        value is an array with one score per subject.
    """
    nr_subs_total = len(np.unique(fm.SUBJECTINDEX))
    if not nr_subs:
        nr_subs = nr_subs_total - 1
    assert (nr_subs < nr_subs_total)
    # initialize output structure; every measure gets one dict with
    # category numbers as keys and numpy-arrays as values
    intersub_scores = []
    for measure in range(len(measures.scores)):
        res_dict = {}
        result_vectors = [
            np.empty(nr_subs_total) + np.nan for _ in np.unique(fm.category)
        ]
        res_dict.update(zip(np.unique(fm.category), result_vectors))
        intersub_scores.append(res_dict)
    #compute inter-subject scores for every stimulus, with leave-one-out
    #over subjects
    for fm_cat in fm.by_field('category'):
        cat = fm_cat.category[0]
        for (sub_counter, sub) in enumerate(np.unique(fm_cat.SUBJECTINDEX)):
            image_scores = []
            for fm_single in fm_cat.by_field('filenumber'):
                predicting_subs = (np.setdiff1d(
                    np.unique(fm_single.SUBJECTINDEX), [sub]))
                np.random.shuffle(predicting_subs)
                predicting_subs = predicting_subs[0:nr_subs]
                predicting_fm = fm_single[(ismember(fm_single.SUBJECTINDEX,
                                                    predicting_subs))]
                predicted_fm = fm_single[fm_single.SUBJECTINDEX == sub]
                try:
                    predicting_fdm = compute_fdm(predicting_fm,
                                                 scale_factor=scale_factor)
                except RuntimeError:
                    predicting_fdm = None
                image_scores.append(
                    measures.prediction_scores(predicting_fdm, predicted_fm))
            for (measure, score) in enumerate(nanmean(image_scores, 0)):
                intersub_scores[measure][cat][sub_counter] = score
    return intersub_scores
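
A hedged usage sketch for upper_bound, assuming the modules are importable as ocupy.fixmat, ocupy.measures and ocupy.bounds and using the same synthetic TestFixmatFactory data as the unit tests above; the argument values are illustrative only:

import numpy as np
from ocupy import fixmat, measures, bounds

fm = fixmat.TestFixmatFactory(categories=[1, 2, 3],
                              filenumbers=[1, 2, 3, 4, 5, 6],
                              subjectindices=[1, 2, 3, 4, 5, 6],
                              params={'pixels_per_degree': 10,
                                      'image_size': [100, 500]})
# choose which measures contribute one dict each to the result
measures.set_scores([measures.roc_model, measures.kldiv_model])
scores = bounds.upper_bound(fm, nr_subs=3, scale_factor=0.5)
# scores[m][cat] holds one score per subject for measure m and category cat
print(np.nanmean(scores[0][1]))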
Example #8
def correlation_model(prediction, fm):
    """
    wraps numpy.corrcoef functionality for model evaluation

    input:
        prediction: 2D Matrix 
            the model salience map
        fm: fixmat 
            Used to compute an FDM to which the prediction is compared.
    """
    (_, r_x) = calc_resize_factor(prediction, fm.image_size)
    fdm = compute_fdm(fm, scale_factor = r_x)
    return np.corrcoef(fdm.flatten(), prediction.flatten())[0,1]
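
The metric itself is just the Pearson correlation between the flattened FDM and the flattened prediction; a minimal numpy-only sketch of that core step (random arrays stand in for the two maps):

import numpy as np

prediction = np.random.random((93, 128))   # stand-in for a model salience map
fdm = np.random.random((93, 128))          # stand-in for the empirical FDM
r = np.corrcoef(fdm.flatten(), prediction.flatten())[0, 1]
print(r)   # 1.0 for identical maps; nan if either map has zero variance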
Example #9
 def test_scaling(self):
     """``test_scaling(self)``
     Tests that the size of the resulting fdm corresponds to
     image_size*scale_factor. Tests with different scale factors.
     """
     size = self.fm.image_size
     sf = [0.6, 1.6, 1.0 / 2.0]
     # if the scaling factor is not a float, this does not work but seems to
     # be a problem of the inbuilt function.
     for i in range(len(sf)):
         fdm = fixmat.compute_fdm(self.fm, scale_factor=sf[i])
         self.assertEqual((int(size[0] * sf[i]), int(size[1] * sf[i])),
                          np.shape(fdm))
Example #10
 def test_scaling(self):
     """``test_scaling(self)``
     Tests that the size of the resulting fdm corresponds to
     image_size*scale_factor. Tests with different scale factors.
     """
     size = self.fm.image_size
     sf = [0.6, 1.6, 1.0/2.0]
     # if the scaling factor is not a float, this does not work but seems to 
     # be a problem of the inbuilt function.
     for i in range(len(sf)):
         fdm = fixmat.compute_fdm(self.fm, scale_factor=sf[i])
         self.assertEqual((int(size[0] * sf[i]), int(size[1] * sf[i])),
                          np.shape(fdm))
Example #11
File: bounds.py Project: nicoschmidt/ocupy
def upper_bound(fm, nr_subs = None, scale_factor = 1):
    """
    Compute the inter-subject consistency upper bound for a fixmat.

    Input:
        fm : a fixmat instance
        nr_subs : the number of subjects used for the prediction. Defaults
                  to the total number of subjects in the fixmat minus 1
        scale_factor : the scale factor of the FDMs. Default is 1.
    Returns:
        A list of scores; the list contains one dictionary for each measure.
        Each dictionary contains one key per category, and the corresponding
        value is an array with one score per subject.
    """
    nr_subs_total = len(np.unique(fm.SUBJECTINDEX))
    if not nr_subs:
        nr_subs = nr_subs_total - 1
    assert (nr_subs < nr_subs_total)
    # initialize output structure; every measure gets one dict with
    # category numbers as keys and numpy-arrays as values
    intersub_scores = []
    for measure in range(len(measures.scores)):
        res_dict = {}
        result_vectors = [np.empty(nr_subs_total) + np.nan
                            for _ in np.unique(fm.category)]
        res_dict.update(zip(np.unique(fm.category), result_vectors))
        intersub_scores.append(res_dict)
    #compute inter-subject scores for every stimulus, with leave-one-out
    #over subjects
    for fm_cat in fm.by_field('category'):
        cat = fm_cat.category[0]
        for (sub_counter, sub) in enumerate(np.unique(fm_cat.SUBJECTINDEX)):
            image_scores = []
            for fm_single in fm_cat.by_field('filenumber'):
                predicting_subs = (np.setdiff1d(np.unique(
                    fm_single.SUBJECTINDEX),[sub]))
                np.random.shuffle(predicting_subs)
                predicting_subs = predicting_subs[0:nr_subs]
                predicting_fm = fm_single[
                    (ismember(fm_single.SUBJECTINDEX, predicting_subs))]
                predicted_fm = fm_single[fm_single.SUBJECTINDEX == sub]
                try:
                    predicting_fdm = compute_fdm(predicting_fm,
                        scale_factor = scale_factor)
                except NoFixations:
                    predicting_fdm = None
                image_scores.append(measures.prediction_scores(
                                        predicting_fdm, predicted_fm))
            for (measure, score) in enumerate(nanmean(image_scores, 0)):
                intersub_scores[measure][cat][sub_counter] = score
    return intersub_scores
Example #12
def emd_model(prediction, fm):
    """
    wraps emd functionality for model evaluation
    
    requires:
        OpenCV python bindings
        
    input:
        prediction: the model salience map
        fm : fixmat filtered for the image corresponding to the prediction
    """
    (_, r_x) = calc_resize_factor(prediction, fm.image_size)
    gt = fixmat.compute_fdm(fm, scale_factor = r_x)
    return emd(prediction, gt)
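
A hedged usage sketch, under the same ocupy import assumptions as above and assuming the OpenCV python bindings that emd requires are installed; the small image size keeps the EMD computation cheap:

import numpy as np
from ocupy import fixmat, measures

fm = fixmat.TestFixmatFactory(categories=[1, 2, 3],
                              filenumbers=[1, 2, 3, 4, 5, 6],
                              subjectindices=[1, 2, 3, 4, 5, 6],
                              params={'pixels_per_degree': 1,
                                      'image_size': [20, 50]})
fdm = fixmat.compute_fdm(fm)
print(measures.emd(fdm, fdm))                          # identical maps: 0
print(measures.emd_model(np.ones(fm.image_size), fm))  # flat prediction: > 0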
Example #13
 def test_correlation(self):
     fm = fixmat.TestFixmatFactory(categories = [1,2,3], 
             filenumbers = [1,2,3,4,5,6],
             subjectindices = [1, 2, 3, 4, 5, 6],
             params = {'pixels_per_degree':1, 'image_size':[100,500]})
     # Arr has zero variance, should return nan
     arr = np.ones(fm.image_size)
     corr = measures.correlation_model(arr, fm)
     self.assertTrue(np.isnan(corr))
     # With itself should give 1
     fdm = fixmat.compute_fdm(fm)
     corr = measures.correlation_model(fdm, fm)
     self.assertEqual(corr, 1)
     # Anti-correlation should give -1
     corr = measures.correlation_model(-1 * fdm, fm)
     self.assertEqual(corr, -1)
Example #14
 def test_nss_values(self):
     fm = fixmat.TestFixmatFactory(categories = [1,2,3], 
             filenumbers = [1,2,3,4,5,6],
             subjectindices = [1, 2, 3, 4, 5, 6],
             params = {'pixels_per_degree':0.1, 'image_size':[200,500]})
     # Arr has zero variance, should return nan
     arr = np.ones(fm.image_size)
     nss = measures.nss_model(arr, fm)
     self.assertTrue(np.isnan(nss))
     # With itself should yield a high value 
     fdm = fixmat.compute_fdm(fm)
     nss = measures.nss_model(fdm, fm)
     self.assertTrue(nss > 15)
     # Fixations at these locations should give nss < 0
     nss = measures.nss(fdm, [[100, 101, 102, 103, 104, 105],
                              [0, 0, 0, 0, 0, 0]])
     self.assertTrue(nss < 0)
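
For reference, the standard NSS definition these tests rely on is the z-scored salience map read out at the fixated pixels; a numpy-only sketch of that definition (this is the textbook formula, not necessarily ocupy's exact implementation):

import numpy as np

def nss_sketch(salience_map, ys, xs):
    # z-score the map, then average its values at the fixated pixels
    z = (salience_map - salience_map.mean()) / salience_map.std()
    return z[np.asarray(ys), np.asarray(xs)].mean()

fdm = np.zeros((200, 500))
fdm[100, 250] = 1.0
print(nss_sketch(fdm, [100], [250]))   # large and positive: fixation on the peak
print(nss_sketch(fdm, [0], [0]))       # slightly negative: fixation off the peak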
Example #15
 def test_emd(self):
    try: 
        import opencv
    except ImportError:
        print "Skipping EMD test - no opencv available"
        return 
    opencv # pyflakes
    fm = fixmat.TestFixmatFactory(categories = [1,2,3], 
        filenumbers = [1,2,3,4,5,6],
        subjectindices = [1, 2, 3, 4, 5, 6],
        params = {'pixels_per_degree':1, 'image_size':[20,50]})
    arr = np.ones(fm.image_size)
    fdm = fixmat.compute_fdm(fm)
    e = measures.emd_model(arr, fm)
    self.assertTrue(e > 0)
    e = measures.emd(fdm, fdm)
    self.assertEqual(e, 0)
Example #16
 def test_prediction_scores(self):
     measures.set_scores([measures.roc_model, 
                          measures.kldiv_model, 
                          measures.nss_model])
     fm = fixmat.TestFixmatFactory(categories = [1,2,3], 
             filenumbers = [1,2,3,4,5,6],
             subjectindices = [1, 2, 3, 4, 5, 6],
             params = {'pixels_per_degree':10, 'image_size':[100,500]})
     fdm = fixmat.compute_fdm(fm)
     measures.set_scores([measures.roc_model, 
                          measures.kldiv_model, 
                          measures.nss_model])
     scores = measures.prediction_scores(fdm, fm)
     self.assertEqual(len(scores), 3)
     measures.set_scores([measures.roc_model])
     scores = measures.prediction_scores(fdm, fm)
     self.assertEqual(len(scores), 1)
     measures.set_scores([measures.roc_model,
                          measures.kldiv_model,
                          measures.nss_model])
     scores = measures.prediction_scores(fdm, fm)
     self.assertEqual(len(scores), 3)
Example #17
 def test_kldiv(self):
     arr = scipy.random.random((21,13))
     fm = fixmat.TestFixmatFactory(categories = [1,2,3], 
             filenumbers = [1,2,3,4,5,6],
             subjectindices = [1, 2, 3, 4, 5, 6],
             params = {'pixels_per_degree':10, 'image_size':[100,500]})
     
     kl = measures.kldiv(arr, arr)
     self.assertEqual(kl, 0, 
             "KL Divergence between same distribution should be 0")
     kl = measures.kldiv(None, None, distp = fm, distq = fm, scale_factor = 0.25)
     self.assertEqual(kl, 0, 
             "KL Divergence between same distribution should be 0")
     fdm = fixmat.compute_fdm(fm)
     kl = measures.kldiv_model(fdm, fm)
     self.assertTrue(kl < 10**-13, 
             "KL Divergence between same distribution should be almost 0")
     fm.x = np.array([])
     fm.y = np.array([])
     
     kl = measures.kldiv(None, None, distp = fm, distq = fm, scale_factor = 0.25)
     self.assertTrue(np.isnan(kl))
Example #18
File: bounds.py Project: eyequant/ocupy
def lower_bound(fm, nr_subs=None, nr_imgs=None, scale_factor=1):
    """
    Compute the spatial bias lower bound for a fixmat.

    Input:
        fm : a fixmat instance
        nr_subs : the number of subjects used for the prediction. Defaults
                  to the total number of subjects in the fixmat minus 1
        nr_imgs : the number of images used for prediction. If given, the
                  same number will be used for every category. If not given,
                  leave-one-out will be used in all categories.
        scale_factor : the scale factor of the FDMs. Default is 1.
    Returns:
        A list of spatial bias scores; the list contains one dictionary for
        each measure. Each dictionary contains one key per category, and the
        corresponding value is an array with one score per subject.
    """
    nr_subs_total = len(np.unique(fm.SUBJECTINDEX))
    if nr_subs is None:
        nr_subs = nr_subs_total - 1
    assert (nr_subs < nr_subs_total)
    # initialize output structure; every measure gets one dict with
    # category numbers as keys and numpy-arrays as values
    sb_scores = []
    for measure in range(len(measures.scores)):
        res_dict = {}
        result_vectors = [
            np.empty(nr_subs_total) + np.nan for _ in np.unique(fm.category)
        ]
        res_dict.update(zip(np.unique(fm.category), result_vectors))
        sb_scores.append(res_dict)
    # compute mean spatial bias predictive power for all subjects in all
    # categories
    for fm_cat in fm.by_field('category'):
        cat = fm_cat.category[0]
        nr_imgs_cat = len(np.unique(fm_cat.filenumber))
        if not nr_imgs:
            nr_imgs_current = nr_imgs_cat - 1
        else:
            nr_imgs_current = nr_imgs
        assert (nr_imgs_current < nr_imgs_cat)
        for (sub_counter, sub) in enumerate(np.unique(fm.SUBJECTINDEX)):
            image_scores = []
            for fm_single in fm_cat.by_field('filenumber'):
                # Iterating by field filenumber makes filenumbers
                # in fm_single unique: Just take the first one to get the
                # filenumber for this fixmat
                fn = fm_single.filenumber[0]
                predicting_subs = (np.setdiff1d(np.unique(fm_cat.SUBJECTINDEX),
                                                [sub]))
                np.random.shuffle(predicting_subs)
                predicting_subs = predicting_subs[0:nr_subs]
                predicting_fns = (np.setdiff1d(np.unique(fm_cat.filenumber),
                                               [fn]))
                np.random.shuffle(predicting_fns)
                predicting_fns = predicting_fns[0:nr_imgs_current]
                predicting_fm = fm_cat[
                    (ismember(fm_cat.SUBJECTINDEX, predicting_subs))
                    & (ismember(fm_cat.filenumber, predicting_fns))]
                predicted_fm = fm_single[fm_single.SUBJECTINDEX == sub]
                try:
                    predicting_fdm = compute_fdm(predicting_fm,
                                                 scale_factor=scale_factor)
                except RuntimeError:
                    predicting_fdm = None
                image_scores.append(
                    measures.prediction_scores(predicting_fdm, predicted_fm))
            for (measure, score) in enumerate(nanmean(image_scores, 0)):
                sb_scores[measure][cat][sub_counter] = score
    return sb_scores
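
A hedged usage sketch for lower_bound, parallel to the upper_bound one above (same assumptions about the ocupy import paths; nr_imgs caps how many other images of the category feed the spatial-bias prediction):

from ocupy import fixmat, measures, bounds

fm = fixmat.TestFixmatFactory(categories=[1, 2, 3],
                              filenumbers=[1, 2, 3, 4, 5, 6],
                              subjectindices=[1, 2, 3, 4, 5, 6],
                              params={'pixels_per_degree': 10,
                                      'image_size': [100, 500]})
measures.set_scores([measures.roc_model])
sb = bounds.lower_bound(fm, nr_subs=3, nr_imgs=4, scale_factor=0.5)
print(sb[0][1])   # ROC scores of the spatial-bias predictor for category 1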
Example #19
File: bounds.py Project: eyequant/ocupy
def intersubject_scores(fm,
                        category,
                        predicting_filenumbers,
                        predicting_subjects,
                        predicted_filenumbers,
                        predicted_subjects,
                        controls=True,
                        scale_factor=1):
    """
    Calculates how well the fixations from a set of subjects on a set of
    images can be predicted with the fixations from another set of subjects
    on another set of images.

    The prediction is carried out by computing a fixation density map from
    fixations of predicting_subjects subjects on predicting_images images.
    Prediction accuracy is assessed by measures.prediction_scores.

    Parameters
        fm : fixmat instance
        category : int
            Category from which the fixations are taken.
        predicting_filenumbers : list
            List of filenumbers used for prediction, i.e. images where fixations
            for the prediction are taken from.
        predicting_subjects : list
            List of subjects whose fixations on images in predicting_filenumbers
            are used for the prediction.
        predicted_filenumbers : list
            List of images from which the to-be-predicted fixations are taken.
        predicted_subjects : list
            List of subjects used for evaluation, i.e. subjects whose fixations
            on images in predicted_filenumbers are taken for evaluation.
        controls : bool, optional
            If True (default), n_predict subjects are chosen from the fixmat.
            If False, 1000 fixations are randomly generated and used for
            testing.
        scale_factor : int, optional
            specifies the scaling of the fdm. Default is 1.

    Returns
        auc : area under the roc curve for sets of actuals and controls
        true_pos_rate : ndarray
            Rate of true positives for every given threshold value.
            All values appearing in actuals are taken as thresholds. Uses lower
            sum interpolation.
        false_pos_rate : ndarray
            See true_pos_rate but for false positives.

    """
    predicting_fm = fm[(ismember(fm.SUBJECTINDEX, predicting_subjects))
                       & (ismember(fm.filenumber, predicting_filenumbers)) &
                       (fm.category == category)]
    predicted_fm = fm[(ismember(fm.SUBJECTINDEX, predicted_subjects))
                      & (ismember(fm.filenumber, predicted_filenumbers)) &
                      (fm.category == category)]
    try:
        predicting_fdm = compute_fdm(predicting_fm, scale_factor=scale_factor)
    except RuntimeError:
        predicting_fdm = None

    if controls:
        fm_controls = fm[(ismember(fm.SUBJECTINDEX, predicted_subjects)) & (
            (ismember(fm.filenumber, predicted_filenumbers)) != True) &
                         (fm.category == category)]
        return measures.prediction_scores(predicting_fdm,
                                          predicted_fm,
                                          controls=(fm_controls.y,
                                                    fm_controls.x))
    return measures.prediction_scores(predicting_fdm,
                                      predicted_fm,
                                      controls=None)
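
A hedged calling sketch for intersubject_scores (same ocupy import assumptions; the subject and file numbers passed in are illustrative and must actually occur in the fixmat):

from ocupy import fixmat, measures, bounds

fm = fixmat.TestFixmatFactory(categories=[1, 2, 3],
                              filenumbers=[1, 2, 3, 4, 5, 6],
                              subjectindices=[1, 2, 3, 4, 5, 6],
                              params={'pixels_per_degree': 10,
                                      'image_size': [100, 500]})
measures.set_scores([measures.roc_model])
scores = bounds.intersubject_scores(fm, category=1,
                                    predicting_filenumbers=[1, 2, 3],
                                    predicting_subjects=[1, 2, 3],
                                    predicted_filenumbers=[4, 5, 6],
                                    predicted_subjects=[4, 5, 6],
                                    controls=True, scale_factor=1)
print(scores)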
Example #20
File: bounds.py Project: nicoschmidt/ocupy
def lower_bound(fm, nr_subs = None, nr_imgs = None, scale_factor = 1):
    """
    Compute the spatial bias lower bound for a fixmat.

    Input:
        fm : a fixmat instance
        nr_subs : the number of subjects used for the prediction. Defaults
                  to the total number of subjects in the fixmat minus 1
        nr_imgs : the number of images used for prediction. If given, the
                  same number will be used for every category. If not given,
                  leave-one-out will be used in all categories.
        scale_factor : the scale factor of the FDMs. Default is 1.
    Returns:
        A list of spatial bias scores; the list contains one dictionary for
        each measure. Each dictionary contains one key per category, and the
        corresponding value is an array with one score per subject.
    """
    nr_subs_total = len(np.unique(fm.SUBJECTINDEX))
    if nr_subs is None:
        nr_subs = nr_subs_total - 1
    assert (nr_subs < nr_subs_total)
    # initialize output structure; every measure gets one dict with
    # category numbers as keys and numpy-arrays as values
    sb_scores = []
    for measure in range(len(measures.scores)):
        res_dict = {}
        result_vectors = [np.empty(nr_subs_total) + np.nan
                            for _ in np.unique(fm.category)]
        res_dict.update(zip(np.unique(fm.category),result_vectors))
        sb_scores.append(res_dict)
    # compute mean spatial bias predictive power for all subjects in all
    # categories
    for fm_cat in fm.by_field('category'):
        cat = fm_cat.category[0]
        nr_imgs_cat = len(np.unique(fm_cat.filenumber))
        if not nr_imgs:
            nr_imgs_current = nr_imgs_cat - 1
        else:
            nr_imgs_current = nr_imgs
        assert(nr_imgs_current < nr_imgs_cat)
        for (sub_counter, sub) in enumerate(np.unique(fm.SUBJECTINDEX)):
            image_scores = []
            for fm_single in fm_cat.by_field('filenumber'):
                # Iterating by field filenumber makes filenumbers
                # in fm_single unique: Just take the first one to get the
                # filenumber for this fixmat
                fn = fm_single.filenumber[0]
                predicting_subs = (np.setdiff1d(np.unique(
                    fm_cat.SUBJECTINDEX), [sub]))
                np.random.shuffle(predicting_subs)
                predicting_subs = predicting_subs[0:nr_subs]
                predicting_fns = (np.setdiff1d(np.unique(
                    fm_cat.filenumber), [fn]))
                np.random.shuffle(predicting_fns)
                predicting_fns = predicting_fns[0:nr_imgs_current]
                predicting_fm = fm_cat[
                    (ismember(fm_cat.SUBJECTINDEX, predicting_subs)) &
                    (ismember(fm_cat.filenumber, predicting_fns))]
                predicted_fm = fm_single[fm_single.SUBJECTINDEX == sub]
                try:
                    predicting_fdm = compute_fdm(predicting_fm,
                        scale_factor = scale_factor)
                except NoFixations:
                    predicting_fdm = None
                image_scores.append(measures.prediction_scores(predicting_fdm,
                     predicted_fm))
            for (measure, score) in enumerate(nanmean(image_scores, 0)):
                sb_scores[measure][cat][sub_counter] = score
    return sb_scores
Example #21
 def skip_emd(self):
     fm1 = fixmat.TestFactory(params={'image_size': [93, 128]})
     fm2 = fixmat.TestFactory(points=list(zip(range(10, 50), range(10, 50))),
                              params={'image_size': [93, 128]})
     self.assertEqual(measures.emd_model(fixmat.compute_fdm(fm1), fm1), 0)
     self.assertTrue(not (
         measures.emd_model(fixmat.compute_fdm(fm1), fm2) == 0))
Example #22
 def test_pdf(self):
     fdm = fixmat.compute_fdm(self.fm)
     # ensure that we have a probability density function
     if fdm.min() < 0:
         fdm = fdm - fdm.min()
     self.assertFalse(abs(fdm.sum() - 1) > np.finfo('single').eps)
Example #23
 def test_pdf(self):
     fdm = fixmat.compute_fdm(self.fm)
     # ensure that we have a probability density function
     if fdm.min() < 0:
         fdm = fdm - fdm.min()
     self.assertFalse(abs(fdm.sum() - 1) > np.finfo('single').eps)
Example #24
File: bounds.py Project: nicoschmidt/ocupy
def intersubject_scores(fm, category, predicting_filenumbers,
                        predicting_subjects, predicted_filenumbers,
                        predicted_subjects, controls = True, scale_factor = 1):
    """
    Calculates how well the fixations from a set of subjects on a set of
    images can be predicted with the fixations from another set of subjects
    on another set of images.

    The prediction is carried out by computing a fixation density map from
    fixations of predicting_subjects subjects on predicting_images images.
    Prediction accuracy is assessed by measures.prediction_scores.

    Parameters
        fm : fixmat instance
        category : int
            Category from which the fixations are taken.
        predicting_filenumbers : list
            List of filenumbers used for prediction, i.e. images where fixations
            for the prediction are taken from.
        predicting_subjects : list
            List of subjects whose fixations on images in predicting_filenumbers
            are used for the prediction.
        predicted_filenumbers : list
            List of images from which the to-be-predicted fixations are taken.
        predicted_subjects : list
            List of subjects used for evaluation, i.e. subjects whose fixations
            on images in predicted_filenumbers are taken for evaluation.
        controls : bool, optional
            If True (default), n_predict subjects are chosen from the fixmat.
            If False, 1000 fixations are randomly generated and used for
            testing.
        scale_factor : int, optional
            specifies the scaling of the fdm. Default is 1.

    Returns
        auc : area under the roc curve for sets of actuals and controls
        true_pos_rate : ndarray
            Rate of true positives for every given threshold value.
            All values appearing in actuals are taken as thresholds. Uses lower
            sum interpolation.
        false_pos_rate : ndarray
            See true_pos_rate but for false positives.

    """
    predicting_fm = fm[
        (ismember(fm.SUBJECTINDEX, predicting_subjects)) &
        (ismember(fm.filenumber, predicting_filenumbers)) &
        (fm.category == category)]
    predicted_fm = fm[
        (ismember(fm.SUBJECTINDEX, predicted_subjects)) &
        (ismember(fm.filenumber, predicted_filenumbers)) &
        (fm.category == category)]
    try:
        predicting_fdm = compute_fdm(predicting_fm, scale_factor = scale_factor)
    except NoFixations:
        predicting_fdm = None

    if controls:
        fm_controls = fm[
            (ismember(fm.SUBJECTINDEX, predicted_subjects)) &
            ((ismember(fm.filenumber, predicted_filenumbers)) != True) &
            (fm.category == category)]
        return measures.prediction_scores(predicting_fdm, predicted_fm,
            controls = (fm_controls.y, fm_controls.x))
    return measures.prediction_scores(predicting_fdm, predicted_fm, controls = None)