Example #1
    def test_file_stimuli(self):
        img1 = np.random.randint(255, size=(100, 200, 3)).astype('uint8')
        filename1 = os.path.join(self.data_path, 'img1.png')
        imwrite(filename1, img1)

        img2 = np.random.randint(255, size=(50, 150)).astype('uint8')
        filename2 = os.path.join(self.data_path, 'img2.png')
        imwrite(filename2, img2)

        stimuli = pysaliency.FileStimuli([filename1, filename2])

        self.assertEqual(len(stimuli.stimuli), 2)
        for s1, s2 in zip(stimuli.stimuli, [img1, img2]):
            np.testing.assert_allclose(s1, s2)
        self.assertEqual(stimuli.shapes, [(100, 200, 3), (50, 150)])
        self.assertEqual(list(stimuli.sizes), [(100, 200), (50, 150)])
        self.assertEqual(stimuli.stimulus_ids[1],
                         pysaliency.datasets.get_image_hash(img2))
        self.assertEqual(stimuli.stimulus_objects[1].stimulus_id,
                         stimuli.stimulus_ids[1])

        new_stimuli = self.pickle_and_reload(stimuli, pickler=dill)
        print(new_stimuli.stimuli)

        self.assertEqual(len(new_stimuli.stimuli), 2)
        for s1, s2 in zip(new_stimuli.stimuli, [img1, img2]):
            np.testing.assert_allclose(s1, s2)
        self.assertEqual(new_stimuli.shapes, [(100, 200, 3), (50, 150)])
        self.assertEqual(list(new_stimuli.sizes), [(100, 200), (50, 150)])
        self.assertEqual(new_stimuli.stimulus_ids[1],
                         pysaliency.datasets.get_image_hash(img2))
        self.assertEqual(new_stimuli.stimulus_objects[1].stimulus_id,
                         stimuli.stimulus_ids[1])
Example #2
def file_stimuli_with_attributes(tmpdir):
    filenames = []
    for i in range(3):
        filename = tmpdir.join('stimulus_{:04d}.png'.format(i))
        imwrite(
            str(filename),
            np.random.randint(low=0,
                              high=255,
                              size=(100, 100, 3),
                              dtype=np.uint8))
        filenames.append(str(filename))

    for sub_directory_index in range(3):
        sub_directory = tmpdir.join(
            'sub_directory_{:04d}'.format(sub_directory_index))
        sub_directory.mkdir()
        for i in range(5):
            filename = sub_directory.join('stimulus_{:04d}.png'.format(i))
            imwrite(
                str(filename),
                np.random.randint(low=0,
                                  high=255,
                                  size=(100, 100, 3),
                                  dtype=np.uint8))
            filenames.append(str(filename))
    attributes = {
        'dva': list(range(len(filenames))),
        'other_stuff': np.random.randn(len(filenames)),
        'some_strings': list('abcdefghijklmnopqr'),
    }
    return pysaliency.FileStimuli(filenames=filenames, attributes=attributes)
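A minimal usage sketch for the fixture above, assuming the attributes dict is sliced together with the stimuli (the list-indexing syntax follows Example #4):

def test_attributes_follow_slicing(file_stimuli_with_attributes):
    # 3 top-level images plus 3 sub-directories with 5 images each
    assert len(file_stimuli_with_attributes) == 18
    # Assumption: slicing a FileStimuli object also slices its attributes
    subset = file_stimuli_with_attributes[[0, 2, 4]]
    assert list(subset.attributes['dva']) == [0, 2, 4]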
Example #3
def file_stimuli(tmpdir):
    filenames = []
    for i in range(3):
        filename = tmpdir.join('stimulus_{:04d}.png'.format(i))
        imsave(
            str(filename),
            np.random.randint(low=0,
                              high=255,
                              size=(100, 100, 3),
                              dtype=np.uint8))
        filenames.append(str(filename))

    for sub_directory_index in range(3):
        sub_directory = tmpdir.join(
            'sub_directory_{:04d}'.format(sub_directory_index))
        sub_directory.mkdir()
        for i in range(5):
            filename = sub_directory.join('stimulus_{:04d}.png'.format(i))
            imsave(
                str(filename),
                np.random.randint(low=0,
                                  high=255,
                                  size=(100, 100, 3),
                                  dtype=np.uint8))
            filenames.append(str(filename))
    return pysaliency.FileStimuli(filenames=filenames)
Example #4
    def test_slicing(self):
        count = 10
        widths = np.random.randint(20, 200, size=count)
        heights = np.random.randint(20, 200, size=count)
        images = [np.random.randint(255, size=(h, w, 3)).astype('uint8') for h, w in zip(heights, widths)]
        filenames = []
        for i, img in enumerate(images):
            filename = os.path.join(self.data_path, 'img{}.png'.format(i))
            imwrite(filename, img)
            filenames.append(filename)

        stimuli = pysaliency.FileStimuli(filenames)
        for i in range(count):
            s = stimuli[i]
            np.testing.assert_allclose(s.stimulus_data, stimuli.stimuli[i])
            self.assertEqual(s.stimulus_id, stimuli.stimulus_ids[i])
            self.assertEqual(s.shape, stimuli.shapes[i])
            self.assertEqual(s.size, stimuli.sizes[i])

        indices = [2, 4, 7]
        ss = stimuli[indices]
        for k, i in enumerate(indices):
            np.testing.assert_allclose(ss.stimuli[k], stimuli.stimuli[i])
            self.assertEqual(ss.stimulus_ids[k], stimuli.stimulus_ids[i])
            self.assertEqual(ss.shapes[k], stimuli.shapes[i])
            self.assertEqual(list(ss.sizes[k]), list(stimuli.sizes[i]))

        slc = slice(2, 8, 3)
        ss = stimuli[slc]
        indices = range(len(stimuli))[slc]
        for k, i in enumerate(indices):
            np.testing.assert_allclose(ss.stimuli[k], stimuli.stimuli[i])
            self.assertEqual(ss.stimulus_ids[k], stimuli.stimulus_ids[i])
            self.assertEqual(ss.shapes[k], stimuli.shapes[i])
            self.assertEqual(list(ss.sizes[k]), list(stimuli.sizes[i]))
Example #5
def file_stimuli(tmpdir):
    filenames = []
    for i in range(10):
        filename = tmpdir.join('stimulus_{:04d}.png'.format(i))
        imsave(
            str(filename),
            np.random.randint(low=0,
                              high=255,
                              size=(100, 100, 3),
                              dtype=np.uint8))
        filenames.append(str(filename))

    return pysaliency.FileStimuli(filenames=filenames)
Example #6
def test_export_model_overwrite(file_stimuli, tmpdir):
    model1 = pysaliency.GaussianSaliencyMapModel(width=0.1)
    model2 = pysaliency.GaussianSaliencyMapModel(width=0.8)

    filename = str(tmpdir.join('model.hdf5'))

    partial_stimuli = pysaliency.FileStimuli(
        filenames=file_stimuli.filenames[:5])

    export_model_to_hdf5(model1, partial_stimuli, filename)
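    # Assumption: exporting again to the same file with the full stimulus set
    # overwrites the partial export above; the checks below verify that
    # model2's maps are returned for every stimulus.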
    export_model_to_hdf5(model2, file_stimuli, filename)

    model3 = pysaliency.HDF5SaliencyMapModel(file_stimuli, filename)
    for s in file_stimuli:
        np.testing.assert_allclose(model2.saliency_map(s),
                                   model3.saliency_map(s))
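A hedged follow-up sketch, not taken from the test above: the exported HDF5 file can be scored like the directory-backed models in Example #7 (fixations here stands for a hypothetical pysaliency Fixations object):

def evaluate_exported_model(file_stimuli, fixations, filename):
    # Load the exported saliency maps back as a model and compute AUC,
    # mirroring the evaluation calls in Example #7.
    model = pysaliency.HDF5SaliencyMapModel(file_stimuli, filename)
    return model.AUC(file_stimuli, fixations, nonfixations="uniform")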
Example #7
def main(args):
    if len(args) != 5:
        print("Usage: score.py stimdir modeldir fixationfile goldstandarddir")
        return

    stimdir = args[1]
    modeldir = args[2]
    fixfile = args[3]
    goldstandarddir = args[4]
    # This expects a directory of .png, .jpg, .tiff, .mat, or .npy files.
    # The stimulus list is given as a set of files, wrapped in a FileStimuli object.
    stims = os.listdir(stimdir)
    # Assumption: we're dealing with an extracted norpix dataset or similar,
    # so sorting the filenames recovers the intended stimulus order.
    sortedstims = [os.path.join(stimdir, s) for s in sorted(stims)]
    stimuli = pysaliency.FileStimuli(sortedstims)
    # Further note: the number of model images must equal the number of stimuli,
    # and they must use the same base naming convention (minus file extensions)
    # as the stimuli.
    print("Loading data into model...")
    model = pysaliency.SaliencyMapModelFromDirectory(stimuli, modeldir)
    
    print("Loading gold standard model...")
    goldstandard=pysaliency.SaliencyMapModelFromDirectory(stimuli, goldstandarddir)
    
    print("Reading fixations...")
    fixations=getFixations(fixfile)
    
    # To evaluate the model, call one of the evaluation methods on the model itself.
    print("score:")
    print("AUC:")
    print(model.AUC(stimuli, fixations, nonfixations="uniform"))  # nonfixations may also be "shuffled"
    print("Fixation-based KL divergence:")
    print(model.fixation_based_KL_divergence(stimuli, fixations, nonfixations="uniform"))
    print("Image-based KL divergence:")
    # TODO: consider setting the model parameter to False, since the passed
    # gold standard should potentially be probabilistic.
    print(model.image_based_kl_divergence(stimuli, goldstandard))
    print("NSS:")
    print(model.NSS(stimuli, fixations))
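A minimal entry point for the script above, assuming it is invoked as shown in the usage string (score.py stimdir modeldir fixationfile goldstandarddir):

if __name__ == "__main__":
    import sys  # assumed not to be imported at module level in this snippet
    main(sys.argv)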