Example #1
    def test_underscores_in_datatype_names(self):
        trials = data.TrialHandlerExt([], 1, autoLog=False)
        trials.data.addDataType('with_underscore')
        for trial in trials:  # need to run trials or file won't be saved
            trials.addData('with_underscore', 0)
        base_data_filename = pjoin(self.temp_dir, self.rootName)
        trials.saveAsExcel(base_data_filename)
        trials.saveAsText(base_data_filename, delim=',')

        # Make sure the file is there
        data_filename = base_data_filename + '.csv'
        assert os.path.exists(data_filename), "File not found: %s" % os.path.abspath(data_filename)

        # Make sure the header line is correct
        # We open the file with universal newline support (PEP-278).
        if PY3:
            with open(data_filename, 'r', newline=None) as f:
                header = f.readline()
        else:
            with open(data_filename, 'rU') as f:
                header = f.readline()

        expected_header = u'n,with_underscore_mean,with_underscore_raw,with_underscore_std,order\n'
        if expected_header != header:
            print(base_data_filename)
            print(repr(expected_header), type(expected_header),
                  len(expected_header))
            print(repr(header), type(header), len(header))
        assert expected_header == header
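For context, the expected header shows the column-naming convention of the delimited output: every data type registered via addDataType/addData contributes suffixed summary columns (_mean, _raw, _std) alongside n and order. A minimal sketch of checking those columns with the standard csv module (the file path is hypothetical and stands in for base_data_filename + '.csv'):

    import csv

    with open('trials.csv', newline='') as f:  # hypothetical path
        columns = next(csv.reader(f))

    # The registered 'with_underscore' data type should yield three columns.
    assert {'with_underscore_mean',
            'with_underscore_raw',
            'with_underscore_std'} <= set(columns)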
Example #2
    def test_random_data_output(self):
        # create conditions
        conditions = []
        for trialType in range(5):
            conditions.append({'trialType': trialType})

        trials = data.TrialHandlerExt(trialList=conditions, seed=100, nReps=3,
                                      method='random', autoLog=False)
        # simulate trials
        rng = np.random.RandomState(seed=self.random_seed)

        for thisTrial in trials:
            resp = 'resp'+str(thisTrial['trialType'])
            randResp = rng.rand()
            trials.addData('resp', resp)
            trials.addData('rand', randResp)

        # test summarised data outputs (this omits values)
        trials.saveAsText(pjoin(self.temp_dir, 'testRandom.tsv'),
                          stimOut=['trialType'], appendFile=False)
        utils.compareTextFiles(pjoin(self.temp_dir, 'testRandom.tsv'),
                               pjoin(fixturesPath, 'corrRandom.tsv'))
        # test wide data outputs (this omits values)
        trials.saveAsWideText(pjoin(self.temp_dir, 'testRandom.csv'),
                              delim=',', appendFile=False)
        utils.compareTextFiles(pjoin(self.temp_dir, 'testRandom.csv'),
                               pjoin(fixturesPath, 'corrRandom.csv'))
Example #3
    def test_psydat_filename_collision_overwriting(self):
        for count in [1, 10, 20]:
            trials = data.TrialHandlerExt([], 1, autoLog=False)
            trials.data.addDataType('trialType')
            for trial in trials:  # need to run trials or file won't be saved
                trials.addData('trialType', 0)
            base_data_filename = pjoin(self.temp_dir,
                                       self.rootName + 'overwrite')

            trials.saveAsPickle(base_data_filename,
                                fileCollisionMethod='overwrite')

            # Make sure the file just saved is there
            data_filename = base_data_filename + '.psydat'
            assert os.path.exists(
                data_filename
            ), "File not found: %s" % os.path.abspath(data_filename)

            # Make sure only one file for this loop exists: each iteration
            # overwrote the previous one (fileCollisionMethod='overwrite').
            matches = len(
                glob.glob(
                    os.path.join(self.temp_dir,
                                 self.rootName + "*overwrite.psydat")))
            assert matches == 1, (
                "Found %d matching files, should be 1" % matches)
Example #4
    def test_underscores_in_datatype_names(self):
        trials = data.TrialHandlerExt([], 1, autoLog=False)
        trials.data.addDataType('with_underscore')
        for trial in trials:  # need to run trials or file won't be saved
            trials.addData('with_underscore', 0)
        base_data_filename = pjoin(self.temp_dir, self.rootName)
        trials.saveAsExcel(base_data_filename)
        trials.saveAsText(base_data_filename, delim=',')

        # Make sure the file is there
        data_filename = base_data_filename + '.csv'
        assert os.path.exists(
            data_filename
        ), "File not found: %s" % os.path.abspath(data_filename)

        # Make sure the header line is correct
        with open(data_filename, 'rb') as f:
            raw_header = f.readline()
        # Decode the bytes so the comparison works the same on Python 2 and 3.
        header = raw_header.decode('utf-8').replace('\n', '').replace('\r', '')
        expected_header = u"n,with_underscore_mean,with_underscore_raw,with_underscore_std,order"
        if expected_header != header:
            print(base_data_filename)
            print(repr(expected_header), type(expected_header),
                  len(expected_header))
            print(repr(header), type(header), len(header))
        assert expected_header == header
Example #5
    def test_psydat_filename_collision_output(self):
        # create conditions
        conditions = []
        for trialType in range(5):
            conditions.append({'trialType': trialType})
        # create trials
        trials = data.TrialHandlerExt(trialList=conditions,
                                      seed=100,
                                      nReps=3,
                                      method='fullRandom',
                                      autoLog=False)
        # simulate trials
        for thisTrial in trials:
            resp = 'resp' + str(thisTrial['trialType'])
            # a unique number so we can see which order the trials ran in
            # (assumes `from random import random` at module level)
            randResp = random()
            trials.addData('resp', resp)
            trials.addData('rand', randResp)
        # test summarised data outputs (this omits values)
        trials.saveAsText(pjoin(self.temp_dir, 'testFullRandom.tsv'),
                          stimOut=['trialType'],
                          appendFile=False)
        utils.compareTextFiles(pjoin(self.temp_dir, 'testFullRandom.tsv'),
                               pjoin(fixturesPath, 'corrFullRandom.tsv'))
        # test wide data outputs (this omits values)
        trials.saveAsWideText(pjoin(self.temp_dir, 'testFullRandom.csv'),
                              delim=',',
                              appendFile=False)
        utils.compareTextFiles(pjoin(self.temp_dir, 'testFullRandom.csv'),
                               pjoin(fixturesPath, 'corrFullRandom.csv'))
Example #6
    def test_psydat_filename_collision_failure(self):
        with pytest.raises(IOError):
            for count in range(1, 3):
                trials = data.TrialHandlerExt([], 1, autoLog=False)
                trials.data.addDataType('trialType')
                for trial in trials:  # need to run trials or file won't be saved
                    trials.addData('trialType', 0)
                base_data_filename = pjoin(self.temp_dir, self.rootName)

                trials.saveAsPickle(base_data_filename, fileCollisionMethod='fail')
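Taken together with Example #3 above, this shows the two collision behaviours saveAsPickle exposes: 'fail' raises IOError when the target .psydat already exists, while 'overwrite' silently replaces it. A hedged sketch of the non-test pattern, assuming a TrialHandlerExt instance named trials that has already been run and a target path base (without the .psydat extension):

    try:
        # Refuse to clobber an existing file; raises IOError if one is there.
        trials.saveAsPickle(base, fileCollisionMethod='fail')
    except IOError:
        # Explicitly opt in to replacing the old file instead.
        trials.saveAsPickle(base, fileCollisionMethod='overwrite')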
Example #7
    def setup_method(self, method):
        # create conditions
        conditions = []
        for trialType in range(5):
            conditions.append({'trialType': trialType})

        self.trials = data.TrialHandlerExt(
            trialList=conditions, seed=self.random_seed, nReps=3,
            method='random', autoLog=False)

        # simulate trials
        rng = np.random.RandomState(seed=self.random_seed)

        for thisTrial in self.trials:
            resp = 'resp' + str(thisTrial['trialType'])
            randResp = rng.rand()
            self.trials.addData('resp', resp)
            self.trials.addData('rand', randResp)
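A test method built on this fixture would typically save one of the handler's output formats and diff it against a stored fixture file, in the style of Examples #2 and #5. A sketch, with hypothetical output and fixture file names:

    def test_wide_text_matches_fixture(self):
        # Hypothetical test using the self.trials fixture from setup_method;
        # 'testTrials.csv' and 'corrTrials.csv' are placeholder names.
        out_path = pjoin(self.temp_dir, 'testTrials.csv')
        self.trials.saveAsWideText(out_path, delim=',', appendFile=False)
        utils.compareTextFiles(out_path,
                               pjoin(fixturesPath, 'corrTrials.csv'))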
Example #8
    def test_comparison_not_equal_after_iteration(self):
        t1 = data.TrialHandlerExt([dict(foo=1)], 2)
        t2 = data.TrialHandlerExt([dict(foo=1)], 3)
        t1.__next__()
        t2.__next__()
        assert t1 != t2
Example #9
    def test_comparison_not_equal(self):
        t1 = data.TrialHandlerExt([dict(foo=1)], 2)
        t2 = data.TrialHandlerExt([dict(foo=1)], 3)
        assert t1 != t2
Example #10
    def test_comparison_equals(self):
        t1 = data.TrialHandlerExt([dict(foo=1)], 2)
        t2 = data.TrialHandlerExt([dict(foo=1)], 2)
        assert t1 == t2