def test_selection_by_mask(self):
    """Test mask-based Mappable selection.

    Only one peak in the test dataset (in study5) should be within the
    sgACC mask.
    """
    ids = self.dataset.get_ids_by_mask(
        get_test_data_path() + 'sgacc_mask.nii.gz')
    # assertEqual replaces the deprecated assertEquals alias
    # (removed in Python 3.12).
    self.assertEqual(len(ids), 1)
    self.assertEqual('study5', ids[0])
def test_selection_by_mask(self):
    """Test mask-based Mappable selection.

    Only one peak in the test dataset (in study5) should be within the
    sgACC mask.

    NOTE(review): this method appears verbatim twice in the file; the
    second definition shadows the first — consider deleting one copy.
    """
    ids = self.dataset.get_ids_by_mask(
        get_test_data_path() + 'sgacc_mask.nii.gz')
    # assertEqual replaces the deprecated assertEquals alias
    # (removed in Python 3.12).
    self.assertEqual(len(ids), 1)
    self.assertEqual('study5', ids[0])
def test_audio_stim(self):
    """Load a WAV file and check STFT annotation output dimensions."""
    audio_dir = join(get_test_data_path(), 'audio')
    stim = AudioStim(join(audio_dir, 'barber.wav'))
    # assertEqual replaces the deprecated assertEquals alias
    # (removed in Python 3.12).
    self.assertEqual(stim.sampling_rate, 11025)
    ann = STFTAnnotator(frame_size=1., spectrogram=False,
                        bins=[(100, 300), (300, 3000), (3000, 20000)])
    timeline = stim.annotate([ann])
    df = timeline.to_df('long')
    self.assertEqual(df.shape, (1671, 4))
def test_complex_text_stim(self):
    """Read complex text stimuli with and without a header row and
    verify element counts, onsets, and durations."""
    text_dir = join(get_test_data_path(), 'text')
    stim = ComplexTextStim(join(text_dir, 'complex_stim_no_header.txt'),
                           columns='ot', default_duration=0.2)
    # assertEqual replaces the deprecated assertEquals alias
    # (removed in Python 3.12).
    self.assertEqual(len(stim.elements), 4)
    self.assertEqual(stim.elements[2].onset, 34)
    # No duration column in this file, so default_duration applies.
    self.assertEqual(stim.elements[2].duration, 0.2)
    stim = ComplexTextStim(join(text_dir, 'complex_stim_with_header.txt'))
    self.assertEqual(len(stim.elements), 4)
    self.assertEqual(stim.elements[2].duration, 0.1)
def test_text_annotation(self):
    """Annotate a text stimulus with a lexical dictionary and verify
    the exported timeline DataFrame."""
    text_dir = join(get_test_data_path(), 'text')
    stim = ComplexTextStim(join(text_dir, 'sample_text.txt'),
                           columns='to', default_duration=1)
    td = TextDictionaryAnnotator(
        join(text_dir, 'test_lexical_dictionary.txt'),
        variables=['length', 'frequency'])
    # assertEqual replaces the deprecated assertEquals alias
    # (removed in Python 3.12).
    self.assertEqual(td.data.shape, (7, 2))
    timeline = stim.annotate([td])
    df = TimelineExporter.timeline_to_df(timeline)
    self.assertEqual(df.shape, (12, 4))
    self.assertEqual(df.iloc[9, 3], 10.6)
def test_full_pipeline(self):
    """Smoke test of entire pipeline, from stimulus loading to event
    file export."""
    from glob import glob
    stim = VideoStim(join(get_test_data_path(), 'video', 'small.mp4'))
    annotators = [DenseOpticalFlowAnnotator(), FaceDetectionAnnotator()]
    timeline = stim.annotate(annotators, show=False)
    exp = FSLExporter()
    tmpdir = tempfile.mkdtemp()
    try:
        exp.export(timeline, tmpdir)
        files = glob(join(tmpdir, '*.txt'))
        # assertEqual replaces the deprecated assertEquals alias
        # (removed in Python 3.12).
        self.assertEqual(len(files), 2)
    finally:
        # Remove the temp dir even when an assertion above fails, so
        # repeated runs don't leak directories.
        shutil.rmtree(tmpdir)
def test_video_stim(self):
    """Test VideoStim functionality: metadata and frame iteration."""
    filename = join(get_test_data_path(), 'video', 'small.mp4')
    video = VideoStim(filename)
    # assertEqual replaces the deprecated assertEquals alias
    # (removed in Python 3.12).
    self.assertEqual(video.fps, 30)
    self.assertEqual(video.n_frames, 166)
    self.assertEqual(video.width, 560)
    # Test frame iterator: one VideoFrameStim per frame.
    frames = [f for f in video]
    self.assertEqual(len(frames), 166)
    f = frames[100]
    self.assertIsInstance(f, VideoFrameStim)
    self.assertIsInstance(f.onset, float)
    self.assertEqual(f.data.shape, (320, 560, 3))
#-- Load params split_name = str(args.split) test_iters = args.test_iters test_model = args.test_model snapshot = args.snapshot experiment = args.experiment gauss_var = args.gauss_var #--- GPU caffe.set_mode_gpu() #--- LOAD SMOTHED POSITION MAPS position_maps = utils.load_position_maps(split_name, gauss_var) #--- LOAD TEST DATA test_data = utils.get_test_data_path(split_name) #--- GET TEST RESULTS PATH test_res_path = utils.get_result_path(experiment, split_name) ###--- TEST print 'Testing' sys.stdout.flush() net_results, position_results = test_net(test_model, snapshot, test_data, test_iters, position_maps) im_acc, price_acc, name_acc = net_results print 'NET: image accuracy:', im_acc print 'NET: price accuracy:', price_acc print 'NET: name accuracy:', name_acc
import os
from os.path import join

import pandas as pd

from utils import get_test_data_path

data_dir = get_test_data_path()

# Output directory for processed CSVs; create it if missing.
zero_csv_file = join(data_dir, "csv_files_zero")
if not os.path.exists(zero_csv_file):
    os.makedirs(zero_csv_file)

files = join(data_dir, "csv_files")
print(files)

for filename in os.listdir(files):
    if "presentation" in filename:
        csv_file = join(files, filename)
        df = pd.read_csv(csv_file)
        # Row indices marking presentation-block boundaries; row 0 is
        # always treated as a boundary via the initial seed value.
        lst = [0]
        # Hoist the column lookup out of the loop.
        offsets = df["ImageDisplay1.OffsetTime"]
        # Rows 0 and 1 have no row-2 predecessor: in the original code
        # `offsets[row - 2]` raised KeyError (negative labels are absent
        # from the default integer index) and the bare `except` silently
        # skipped them — including the dead `or row == 0` branch. Start
        # at row 2 explicitly; observable output is unchanged.
        for row in range(2, len(offsets)):
            try:
                time_difference = offsets[row] - offsets[row - 2]
                # A gap longer than 7500 ms marks a new block boundary.
                if time_difference > 7500:
                    lst += [row]
                print(lst)
            except (KeyError, TypeError):
                # Missing or non-numeric offset values: skip the row,
                # matching the original silent-skip behavior but without
                # swallowing unrelated exceptions.
                continue