def test_assign_list_types(self):
    """Check that ``assign_list_types`` labels an FR session pool with the
    requested number of BASELINE / NON-STIM / STIM / PS lists.

    Requested split: 4 baseline, 7 non-stim, 11 stim, 4 PS.
    """
    session = listgen.fr.generate_session_pool()
    session = listgen.assign_list_types(session, 4, 7, 11, 4)
    # Each list in the generated pool holds 12 words, so word counts are
    # converted back to list counts by dividing by this.
    words_per_list = 12
    assert 'stim_channels' in session
    # Every row that has stim channels assigned should default to channel 0.
    assert all(
        session[session.stim_channels.notnull()].stim_channels == (0, ))
    grouped = session.groupby("phase_type")
    counts = grouped.count()
    # Exactly four phase types are expected in the grouped counts.
    # NOTE(review): a "PRACTICE" phase is referenced below; if it were a
    # distinct phase_type value this count would be 5 — presumably the
    # practice list carries one of the four types (or the PRACTICE filter
    # below is vacuous). Confirm against listgen.assign_list_types.
    assert len(counts.index) == 4
    assert counts.loc["BASELINE"].listno / words_per_list == 4
    assert counts.loc["NON-STIM"].listno / words_per_list == 7
    assert counts.loc["STIM"].listno / words_per_list == 11
    assert counts.loc["PS"].listno / words_per_list == 4
    # The practice list (if present) must be list number 0. Note this
    # passes vacuously if no rows have phase_type == "PRACTICE".
    assert all(session[session["phase_type"] == "PRACTICE"].listno == 0)
    # Baseline lists occupy list numbers 1-3.
    # NOTE(review): only 3 listnos are checked here although 4 baseline
    # lists were requested — presumably list 0 is the fourth; confirm.
    for n in range(1, 4):
        assert n in session[session["phase_type"] == "BASELINE"].listno.unique()
    # PS lists occupy list numbers 4-7.
    for n in range(4, 8):
        assert n in session[session["phase_type"] == "PS"].listno.unique()
    # The remaining lists (8-25) are some interleaving of STIM/NON-STIM.
    for n in range(8, 26):
        assert session[session.listno == n]["phase_type"].isin(
            ["STIM", "NON-STIM"]).all()
def test_generate_learn1_blocks(self, iteration):
    """Check LEARN1 block generation: 4 blocks repeating the same 4
    multistim (A+B) lists, with unique running block-list numbers and
    shuffled within-block order.

    ``iteration`` is a parametrized fixture used only to repeat the test
    (list generation is stochastic).
    """
    session = listgen.fr.generate_session_pool()
    pool = listgen.assign_list_types(session, 4, 6, 16)
    stimspec = {(0, ): 5, (1, ): 5, (0, 1): 6}
    pool = listgen.assign_multistim(pool, stimspec)
    blocks = listgen.generate_learn1_blocks(pool, 2, 2, (0, 1))

    # Only lists stimulating both channels should appear in the blocks.
    stim_channels = blocks.stim_channels.unique().tolist()
    assert (0, 1) in stim_channels
    assert (0, ) not in stim_channels
    assert (1, ) not in stim_channels

    # 4 blocks drawn from 4 distinct source lists.
    assert len(blocks.blockno.unique()) == 4
    assert len(blocks.listno.unique()) == 4

    # Verify block list numbers: a unique running index 0-15 across the
    # 4 blocks x 4 lists.
    assert len(blocks.block_listno.unique()) == 16
    assert list(blocks.block_listno.unique()) == list(range(16))

    block_listnos = []
    for blockno in range(4):
        block = blocks[blocks.blockno == blockno]
        block_listnos.append(sorted(list(block.block_listno.unique())))
    # BUG FIX: the original asserted the list comprehension itself
    # (``assert [ ... ]``), which is always truthy for a non-empty list;
    # wrap in all() so each pairwise comparison is actually checked.
    assert all(block_listnos[i] == block_listnos[i + 1] for i in range(3))

    # Each block should present the same lists in a different order, so
    # no two block frames may be identical.
    blocks = [blocks[blocks.blockno == n].reset_index() for n in range(4)]
    for i, block1 in enumerate(blocks):
        for j, block2 in enumerate(blocks):
            if i == j:
                continue
            with pytest.raises(AssertionError):
                assert_frame_equal(block1, block2)
def test_generate_rec1_blocks(self):
    """Check REC1 block generation: returns a shuffled DataFrame that
    differs between invocations and preserves the pre-reset index.
    """
    pool = listgen.fr.generate_session_pool()
    assigned = listgen.assign_list_types(pool, 4, 6, 16, 0)
    lures = wordpool.load("REC1_lures_en.txt")
    blocks = listgen.generate_rec1_blocks(assigned, lures)
    assert isinstance(blocks, pd.DataFrame)

    # List numbers should not come out in sorted order (blocks are shuffled).
    assert not all(
        [s == u for s, u in zip(sorted(blocks.listno), blocks.listno)])

    # A second generation should produce a different word ordering.
    # BUG FIX: the original loop compared ``.iloc[0]`` to ``.iloc[0]`` on
    # every iteration (never using the loop variable), and its follow-up
    # ``assert n < len(blocks.word)`` was vacuously true because range()
    # never reaches len(). Compare position-by-position and require at
    # least one difference.
    blocks2 = listgen.generate_rec1_blocks(assigned, lures)
    assert any(blocks.word.iloc[n] != blocks2.word.iloc[n]
               for n in range(len(blocks.word)))

    # this should be the original index before being reset
    assert "index" in blocks.columns
def test_assign_multistim(self):
    """Check multistim assignment: invalid specs are rejected, and each
    channel combination receives the requested number of stim lists.
    """
    list_length = 12
    pool = listgen.fr.generate_session_pool(num_lists=26)
    spec = {(0, ): 4, (1, ): 5, (0, 1): 2}

    # Channel keys that aren't tuples must be rejected.
    with pytest.raises(AssertionError):
        listgen.assign_multistim(pool, {1: 5})

    # Assigning multistim before list types are assigned must fail.
    with pytest.raises(AssertionError):
        listgen.assign_multistim(pool, spec)

    pool = listgen.assign_list_types(pool, 4, 7, 11, 4)
    result = listgen.assign_multistim(pool, spec)
    assert 'stim_channels' in result.columns

    # Each channel combination shows up with the requested list count.
    seen = list(result.stim_channels.unique())
    for channels, expected_lists in spec.items():
        assert channels in seen
        matching = result[result.stim_channels == channels].stim_channels
        assert len(matching) / list_length == expected_lists
def prepare_experiment(self):
    """Pre-generate all word lists and copy files to the proper locations.
    """
    # Copy word pool to the data directory
    include_rec = self.config.recognition_enabled
    self.logger.info("Copying word pool(s) to data directory")
    self.copy_word_pool(osp.join(self.data_root, self.subject),
                        include_rec)

    # Generate all session lists and REC blocks
    self.logger.info("Pre-generating all word lists for %d sessions",
                     self.config.numSessions)
    all_lists = []
    all_rec_blocks = []
    all_learning_blocks = []

    for session in range(self.config.numSessions):
        self.logger.info("Pre-generating word lists for session %d",
                         session)

        # Select the pool generator for the experiment family.
        if self.family == "FR":
            gen_pool = listgen.fr.generate_session_pool
        elif self.family == "catFR":
            gen_pool = listgen.catfr.generate_session_pool
        else:
            raise ExperimentError(
                "Unexpected experiment family encountered: " + self.name)

        # Language is passed as a two-letter upper-case code (e.g. "EN").
        pool = gen_pool(language=self.language[:2].upper())

        # Phase counts come from the experiment configuration.
        n_baseline = self.config.n_baseline
        n_nonstim = self.config.n_nonstim
        n_stim = self.config.n_stim
        n_ps = self.config.n_ps
        assigned = listgen.assign_list_types(pool, n_baseline,
                                             n_nonstim, n_stim, n_ps)

        # Reassign stim lists for multistim
        # NOTE(review): stimspec keys here are strings ('A', 'B', 'AB'),
        # while the tests in this codebase pass channel tuples such as
        # (0,), (1,), (0, 1) to assign_multistim — confirm which key type
        # the current assign_multistim expects.
        if self.config.experiment in ['FR6', 'catFR6']:
            stimspec = {
                'A': self.config.n_stim_A,
                'B': self.config.n_stim_B,
                'AB': self.config.n_stim_AB
            }
            assert sum(stimspec.values()) == n_stim, \
                "Total number of stim lists doesn't match multistim specs"
            assigned = listgen.assign_multistim(assigned, stimspec)

        if self.debug:
            print(assigned)

        # Create session directory if it doesn't yet exist
        session_dir = osp.join(self.data_root, self.subject,
                               "session_{:d}".format(session))
        try:
            os.makedirs(session_dir)
        except OSError:
            # Directory already exists from a previous run; keep going.
            self.logger.warning("Session %d already created", session)

        # Write assigned list to session folders
        assigned.to_csv(osp.join(session_dir, "pool.tsv"), sep='\t')

        # Write .lst files to session folders (used in TotalRecall
        # during annotation).
        for listno in sorted(assigned.listno.unique()):
            name = "{:d}.lst".format(listno)
            entries = assigned[assigned.listno == listno]
            entries.word.to_csv(osp.join(session_dir, name), index=False,
                                header=False, encoding='latin1')

        all_lists.append(assigned)

        # Generate recognition phase lists if this experiment supports it
        # and save to session folder
        if self.config.recognition_enabled:
            self.logger.info("Pre-generating REC1 blocks for session %d",
                             session)

            # Load lures
            # TODO: update when Spanish allowed
            lures = listgen.LURES_LIST_EN
            # NOTE(review): this passes the raw ``pool`` rather than
            # ``assigned``; the unit test for generate_rec1_blocks passes
            # the type-assigned frame — confirm which one is intended
            # (and whether assign_list_types mutates pool in place).
            rec_blocks = listgen.generate_rec1_blocks(pool, lures)

            # Save to session folder
            rec_blocks.to_json(osp.join(session_dir, "rec_blocks.json"),
                               orient="records")

            all_rec_blocks.append(rec_blocks)

        # Generate repeated list learning (LEARN1) blocks if this
        # experiment needs it
        if self.config.learning_subtask:
            self.logger.info("Pre-generating LEARN1 blocks for session %d",
                             session)
            # NOTE(review): the final argument here is the string
            # 'STIM_AB', while the unit test passes the channel tuple
            # (0, 1) — confirm which form generate_learn1_blocks accepts.
            block = listgen.generate_learn1_blocks(assigned, 2, 2,
                                                   'STIM_AB')
            # save to session folder
            block.to_csv(osp.join(session_dir, 'learn1_blocks.csv'))
            all_learning_blocks.append(block)

    # Store lists in the state
    self.all_lists = all_lists
    self.all_rec_blocks = all_rec_blocks
    self.all_learning_blocks = all_learning_blocks
def prepare_experiment(self):
    """Pre-generate all word lists and copy files to the proper locations.
    """
    # Copy word pool to the data directory
    # TODO: only copy lures for tasks using REC1
    self.logger.info("Copying word pool(s) to data directory")
    self.copy_word_pool(osp.join(self.data_root, self.subject),
                        self.config.LANGUAGE, True)

    # Generate all session lists and REC blocks
    self.logger.info("Pre-generating all word lists for %d sessions",
                     self.config.numSessions)
    all_lists = []
    all_rec_blocks = []

    # PAL pools are generated for all sessions up front as word pairs.
    all_sessions = listgen.pal.generate_n_session_pairs(
        self.config.numSessions)

    for session, pool in enumerate(all_sessions):
        # self.logger.info("Pre-generating word lists for session %d",
        #                  session)

        # Phase counts come from the experiment configuration.
        n_baseline = self.config.n_baseline
        n_nonstim = self.config.n_nonstim
        n_stim = self.config.n_stim
        n_ps = self.config.n_ps

        if self.debug:
            print(pool)

        # PAL3 balances list types across two groups; other PAL variants
        # use the plain assignment.
        if self.config.experiment == "PAL3":
            assigned = listgen.assign_balanced_list_types(pool, n_baseline,
                                                          n_nonstim,
                                                          n_stim, n_ps,
                                                          num_groups=2)
        else:
            assigned = listgen.assign_list_types(pool, n_baseline,
                                                 n_nonstim, n_stim, n_ps)

        if self.debug:
            print(assigned)

        all_lists.append(assigned)

        # Create session directory if it doesn't yet exist
        session_dir = osp.join(self.data_root, self.subject,
                               "session_{:d}".format(session))
        try:
            os.makedirs(session_dir)
        except OSError:
            # Directory already exists from a previous run; keep going.
            self.logger.warning("Session %d already created", session)

        # Write assigned list to session folders
        assigned.to_csv(osp.join(session_dir, "pool.tsv"), sep='\t')

        # Write .lst files to session folders (used in TotalRecall
        # during annotation).
        for listno in sorted(assigned.listno.unique()):
            for i in range(self.config.n_pairs):
                name = "{:d}_{:d}.lst".format(listno, i)
                entries = assigned[assigned.listno == listno]
                # NOTE(review): each of the n_pairs files for a list is
                # written with the FULL set of word pairs for that list —
                # ``i`` only affects the filename. Confirm whether a
                # per-pair subset was intended here.
                with codecs.open(osp.join(session_dir, name), 'w',
                                 encoding="utf8") as f:
                    f.writelines(row.word1 + "\n" + row.word2 + "\n"
                                 for _, row in entries.iterrows())

        # Generate recognition phase lists if this experiment supports it
        # and save to session folder
        if self.config.recognition_enabled:
            self.logger.info("Pre-generating REC1 blocks for session %d",
                             session)

            # Load lures
            # TODO: update when Spanish allowed
            lures = listgen.LURES_LIST_EN
            # NOTE(review): passes the raw ``pool`` rather than
            # ``assigned`` — confirm this is intended (or that
            # assign_list_types mutates pool in place).
            rec_blocks = listgen.generate_rec1_blocks(pool, lures)

            # Save to session folder
            rec_blocks.to_json(osp.join(session_dir, "rec_blocks.json"),
                               orient="records")

            all_rec_blocks.append(rec_blocks)

    # Store lists and REC blocks in the state
    self.all_lists = all_lists
    self.all_rec_blocks = all_rec_blocks