def __init__(self):
    """Configure the ImageNet-2012 top-1 benchmark: stimuli, accuracy metric, and ceiling."""
    csv_path = os.path.join(os.path.dirname(__file__), 'imagenet2012.csv')
    stimuli = StimulusSet(pd.read_csv(csv_path))
    # map every image id onto its on-disk location
    stimuli.image_paths = {record.image_id: record.filepath
                           for record in stimuli.itertuples()}
    self._stimulus_set = stimuli
    self._similarity_metric = Accuracy()
    # ceiling fixed at a perfect score of 1, with no error estimate
    fixed_ceiling = Score([1, np.nan],
                          coords={'aggregation': ['center', 'error']},
                          dims=['aggregation'])
    super(Imagenet2012, self).__init__(
        identifier='fei-fei.Deng2009-top1',
        version=1,
        ceiling_func=lambda: fixed_ceiling,
        parent='ImageNet',
        bibtex="""@INPROCEEDINGS{5206848, author={J. {Deng} and W. {Dong} and R. {Socher} and L. {Li} and {Kai Li} and {Li Fei-Fei}}, booktitle={2009 IEEE Conference on Computer Vision and Pattern Recognition}, title={ImageNet: A large-scale hierarchical image database}, year={2009}, volume={}, number={}, pages={248-255}, url = {https://ieeexplore.ieee.org/document/5206848} }""")
def collect_synth(h5, data_dir):
    """Collect the synthetic-image data from *h5*.

    Walks the monkey/setting/session hierarchy under ``h5.root.images.synthetic``,
    writes each session's image arrays to PNG files under
    ``data_dir/images_temp/synthetic/<identifier>``, and pairs them with the
    matching neural recordings.

    :param h5: open HDF5 handle with ``images.synthetic``, ``neural.synthetic``
        and ``target_inds`` trees (pytables-style groups — TODO confirm)
    :param data_dir: pathlib.Path under which temporary images are written
    :return: tuple of (StimulusSet over all sessions,
        dict mapping assembly name -> NeuronRecordingAssembly)
    """
    stimuli_frames = []
    neural_by_session = {}
    for monkey in h5.root.images.synthetic:
        for setting in monkey:
            for session_images in setting:
                m_name = monkey._v_name
                s_name = setting._v_name
                sess_name = session_images._v_name
                session_neural = h5.root.neural.synthetic[m_name][s_name][sess_name]
                session_target_inds = h5.root.target_inds[m_name][s_name][sess_name]
                identifier = f"{m_name[-1]}_{s_name}_{sess_name}"
                # dump the raw arrays to disk so each image can be referenced by path
                out_dir = data_dir / "images_temp" / "synthetic" / identifier
                out_dir.mkdir(parents=True, exist_ok=True)
                frame = np_to_png(session_images, out_dir)
                frame["animal"] = m_name
                frame["setting"] = s_name
                frame["session"] = sess_name
                stimuli_frames.append(frame)
                assembly = NeuronRecordingAssembly(
                    np_to_xr(monkey, setting, session_neural, frame,
                             session_target_inds, "synth"))
                neural_by_session[assembly.name] = assembly
    combined = pd.concat(stimuli_frames, axis=0)
    # image ids must be globally unique across all sessions
    assert len(np.unique(combined['image_id'])) == len(combined)
    stimuli = StimulusSet(combined)
    stimuli.image_paths = {record.image_id: record.image_current_local_file_path
                           for record in stimuli.itertuples()}
    return stimuli, neural_by_session
def collect_stimuli(data_dir):
    """Load the ImageNet-2012 stimulus listing and return a trimmed StimulusSet.

    :param data_dir: pathlib.Path containing ``imagenet2012.csv``
    :return: StimulusSet restricted to the columns needed for packaging,
        with ``image_paths`` populated from the csv's ``filepath`` column
    """
    stimuli = StimulusSet(pd.read_csv(data_dir / 'imagenet2012.csv'))
    stimuli.image_paths = {record.image_id: record.filepath
                           for record in stimuli.itertuples()}
    # the path inside the store is the filename with its extension stripped
    stimuli['image_path_within_store'] = stimuli['filename'].apply(
        lambda name: os.path.splitext(name)[0])
    stimuli = stimuli[['image_id', 'label', 'synset',
                       'image_file_sha1', 'image_path_within_store']]
    assert len(np.unique(stimuli['image_id'])) == len(stimuli), "duplicate entries"
    return stimuli
def collect_stimuli_nat(h5, data_dir):
    """Export the naturalistic images in *h5* to PNG and wrap them as a StimulusSet.

    :param h5: open HDF5 handle whose ``root.images.naturalistic`` node holds the images
    :param data_dir: pathlib.Path under which ``images_temp/naturalistic`` is created
    :return: StimulusSet with ``image_paths`` pointing at the freshly written files
    """
    out_dir = data_dir / "images_temp" / "naturalistic"
    out_dir.mkdir(parents=True, exist_ok=True)
    frame = np_to_png(h5.root.images.naturalistic, out_dir)
    # ids must be unique before wrapping
    assert len(np.unique(frame['image_id'])) == len(frame)
    stimuli = StimulusSet(frame)
    stimuli.image_paths = {record.image_id: record.image_current_local_file_path
                           for record in stimuli.itertuples()}
    return stimuli
def prep_proto_stim():
    """Build a throwaway StimulusSet from the bundled test images.

    Every image id (and one extra column name) is suffixed with ``now()`` so
    repeated runs never collide — presumably to exercise fresh-upload paths;
    TODO confirm against callers.

    :return: StimulusSet with timestamped ids, populated ``image_paths``, and
        an ``image_file_name`` column mirroring ``image_path_within_store``
    """
    image_dir = Path(__file__).parent / "images"
    frame = pd.read_csv(image_dir / "test_images.csv")
    frame["image_id"] = [f"{iid}.{now()}" for iid in frame["image_id"]]
    # NOTE: this second comprehension reads the ids updated just above,
    # so the new column's values carry two timestamps
    frame[f"test_{now()}"] = [f"{iid}.{now()}" for iid in frame["image_id"]]
    stimuli = StimulusSet(frame)
    stimuli.image_paths = {
        record.image_id: image_dir / record.image_current_relative_file_path
        for record in stimuli.itertuples()
    }
    stimuli['image_file_name'] = stimuli['image_path_within_store']
    return stimuli
def __init__(self):
    """Configure the ImageNet-2012 top-1 benchmark: stimuli, accuracy metric, and ceiling."""
    csv_path = os.path.join(os.path.dirname(__file__), 'imagenet2012.csv')
    stimuli = StimulusSet(pd.read_csv(csv_path))
    # map every image id onto its on-disk location
    stimuli.image_paths = {record.image_id: record.filepath
                           for record in stimuli.itertuples()}
    self._stimulus_set = stimuli
    self._similarity_metric = Accuracy()
    # ceiling fixed at a perfect score of 1, with no error estimate
    fixed_ceiling = Score([1, np.nan],
                          coords={'aggregation': ['center', 'error']},
                          dims=['aggregation'])
    super(Imagenet2012, self).__init__(
        identifier='fei-fei.Deng2009-top1',
        version=1,
        ceiling_func=lambda: fixed_ceiling,
        parent='ImageNet',
        paper_link="https://ieeexplore.ieee.org/abstract/document/5206848")
def collect_stimuli(data_path):
    """Assemble the klab natural-design visual-search StimulusSet.

    Gathers three groups of 240 images each from subdirectories of *data_path*:
    search stimuli (``stimuli/s_<i>.jpg``), target images (``target/t_<i>.jpg``)
    and ground-truth masks (``gt/gt_<i>.jpg``).

    :param data_path: pathlib.Path to the dataset root
    :return: StimulusSet with ``image_paths`` populated and an
        ``image_file_name`` column mirroring ``image_path_within_store``
    """
    stimuli = []
    # the three groups differ only in subdirectory, filename prefix and label;
    # collect them with a shared helper instead of three copy-pasted loops
    for subdir, prefix, label in (('stimuli', 's_', 'stimuli'),
                                  ('target', 't_', 'target'),
                                  ('gt', 'gt_', 'gt')):
        stimuli.extend(_collect_image_group(data_path, subdir, prefix, label))
    stimuli = StimulusSet(stimuli)
    stimuli.image_paths = {
        row.image_id: row.image_current_local_file_path
        for row in stimuli.itertuples()
    }
    stimuli['image_file_name'] = stimuli['image_path_within_store']
    return stimuli


def _collect_image_group(data_path, subdir, prefix, label, count=240):
    """Return stimulus records for one image group, e.g. all ``s_<i>.jpg`` files."""
    records = []
    for i in range(1, count + 1):
        filename = prefix + str(i) + '.jpg'
        records.append({
            'image_current_local_file_path': os.path.join(data_path / subdir, filename),
            'image_path_within_store': filename,
            'image_label': label,
            'image_id': 'klab_vs_naturaldesign_' + label + '_' + str(i),
            'sample_number': i,
        })
    return records
def collect_stimuli(data_path):
    """Assemble the klab object-array visual-search StimulusSet.

    Gathers 300 search arrays (``stimuli/array_<i>.jpg``), 300 target images
    (``target/target_<i>.jpg``) and 6 position masks (``gt/mask<i>.jpg``) from
    subdirectories of *data_path*. Each record carries ``tar_obj_pos``, the
    index of the target category within the array (looked up in
    ``gt/array.mat`` for arrays and targets; mask *i* marks position *i*-1).

    :param data_path: pathlib.Path to the dataset root
    :return: StimulusSet with ``image_paths`` populated and an
        ``image_file_name`` column mirroring ``image_path_within_store``
    """
    arr_loc = loadmat(os.path.join(data_path / 'gt', 'array.mat'))
    # target position per trial, computed ONCE and shared by the 'stimuli' and
    # 'target' loops below (it was previously recomputed identically in both)
    target_positions = [
        np.argmax(arr_loc['MyData'][i]['arraycate'][0]
                  == arr_loc['MyData'][i]['targetcate'][0])
        for i in range(300)
    ]
    stimuli = []
    for i in range(1, 301):
        filename = 'array_' + str(i) + '.jpg'
        stimuli.append({
            'image_current_local_file_path': os.path.join(data_path / 'stimuli', filename),
            'image_path_within_store': filename,
            'image_label': 'stimuli',
            'image_id': 'stimuli_' + str(i),
            'tar_obj_pos': target_positions[i - 1],
            'sample_number': i,
        })
    for i in range(1, 301):
        filename = 'target_' + str(i) + '.jpg'
        stimuli.append({
            'image_current_local_file_path': os.path.join(data_path / 'target', filename),
            'image_path_within_store': filename,
            'image_label': 'target',
            'image_id': 'target_' + str(i),
            'tar_obj_pos': target_positions[i - 1],
            'sample_number': i,
        })
    for i in range(1, 7):
        filename = 'mask' + str(i) + '.jpg'
        stimuli.append({
            'image_current_local_file_path': os.path.join(data_path / 'gt', filename),
            'image_path_within_store': filename,
            'image_label': 'mask',
            'image_id': 'mask_' + str(i),
            'tar_obj_pos': i - 1,  # mask i marks array position i-1
            'sample_number': i,
        })
    stimuli = StimulusSet(stimuli)
    stimuli.image_paths = {
        row.image_id: row.image_current_local_file_path
        for row in stimuli.itertuples()
    }
    stimuli['image_file_name'] = stimuli['image_path_within_store']
    return stimuli