Ejemplo n.º 1
0
def load_responses(response_file, stimuli):
    """Load neural responses and Brodmann-area labels from .npy files and
    package them as a NeuronRecordingAssembly.

    :param response_file: path prefix; "neural_responses.npy" and
        "brodmann_areas.npy" are appended directly (no separator is inserted),
        so it must already end with a path separator or filename prefix.
    :param stimuli: table with 'image_id' and 'image_number' columns, used to
        map each presentation index to its image identifier.
    :return: NeuronRecordingAssembly with dims (presentation, neuroid, time_bin).
    """
    neural_response_file = response_file + "neural_responses.npy"
    neural_responses = np.load(neural_response_file)

    brodmann_file = response_file + "brodmann_areas.npy"
    brodmann_locations = np.load(brodmann_file)

    # Responses are indexed as (presentation, neuroid, time_bin); the coords
    # below assume exactly 32 time bins (linspace/arange each produce 32
    # values).
    # NOTE(review): 'time' runs 0-1 while time_bin_start/end run 0-1000 in
    # 31.25 steps -- presumably seconds vs. milliseconds over the same 1s
    # window; confirm against the recording protocol.
    assembly = xr.DataArray(
        neural_responses,
        coords={
            # presentation-level: sequential image number plus the image_id
            # looked up from the stimuli table for that number
            'image_num':
            ('presentation', list(range(neural_responses.shape[0]))),
            'image_id': ('presentation', [
                stimuli['image_id'][stimuli['image_number'] == num].values[0]
                for num in range(neural_responses.shape[0])
            ]),
            # neuroid-level: Brodmann area per electrode, sequential ids
            'region': ('neuroid', brodmann_locations),
            'neuroid_id': ('neuroid', list(range(neural_responses.shape[1]))),
            # time_bin-level: bin centers/edges (see NOTE above on units)
            'time': ('time_bin', np.linspace(0, 1, 32)),
            'time_bin_start': ('time_bin', np.arange(0, 1000, 31.25)),
            'time_bin_end': ('time_bin', np.arange(31.25, 1001, 31.25))
        },
        dims=['presentation', 'neuroid', 'time_bin'])

    assembly = NeuronRecordingAssembly(assembly)

    # Enforce canonical dimension order expected downstream.
    assembly = assembly.transpose('presentation', 'neuroid', 'time_bin')
    return assembly
Ejemplo n.º 2
0
def load_responses(response_file, stimuli):
    """Load per-monkey spike rates from an HDF5 file into one assembly.

    :param response_file: path to an HDF5 file with one group per monkey,
        each containing a 'rates' dataset shaped
        (images, neuroids, repetitions).
    :param stimuli: table with 'image_id' and 'image_number' columns, used to
        map each image index to its image identifier.
    :return: NeuronRecordingAssembly with a stacked
        (image_id, repetition) -> presentation dimension; expects exactly
        1600 images x 45 repetitions and 288 neuroids per monkey
        ('nano' and 'magneto').
    """
    assemblies = []
    neuroid_id_offset = 0  # keeps neuroid_ids unique across monkeys
    # Close the file handle when done (the original leaked it).
    with h5py.File(response_file, 'r') as responses:
        for monkey in responses.keys():
            # Dataset.value was removed in h5py 3.0; [()] reads the full
            # dataset into a numpy array.
            spike_rates = responses[monkey]['rates'][()]
            assembly = xr.DataArray(spike_rates,
                                    coords={
                                        'image_num': ('image_id', list(range(spike_rates.shape[0]))),
                                        'image_id': ('image_id', [
                                            stimuli['image_id'][stimuli['image_number'] == num].values[0]
                                            for num in range(spike_rates.shape[0])]),
                                        'neuroid_id': ('neuroid', list(
                                            range(neuroid_id_offset, neuroid_id_offset + spike_rates.shape[1]))),
                                        'region': ('neuroid', ['IT'] * spike_rates.shape[1]),
                                        'monkey': ('neuroid', [monkey] * spike_rates.shape[1]),
                                        'repetition': list(range(spike_rates.shape[2])),
                                    },
                                    dims=['image_id', 'neuroid', 'repetition'])
            assemblies.append(assembly)
            neuroid_id_offset += spike_rates.shape[1]
    assembly = xr.concat(assemblies, 'neuroid')
    # Collapse (image_id, repetition) into the canonical presentation dim.
    assembly = assembly.stack(presentation=['image_id', 'repetition'])
    assembly = NeuronRecordingAssembly(assembly)
    # Sanity-check the expected experiment dimensions.
    assert len(assembly['presentation']) == 1600 * 45
    assert len(np.unique(assembly['image_id'])) == 1600
    assert len(assembly.sel(monkey='nano')['neuroid']) == len(assembly.sel(monkey='magneto')['neuroid']) == 288
    assert len(assembly['neuroid']) == len(np.unique(assembly['neuroid_id'])) == 288 * 2
    return assembly
Ejemplo n.º 3
0
def collect_synth(h5, data_dir):
    """Collect synthetic-image stimuli and neural responses from a pytables file.

    Walks monkey/setting/session nodes under h5.root.images.synthetic, writes
    the session images to PNGs under data_dir/"images_temp"/"synthetic", and
    pairs them with the matching neural data and target indices.

    :param h5: open pytables file with images, neural and target_inds trees.
    :param data_dir: base directory for temporary image extraction.
    :return: (StimulusSet of all synthetic stimuli,
              dict of assembly name -> NeuronRecordingAssembly).
    """
    stimuli_frames = []
    assemblies_by_name = {}
    for monkey in h5.root.images.synthetic:
        for setting in monkey:
            for session_images in setting:
                monkey_name = monkey._v_name
                setting_name = setting._v_name
                session_name = session_images._v_name

                session_neural = h5.root.neural.synthetic[monkey_name][
                    setting_name][session_name]
                session_target_inds = h5.root.target_inds[monkey_name][
                    setting_name][session_name]

                # Unique identifier doubles as the temp image subdirectory.
                identifier = f"{monkey_name[-1]}_{setting_name}_{session_name}"
                img_temp_path = data_dir / "images_temp" / "synthetic" / identifier
                img_temp_path.mkdir(parents=True, exist_ok=True)

                frame = np_to_png(session_images, img_temp_path)
                frame["animal"] = monkey_name
                frame["setting"] = setting_name
                frame["session"] = session_name
                stimuli_frames.append(frame)

                neural_proto = np_to_xr(monkey, setting, session_neural,
                                        frame, session_target_inds, "synth")
                neural_proto = NeuronRecordingAssembly(neural_proto)
                assemblies_by_name[neural_proto.name] = neural_proto

    combined = pd.concat(stimuli_frames, axis=0)
    # Every stimulus must have a unique image_id across all sessions.
    assert len(np.unique(combined['image_id'])) == len(combined)
    stimuli = StimulusSet(combined)
    stimuli.image_paths = dict(
        zip(stimuli['image_id'], stimuli['image_current_local_file_path']))
    return stimuli, assemblies_by_name
def load_responses(data_dir, stimuli):
    """Load pickled IT spike-rate features and build a NeuronRecordingAssembly.

    :param data_dir: directory containing 'data_IT_base616.pkl' and
        'IT_neural_meta_full.pkl'.
    :param stimuli: per-image metadata table; every column is attached along
        the 'image' dimension, so its row order must match the pickled
        feature ordering.
    :return: NeuronRecordingAssembly with dims (presentation, neuroid,
        time_bin), restricted to the 70-170ms time bin and the first 33
        repetitions.
    """
    # Use a context manager so the file handle is closed (original leaked it).
    with open(os.path.join(data_dir, 'data_IT_base616.pkl'), 'rb') as f:
        IT_base616 = pickle.load(f)
    features = IT_base616[
        'IT_features']  # Shaped images x neuroids x repetitions x time_bins

    # Drop all time_bins except the fifth, which corresponds to 70-170ms
    # For future reference the time-bins are as follows:
    # 70-120ms, 120-170ms, 170-220ms, 220-270ms, 70-170ms, 170-270ms, 70-270ms
    features = features[:, :, :, 4]
    features = features[:, :, :, np.newaxis]
    # Drop all repetitions beyond 33rd (all neuroids have at least 27, but none have greater than 33)
    features = features[:, :, :33, :]

    # The metadata was pickled under Python 2; decode its byte strings as
    # latin1. pickle.load(encoding=...) is the public API and replaces the
    # original use of the private pickle._Unpickler class.
    with open(os.path.join(data_dir, 'IT_neural_meta_full.pkl'), 'rb') as f:
        neuroid_meta = pickle.load(f, encoding='latin1')

    assembly = xr.DataArray(
        features,
        coords={
            'region': ('neuroid', ['IT'] * len(neuroid_meta)),
            'neuroid_id': ('neuroid', list(range(features.shape[1]))),
            'time_bin_start': ('time_bin', [70]),
            'time_bin_stop': ('time_bin', [170]),
            'repetition': ('repetition', list(range(features.shape[2])))
        },
        dims=['image', 'neuroid', 'repetition', 'time_bin'])

    # DataFrame.iteritems() was removed in pandas 2.0; items() is equivalent.
    for column_name, column_data in neuroid_meta.items():
        assembly = assembly.assign_coords(
            **{f'{column_name}': ('neuroid', list(column_data.values))})

    for column_name, column_data in stimuli.items():
        assembly = assembly.assign_coords(
            **{f'{column_name}': ('image', list(column_data.values))})

    # Collapse dimensions 'image' and 'repetitions' into a single 'presentation' dimension
    assembly = assembly.stack(
        presentation=('image', 'repetition')).reset_index('presentation')
    assembly = assembly.drop('image')

    assembly = NeuronRecordingAssembly(assembly)
    assembly = assembly.transpose('presentation', 'neuroid', 'time_bin')

    return assembly
Ejemplo n.º 5
0
def collect_responses_nat(h5, stimuli):
    """Collect naturalistic-image neural responses from a pytables file.

    Walks monkey/setting/session nodes under h5.root.neural.naturalistic,
    pairs each session with its target indices, and wraps the result as a
    NeuronRecordingAssembly.

    :param h5: open pytables file with neural and target_inds trees.
    :param stimuli: stimulus metadata passed through to np_to_xr.
    :return: dict mapping assembly name -> NeuronRecordingAssembly.
    """
    collected = {}
    for monkey in h5.root.neural.naturalistic:
        monkey_name = monkey._v_name
        for setting in monkey:
            setting_name = setting._v_name
            for session in setting:
                target_inds = h5.root.target_inds[monkey_name][
                    setting_name][session._v_name]
                assembly = NeuronRecordingAssembly(
                    np_to_xr(monkey, setting, session, stimuli,
                             target_inds, "nat"))
                collected[assembly.name] = assembly
    return collected
Ejemplo n.º 6
0
def load_responses(response_file, stimuli):
    """Load per-monkey spike rates from HDF5, filter noisy electrodes, and
    attach the 70-170ms time-bin metadata.

    :param response_file: path to an HDF5 file with one group per monkey,
        each containing a 'rates' dataset shaped
        (images, neuroids, repetitions).
    :param stimuli: table with 'image_id' and 'image_number' columns, used to
        map each image index to its image identifier.
    :return: NeuronRecordingAssembly with dims (presentation, neuroid,
        time_bin); expects 1600 images x 45 repetitions and 288 neuroids per
        monkey ('nano' and 'magneto') before filtering.
    """
    assemblies = []
    neuroid_id_offset = 0  # keeps neuroid_ids unique across monkeys
    # Close the file handle when done (the original leaked it).
    with h5py.File(response_file, 'r') as responses:
        for monkey in responses.keys():
            # Dataset.value was removed in h5py 3.0; [()] reads the full
            # dataset into a numpy array.
            spike_rates = responses[monkey]['rates'][()]
            assembly = xr.DataArray(spike_rates,
                                    coords={
                                        'image_num': ('image_id', list(range(spike_rates.shape[0]))),
                                        'image_id': ('image_id', [
                                            stimuli['image_id'][stimuli['image_number'] == num].values[0]
                                            for num in range(spike_rates.shape[0])]),
                                        'neuroid_id': ('neuroid', list(
                                            range(neuroid_id_offset, neuroid_id_offset + spike_rates.shape[1]))),
                                        'region': ('neuroid', ['IT'] * spike_rates.shape[1]),
                                        'monkey': ('neuroid', [monkey] * spike_rates.shape[1]),
                                        'repetition': list(range(spike_rates.shape[2])),
                                    },
                                    dims=['image_id', 'neuroid', 'repetition'])
            assemblies.append(assembly)
            neuroid_id_offset += spike_rates.shape[1]
    assembly = xr.concat(assemblies, 'neuroid')
    # Collapse (image_id, repetition) into the canonical presentation dim.
    assembly = assembly.stack(presentation=['image_id', 'repetition'])
    assembly = NeuronRecordingAssembly(assembly)
    # Sanity-check the expected experiment dimensions.
    assert len(assembly['presentation']) == 1600 * 45
    assert len(np.unique(assembly['image_id'])) == 1600
    assert len(assembly.sel(monkey='nano')['neuroid']) == len(assembly.sel(monkey='magneto')['neuroid']) == 288
    assert len(assembly['neuroid']) == len(np.unique(assembly['neuroid_id'])) == 288 * 2
    # filter noisy electrodes
    assembly = filter_neuroids(assembly, threshold=.7)
    # add time info: these rates correspond to the 70-170ms window
    assembly = assembly.expand_dims('time_bin')
    assembly['time_bin_start'] = 'time_bin', [70]
    assembly['time_bin_end'] = 'time_bin', [170]
    assembly = assembly.transpose('presentation', 'neuroid', 'time_bin')
    return assembly
def load_responses(data_dir, stimuli):
    """Load things-2 PSTH data, compute time-binned firing rates, filter
    noisy electrodes via the normalizer set, and build an assembly.

    :param data_dir: directory whose 'database' subfolder holds
        'solo.rsvp.things-2.experiment_psth.npy' and
        'solo.rsvp.things-2.normalizer_psth.npy'; array metadata is read
        from the sibling 'array-metadata/mapping.json'.
    :param stimuli: per-image metadata table; every column is attached along
        the 'image' dimension, so its row order must match the PSTH ordering.
    :return: NeuronRecordingAssembly with dims (presentation, neuroid,
        time_bin) over seven time bins.
    """
    data_dir = data_dir / 'database'
    assert os.path.isdir(data_dir)
    psth = np.load(data_dir / 'solo.rsvp.things-2.experiment_psth.npy')  # Shaped images x repetitions x time_bins x channels

    # Compute firing rate for given time bins
    timebins = [[70, 170], [170, 270], [50, 100], [100, 150], [150, 200], [200, 250], [70, 270]]
    photodiode_delay = 30  # Delay recorded on photodiode is ~30ms
    timebase = np.arange(-100, 381, 10)  # PSTH from -100ms to 380ms relative to stimulus onset
    assert len(timebase) == psth.shape[2]
    rate = np.empty((len(timebins), psth.shape[0], psth.shape[1], psth.shape[3]))
    for idx, tb in enumerate(timebins):
        # Shift each window by the photodiode delay before averaging over it.
        t_cols = np.where((timebase >= (tb[0] + photodiode_delay)) & (timebase < (tb[1] + photodiode_delay)))[0]
        rate[idx] = np.mean(psth[:, :, t_cols, :], axis=2)  # Shaped time bins x images x repetitions x channels

    assembly = xr.DataArray(rate,
                            coords={'repetition': ('repetition', list(range(rate.shape[2]))),
                                    'time_bin_id': ('time_bin', list(range(rate.shape[0]))),
                                    'time_bin_start': ('time_bin', [x[0] for x in timebins]),
                                    'time_bin_stop': ('time_bin', [x[1] for x in timebins])},
                            dims=['time_bin', 'image', 'repetition', 'neuroid'])

    # Add neuroid related meta data. Use a context manager so the file handle
    # is closed (the original json.load(open(...)) leaked it).
    with open(data_dir.parent / 'array-metadata' / 'mapping.json') as f:
        neuroid_meta = pd.DataFrame(json.load(f))
    # DataFrame.iteritems() was removed in pandas 2.0; items() is equivalent.
    for column_name, column_data in neuroid_meta.items():
        assembly = assembly.assign_coords(**{f'{column_name}': ('neuroid', list(column_data.values))})

    # Add stimulus related meta data
    for column_name, column_data in stimuli.items():
        assembly = assembly.assign_coords(**{f'{column_name}': ('image', list(column_data.values))})

    # Collapse dimensions 'image' and 'repetitions' into a single 'presentation' dimension
    assembly = assembly.stack(presentation=('image', 'repetition')).reset_index('presentation')
    assembly = assembly.drop('image')
    assembly = NeuronRecordingAssembly(assembly)

    # Filter noisy electrodes: reliability is computed on the normalizer set
    # over the 70-170ms window only.
    psth = np.load(data_dir / 'solo.rsvp.things-2.normalizer_psth.npy')
    t_cols = np.where((timebase >= (70 + photodiode_delay)) & (timebase < (170 + photodiode_delay)))[0]
    rate = np.mean(psth[:, :, t_cols, :], axis=2)
    normalizer_assembly = xr.DataArray(rate,
                                       coords={'repetition': ('repetition', list(range(rate.shape[1]))),
                                               'image_id': ('image', list(range(rate.shape[0]))),
                                               'id': ('image', list(range(rate.shape[0])))},
                                       dims=['image', 'repetition', 'neuroid'])
    for column_name, column_data in neuroid_meta.items():
        normalizer_assembly = normalizer_assembly.assign_coords(
            **{f'{column_name}': ('neuroid', list(column_data.values))})
    normalizer_assembly = normalizer_assembly.stack(presentation=('image', 'repetition')).reset_index('presentation')
    normalizer_assembly = normalizer_assembly.drop('image')
    normalizer_assembly = normalizer_assembly.transpose('presentation', 'neuroid')
    normalizer_assembly = NeuronRecordingAssembly(normalizer_assembly)

    # Keep only the electrodes that pass the reliability threshold.
    filtered_assembly = filter_neuroids(normalizer_assembly, 0.7)
    assembly = assembly.sel(neuroid=np.isin(assembly.neuroid_id, filtered_assembly.neuroid_id))
    assembly = assembly.transpose('presentation', 'neuroid', 'time_bin')

    # Add other experiment and data processing related info
    assembly.attrs['image_size_degree'] = 8
    assembly.attrs['stim_on_time_ms'] = 100

    return assembly
Ejemplo n.º 8
0
def load_responses(data_dir, stimuli):
    """Load hvm PSTH data, compute time-binned firing rates, filter noisy
    electrodes via the normalizer set, and build an assembly ordered to
    match dicarlo.hvm.

    :param data_dir: directory holding 'solo.rsvp.hvm.experiment_psth.npy'
        and 'solo.rsvp.hvm.normalizer_psth.npy'; image ids are read from the
        sibling 'image-metadata/hvm_map.txt' and array metadata from
        'array-metadata/mapping.json'.
    :param stimuli: per-image metadata table with an 'image_id' column; every
        column is attached along the 'image' dimension after sorting both
        sides by image_id.
    :return: NeuronRecordingAssembly with dims (presentation, neuroid,
        time_bin) over seven time bins.
    """
    psth = np.load(data_dir / 'solo.rsvp.hvm.experiment_psth.npy'
                   )  # Shaped images x repetitions x time_bins x channels

    # Drop first (index 0) and second last session (index 25) since they had only one repetition each
    # Actually not, since we're sticking to older protocol re: data cleaning for now
    # psth = np.delete(psth, (0, 25), axis=1)

    # Compute firing rate for given time bins
    timebins = [[70, 170], [170, 270], [50, 100], [100, 150], [150, 200],
                [200, 250], [70, 270]]
    photodiode_delay = 30  # Delay recorded on photodiode is ~30ms
    timebase = np.arange(
        -100, 381, 10)  # PSTH from -100ms to 380ms relative to stimulus onset
    assert len(timebase) == psth.shape[2]
    rate = np.empty(
        (len(timebins), psth.shape[0], psth.shape[1], psth.shape[3]))
    for idx, tb in enumerate(timebins):
        # Shift each window by the photodiode delay before averaging over it.
        t_cols = np.where((timebase >= (tb[0] + photodiode_delay))
                          & (timebase < (tb[1] + photodiode_delay)))[0]
        rate[idx] = np.mean(
            psth[:, :, t_cols, :],
            axis=2)  # Shaped time bins x images x repetitions x channels

    # Load image related meta data (id ordering differs from dicarlo.hvm).
    # Each line is "<filename> ..."; strip the 4-char extension to get the id.
    # Use context managers so file handles are closed (originals leaked them).
    with open(data_dir.parent / 'image-metadata' / 'hvm_map.txt') as f:
        image_id = [line.split()[0][:-4] for line in f.readlines()]
    # Load neuroid related meta data
    with open(data_dir.parent / 'array-metadata' / 'mapping.json') as f:
        neuroid_meta = pd.DataFrame(json.load(f))

    assembly = xr.DataArray(
        rate,
        coords={
            'repetition': ('repetition', list(range(rate.shape[2]))),
            'time_bin_id': ('time_bin', list(range(rate.shape[0]))),
            'time_bin_start': ('time_bin', [x[0] for x in timebins]),
            'time_bin_stop': ('time_bin', [x[1] for x in timebins]),
            'image_id': ('image', image_id)
        },
        dims=['time_bin', 'image', 'repetition', 'neuroid'])

    # DataFrame.iteritems() was removed in pandas 2.0; items() is equivalent.
    for column_name, column_data in neuroid_meta.items():
        assembly = assembly.assign_coords(
            **{f'{column_name}': ('neuroid', list(column_data.values))})

    # Align assembly and stimuli by image_id before attaching stimulus coords.
    assembly = assembly.sortby(assembly.image_id)
    stimuli = stimuli.sort_values(by='image_id').reset_index(drop=True)
    for column_name, column_data in stimuli.items():
        assembly = assembly.assign_coords(
            **{f'{column_name}': ('image', list(column_data.values))})
    assembly = assembly.sortby(
        assembly.id)  # Re-order by id to match dicarlo.hvm ordering

    # Collapse dimensions 'image' and 'repetitions' into a single 'presentation' dimension
    assembly = assembly.stack(
        presentation=('image', 'repetition')).reset_index('presentation')
    assembly = NeuronRecordingAssembly(assembly)

    # Filter noisy electrodes: reliability is computed on the normalizer set
    # over the 70-170ms window only.
    psth = np.load(data_dir / 'solo.rsvp.hvm.normalizer_psth.npy')
    t_cols = np.where((timebase >= (70 + photodiode_delay))
                      & (timebase < (170 + photodiode_delay)))[0]
    rate = np.mean(psth[:, :, t_cols, :], axis=2)
    normalizer_assembly = xr.DataArray(
        rate,
        coords={
            'repetition': ('repetition', list(range(rate.shape[1]))),
            'image_id': ('image', list(range(rate.shape[0]))),
            'id': ('image', list(range(rate.shape[0])))
        },
        dims=['image', 'repetition', 'neuroid'])
    for column_name, column_data in neuroid_meta.items():
        normalizer_assembly = normalizer_assembly.assign_coords(
            **{f'{column_name}': ('neuroid', list(column_data.values))})
    normalizer_assembly = normalizer_assembly.stack(
        presentation=('image', 'repetition')).reset_index('presentation')
    normalizer_assembly = normalizer_assembly.drop('image')
    normalizer_assembly = normalizer_assembly.transpose(
        'presentation', 'neuroid')
    normalizer_assembly = NeuronRecordingAssembly(normalizer_assembly)

    # Keep only the electrodes that pass the reliability threshold.
    filtered_assembly = filter_neuroids(normalizer_assembly, 0.7)
    assembly = assembly.sel(
        neuroid=np.isin(assembly.neuroid_id, filtered_assembly.neuroid_id))
    assembly = assembly.transpose('presentation', 'neuroid', 'time_bin')

    # Add other experiment related info
    assembly.attrs['image_size_degree'] = 8
    assembly.attrs['stim_on_time_ms'] = 100

    return assembly