Example #1
def mountainsort4(recording: str, sorting_out: str) -> None:
    import spiketoolkit as st
    import spikesorters as ss
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor
    import kachery as ka

    # TODO: need to think about how to deal with this
    ka.set_config(fr='default_readonly')

    recording = AutoRecordingExtractor(dict(path=recording), download=True)

    # recording = se.SubRecordingExtractor(parent_recording=recording, start_frame=0, end_frame=30000 * 10)

    # Preprocessing
    print('Preprocessing...')
    recording = st.preprocessing.bandpass_filter(recording,
                                                 freq_min=300,
                                                 freq_max=6000)
    recording = st.preprocessing.whiten(recording)

    # Sorting
    print('Sorting...')
    sorter = ss.Mountainsort4Sorter(recording=recording,
                                    output_folder='/tmp/tmp_mountainsort4_' +
                                    _random_string(8),
                                    delete_output_folder=True)

    sorter.set_params(detect_sign=-1, adjacency_radius=50, detect_threshold=4)
    timer = sorter.run()
    print('#SF-SORTER-RUNTIME#{:.3f}#'.format(timer))
    sorting = sorter.get_result()

    AutoSortingExtractor.write_sorting(sorting=sorting, save_path=sorting_out)
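A hypothetical invocation of the function above (the recording path is a placeholder, not a real kachery address):

mountainsort4(
    recording='sha1dir://<hash>.some_study/some_recording',
    sorting_out='/tmp/firings_out.mda')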
Example #2
def main():
    parser = argparse.ArgumentParser(description='Run spike sorting using MountainSort4.')
    parser.add_argument('recording_path', help='Path (or kachery-path) to the file or directory defining the recording')
    parser.add_argument('--output', help='The output directory', required=True)

    args = parser.parse_args()
    recording_path = args.recording_path
    output_dir = args.output

    _mkdir_if_needed(output_dir, require_empty=True)

    ka.set_config(fr='default_readonly')

    recording = AutoRecordingExtractor(dict(path=recording_path), download=True)

    # Preprocessing
    print('Preprocessing...')
    recording = st.preprocessing.bandpass_filter(recording, freq_min=300, freq_max=6000)
    recording = st.preprocessing.common_reference(recording, reference='median')

    # Sorting
    print('Sorting...')
    sorting = ss.run_mountainsort4(recording, output_folder='/tmp/tmpdir', delete_output_folder=True)

    se.MdaSortingExtractor.write_sorting(sorting=sorting, save_path=output_dir + '/firings.mda')
Example #3
def kilosort2(recording, sorting_out):
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor
    from ._kilosort2sorter import Kilosort2Sorter
    import kachery as ka

    # TODO: need to think about how to deal with this
    ka.set_config(fr='default_readonly')

    recording = AutoRecordingExtractor(dict(path=recording), download=True)

    # recording = se.SubRecordingExtractor(parent_recording=recording, start_frame=0, end_frame=30000 * 10)
    
    # Sorting
    print('Sorting...')
    sorter = Kilosort2Sorter(
        recording=recording,
        output_folder='/tmp/tmp_kilosort2_' + _random_string(8),
        delete_output_folder=True
    )

    sorter.set_params(
        detect_sign=-1,
        detect_threshold=5,
        freq_min=150,
        pc_per_chan=3
    )
    timer = sorter.run()
    print('#SF-SORTER-RUNTIME#{:.3f}#'.format(timer))
    sorting = sorter.get_result()

    AutoSortingExtractor.write_sorting(sorting=sorting, save_path=sorting_out)
Example #4
    def __init__(self, arg, samplerate=None):
        super().__init__()
        self._hash = None
        if isinstance(arg, se.SortingExtractor):
            self._sorting = arg
            self.copy_unit_properties(sorting=self._sorting)
        else:
            self._sorting = None
            if isinstance(arg, str):
                arg = dict(path=arg, samplerate=samplerate)
            if isinstance(arg, dict):
                if 'kachery_config' in arg:
                    ka.set_config(**arg['kachery_config'])
                if 'path' in arg:
                    path = arg['path']
                    if ka.get_file_info(path):
                        file_path = ka.load_file(path)
                        if not file_path:
                            raise Exception(
                                'Unable to realize file: {}'.format(path))
                        self._init_from_file(file_path,
                                             original_path=path,
                                             kwargs=arg)
                    else:
                        raise Exception('Not a file: {}'.format(path))
                else:
                    raise Exception('Unable to initialize sorting extractor')
            else:
                raise Exception(
                    'Unable to initialize sorting extractor (unexpected type)')
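For reference, the branches above accept three argument forms; hypothetical constructions follow (placeholder hashes, and assuming this __init__ belongs to AutoSortingExtractor):

s1 = AutoSortingExtractor(existing_sorting_extractor)  # wrap an existing extractor
s2 = AutoSortingExtractor('sha1://<hash>/firings.mda', samplerate=30000)  # path string
s3 = AutoSortingExtractor(dict(path='sha1://<hash>/firings.mda',
                               samplerate=30000,
                               kachery_config=dict(fr='default_readonly')))  # dict form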
Example #5
def main():
    parser = argparse.ArgumentParser(
        description='Run spike sorting using SpyKING CIRCUS.')
    parser.add_argument(
        'recording_path',
        help='Path (or kachery-path) to the file or directory defining the recording')
    parser.add_argument('--output', help='The output directory', required=True)

    args = parser.parse_args()
    recording_path = args.recording_path
    output_dir = args.output

    _mkdir_if_needed(output_dir, require_empty=True)

    ka.set_config(fr='default_readonly')

    recording = AutoRecordingExtractor(dict(path=recording_path),
                                       download=True)

    # Sorting
    print('Sorting...')
    sorting = ss.run_spykingcircus(recording,
                                   output_folder='/tmp/tmpdir',
                                   delete_output_folder=True)

    se.MdaSortingExtractor.write_sorting(sorting=sorting,
                                         save_path=output_dir + '/firings.mda')
Example #6
def main():
    parser = argparse.ArgumentParser(description='Run spike sorting using Kilosort2.')
    parser.add_argument('recording_path', help='Path (or kachery-path) to the file or directory defining the recording')
    parser.add_argument('--output', help='The output directory', required=True)

    args = parser.parse_args()
    recording_path = args.recording_path
    output_dir = args.output

    _mkdir_if_needed(output_dir, require_empty=True)

    ka.set_config(fr='default_readonly')

    recording = AutoRecordingExtractor(dict(path=recording_path), download=True)

    # Sorting
    print('Sorting...')

    sorter = Kilosort2Sorter(
        recording=recording,
        output_folder='/tmp/tmpdir',
        delete_output_folder=False  # cleanup is handled by _keep_temp_files one level above
    )

    sorter.set_params(
        detect_sign=-1,
        detect_threshold=5,
        freq_min=150,
        pc_per_chan=3
    )
    timer = sorter.run()
    print('#SF-SORTER-RUNTIME#{:.3f}#'.format(timer))
    sorting = sorter.get_result()

    se.MdaSortingExtractor.write_sorting(sorting=sorting, save_path=output_dir + '/firings.mda')
Example #7
def main():
    parser = argparse.ArgumentParser(description='Run spike sorting using IronClust.')
    parser.add_argument('recording_path', help='Path (or kachery-path) to the file or directory defining the recording')
    parser.add_argument('--output', help='The output directory', required=True)

    args = parser.parse_args()
    recording_path = args.recording_path
    output_dir = args.output

    _mkdir_if_needed(output_dir, require_empty=True)

    ka.set_config(fr='default_readonly')

    recording = AutoRecordingExtractor(dict(path=recording_path), download=True)

    # Sorting
    print('Sorting...')
    # IronClustSorter.set_ironclust_path('/src/ironclust')

    sorter = IronClustSorter(
        recording=recording,
        output_folder='/tmp/tmpdir',
        delete_output_folder=False  # cleanup is handled by _keep_temp_files one level above
    )

    sorter.set_params(
        detect_sign=-1,
        adjacency_radius=50,
        adjacency_radius_out=75,
        detect_threshold=4,
        prm_template_name='',
        freq_min=300,
        freq_max=8000,
        merge_thresh=0.99,
        pc_per_chan=0,
        whiten=False,
        filter_type='bandpass',
        filter_detect_type='none',
        common_ref_type='mean',
        batch_sec_drift=300,
        step_sec_drift=20,
        knn=30,
        min_count=30,
        fGpu=True,
        fft_thresh=8,
        fft_thresh_low=0,
        nSites_whiten=32,
        feature_type='gpca',
        delta_cut=1,
        post_merge_mode=1,
        sort_mode=1
    )
    timer = sorter.run()
    print('#SF-SORTER-RUNTIME#{:.3f}#'.format(timer))
    sorting = sorter.get_result()

    se.MdaSortingExtractor.write_sorting(sorting=sorting, save_path=output_dir + '/firings.mda')
Example #8
def _pjh_run_job(pipe_to_parent: Connection, job: Dict[str, Any], kachery_config: dict) -> None:
    import kachery as ka
    ka.set_config(**kachery_config)
    hither._run_job(job)
    pipe_to_parent.send(job['result'].serialize())
    # wait for message to return
    while True:
        if pipe_to_parent.poll():
            pipe_to_parent.recv()
            return
        time.sleep(0.1)
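A minimal sketch of the parent-side counterpart (hypothetical wiring; the actual job construction and result deserialization live inside hither): the child sends the serialized result and then polls until the parent acknowledges, so the parent must read and reply before joining.

from multiprocessing import Pipe, Process

def _run_job_in_subprocess(job, kachery_config):
    parent_conn, child_conn = Pipe()
    p = Process(target=_pjh_run_job, args=(child_conn, job, kachery_config))
    p.start()
    serialized_result = parent_conn.recv()  # blocks until the child sends the result
    parent_conn.send('ok')  # any message releases the child's polling loop
    p.join()
    return serialized_result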
Example #9
    def __init__(self, arg, download=False):
        super().__init__()
        self._hash = None
        if isinstance(arg, str):
            arg = dict(path=arg)
        if isinstance(arg, se.RecordingExtractor):
            self._recording = arg
        else:
            self._recording = None

            # filters
            if ('recording' in arg) and ('filters' in arg):
                recording1 = AutoRecordingExtractor(arg['recording'])
                self._recording = self._apply_filters(recording1,
                                                      arg['filters'])
                return

            if 'kachery_config' in arg:
                ka.set_config(**arg['kachery_config'])
            path = arg.get('path', '')
            if 'nwb_path' in arg:
                self._recording = NwbElectricalSeriesRecordingExtractor(
                    path=path, nwb_path=arg['nwb_path'])
            elif path.endswith('.mda'):
                if 'samplerate' not in arg:
                    raise Exception('Missing argument: samplerate')
                samplerate = arg['samplerate']
                self._recording = MdaRecordingExtractor(timeseries_path=path,
                                                        samplerate=samplerate,
                                                        download=download)
                hash0 = _sha1_of_object(
                    dict(timeseries_sha1=ka.get_file_info(
                        path, algorithm='sha1')['sha1'],
                         samplerate=samplerate))
                setattr(self, 'hash', hash0)
            elif path.endswith('.nwb.json'):
                self._recording = NwbJsonRecordingExtractor(file_path=path)
                hash0 = ka.get_file_info(path)['sha1']
                setattr(self, 'hash', hash0)
            elif path.endswith('.json') and (not path.endswith('.nwb.json')):
                obj = ka.load_object(path)
                if ('raw' in obj) and ('params' in obj) and ('geom' in obj):
                    self._recording = MdaRecordingExtractor(
                        timeseries_path=obj['raw'],
                        samplerate=obj['params']['samplerate'],
                        geom=np.array(obj['geom']))
                else:
                    raise Exception('Problem initializing recording extractor')
            elif ka.get_file_info(path + '/raw.mda'):
                self._recording = MdaRecordingExtractor(
                    recording_directory=path, download=download)
            else:
                raise Exception('Unable to initialize recording extractor.')
        self.copy_channel_properties(recording=self._recording)
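Hypothetical constructions for the main branches above (placeholder hashes; the filter spec format is not shown in this snippet):

r1 = AutoRecordingExtractor('sha1dir://<hash>/rec', download=True)  # directory containing raw.mda
r2 = AutoRecordingExtractor(dict(path='sha1://<hash>/raw.mda', samplerate=30000))  # bare .mda file
r3 = AutoRecordingExtractor(dict(recording=dict(path='sha1dir://<hash>/rec'),
                                 filters=[]))  # filtered view; filter specs omitted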
Example #10
def _read_header(path, verbose=True):
    info0 = ka.get_file_info(path)
    if info0 is None:
        raise Exception(f'Unable to find file: {path}')
    bytes0 = ka.load_bytes(path, start=0, end=min(200, info0['size']))
    if bytes0 is None:
        ka.set_config(fr='default_readonly')
        print(ka.get_file_info(path))
        raise Exception('Unable to load header bytes from {}'.format(path))
    f = io.BytesIO(bytes0)
    try:
        dt_code = _read_int32(f)
        _ = _read_int32(f)  # num bytes per entry
        num_dims = _read_int32(f)
        uses64bitdims = False
        if (num_dims < 0):
            uses64bitdims = True
            num_dims = -num_dims
        if (num_dims < 1) or (num_dims > 6):  # allow single dimension as of 12/6/17
            if verbose:
                print("Invalid number of dimensions: {}".format(num_dims))
            f.close()
            return None
        dims = []
        dimprod = 1
        if uses64bitdims:
            for _ in range(0, num_dims):
                tmp0 = _read_int64(f)
                dimprod = dimprod * tmp0
                dims.append(tmp0)
        else:
            for _ in range(0, num_dims):
                tmp0 = _read_int32(f)
                dimprod = dimprod * tmp0
                dims.append(tmp0)
        dt = _dt_from_dt_code(dt_code)
        if dt is None:
            if verbose:
                print("Invalid data type code: {}".format(dt_code))
            f.close()
            return None
        H = MdaHeader(dt, dims)
        if (uses64bitdims):
            H.uses64bitdims = True
            H.header_size = 3 * 4 + H.num_dims * 8
        f.close()
        return H
    except Exception as e:  # catch *all* exceptions
        if verbose:
            print(e)
        f.close()
        return None
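The reader above implies this .mda header layout: int32 dtype code, int32 bytes per entry, int32 dimension count (negative when the dimensions are stored as int64), then the dimensions themselves. A sketch of the corresponding writer for the 32-bit variant, assuming little-endian encoding (the byte order used by _read_int32 is not shown here):

import struct

def _write_mda_header(f, dt_code, num_bytes_per_entry, dims):
    # Three int32 header fields, then one int32 per dimension.
    # The 64-bit variant would write -len(dims) followed by int64 dims.
    f.write(struct.pack('<iii', dt_code, num_bytes_per_entry, len(dims)))
    for d in dims:
        f.write(struct.pack('<i', d))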
Example #11
def _set_upload_config_from_parsed_args(args):
    to = args.to or None
    url = args.url or None
    channel = args.channel or None
    password = args.password or None
    remote_only = args.remote_only

    if to is not None:
        if url is not None or channel is not None or password is not None:
            raise Exception(
                'Cannot use --url or --channel or --password together with --to'
            )
        ka.set_config(to=to, to_remote_only=remote_only)
    else:
        ka.set_config(to=dict(url=url, channel=channel, password=password),
                      to_remote_only=remote_only)
Example #12
def _set_download_config_from_parsed_args(args):
    fr = args.fr or None
    url = args.url or None
    channel = args.channel or None
    password = args.password or None
    remote_only = args.remote_only

    if fr is not None:
        if url is not None or channel is not None or password is not None:
            raise Exception(
                'Cannot use --url or --channel or --password together with --fr'
            )
        ka.set_config(fr=fr, from_remote_only=remote_only)
    else:
        ka.set_config(fr=dict(url=url, channel=channel, password=password),
                      from_remote_only=remote_only)
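These two helpers read to/fr, url, channel, password, and remote_only off the parsed arguments, so the argparse wiring would look roughly like this (flag names inferred from the attribute access above, not taken from the kachery CLI):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--fr', default=None)
parser.add_argument('--url', default=None)
parser.add_argument('--channel', default=None)
parser.add_argument('--password', default=None)
parser.add_argument('--remote-only', dest='remote_only', action='store_true')
args = parser.parse_args()
_set_download_config_from_parsed_args(args)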
Example #13
def ironclust(recording, sorting_out):
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor
    from ._ironclustsorter import IronClustSorter
    import kachery as ka

    # TODO: need to think about how to deal with this
    ka.set_config(fr='default_readonly')

    recording = AutoRecordingExtractor(dict(path=recording), download=True)

    # Sorting
    print('Sorting...')
    sorter = IronClustSorter(recording=recording,
                             output_folder='/tmp/tmp_ironclust_' +
                             _random_string(8),
                             delete_output_folder=True)

    sorter.set_params(detect_sign=-1,
                      adjacency_radius=50,
                      adjacency_radius_out=75,
                      detect_threshold=4,
                      prm_template_name='',
                      freq_min=300,
                      freq_max=8000,
                      merge_thresh=0.99,
                      pc_per_chan=0,
                      whiten=False,
                      filter_type='bandpass',
                      filter_detect_type='none',
                      common_ref_type='mean',
                      batch_sec_drift=300,
                      step_sec_drift=20,
                      knn=30,
                      min_count=30,
                      fGpu=True,
                      fft_thresh=8,
                      fft_thresh_low=0,
                      nSites_whiten=32,
                      feature_type='gpca',
                      delta_cut=1,
                      post_merge_mode=1,
                      sort_mode=1)
    timer = sorter.run()
    print('#SF-SORTER-RUNTIME#{:.3f}#'.format(timer))
    sorting = sorter.get_result()

    AutoSortingExtractor.write_sorting(sorting=sorting, save_path=sorting_out)
Example #14
File: Explore.py Project: rly/kachery
    def on_message(self, msg):
        ka.set_config(fr='default_readonly', to='')
        # process custom messages from JavaScript here
        # In .js file, use this.pythonInterface.sendMessage({...})
        if msg['name'] == 'loadText':
            try:
                text = ka.load_text(msg['path'])
            except Exception:
                self._send_message(
                    dict(name='loadedText',
                         path=msg['path'],
                         text=None,
                         error='Problem loading. Perhaps this is not a text file.'))
                return
            self._send_message(
                dict(name='loadedText', path=msg['path'], text=text))
Example #15
File: Explore.py Project: rly/kachery
    def javascript_state_changed(self, prev_state, state):
        ka.set_config(fr='default_readonly', to='')

        self._set_status('running', 'Running Explore')

        self._set_state(dir_content=None, file_content=None)

        print('running', state)
        if state.get('path'):
            try:
                dir_content = ka.read_dir(state['path'])
                file_content = None
            except Exception:
                dir_content = None
                file_content = ka.load_text(state['path'])
            self._set_state(dir_content=dir_content, file_content=file_content)

        self._set_status('finished', 'Finished Explore ' + state.get('path', ''))
Example #16
    def javascript_state_changed(self, prev_state, state):
        self._set_status('running', 'Running clustering')

        alg_name = state.get('alg_name', 'none')
        alg_arguments = state.get('alg_arguments', dict())
        kachery_config = state.get('kachery_config', None)
        args0 = alg_arguments.get(alg_name, {})

        if kachery_config:
            ka.set_config(**kachery_config)

        dirname = os.path.dirname(os.path.realpath(__file__))
        fname = os.path.join(dirname, 'clustering_datasets.json')
        with open(fname, 'r') as f:
            datasets = json.load(f)
        timer = time.time()
        for ds in datasets['datasets']:
            self._set_status('running', 'Running: {}'.format(ds['path']))
            print('Loading {}'.format(ds['path']))
            path2 = ka.load_file(ds['path'])
            if path2:
                ka.store_file(path2)
                ds['data'] = self._load_dataset_data(path2)
                if alg_name:
                    print('Clustering...')
                    ds['algName'] = alg_name
                    ds['algArgs'] = args0
                    timer0 = time.time()
                    ds['labels'] = self._do_clustering(
                        ds['data'], alg_name, args0,
                        dict(true_num_clusters=ds['trueNumClusters']))
                    elapsed0 = time.time() - timer0
                    if alg_name != 'none':
                        ds['elapsed'] = elapsed0
            else:
                print('Unable to realize file: {}'.format(ds['path']))
            elapsed = time.time() - timer
            if elapsed > 0.1:
                self._set_state(algorithms=self._algorithms, datasets=datasets)

        self._set_state(algorithms=self._algorithms, datasets=datasets)

        self._set_status('finished', 'Finished clustering')
Example #17
def test_remote():
    ka.set_config(to='default_readwrite', fr='default_readwrite')

    for alg in ['sha1', 'md5']:
        ka.set_config(algorithm=alg)
        for pass0 in range(1, 3):
            if pass0 == 1:
                ka.set_config(from_remote_only=True, to_remote_only=True)
            elif pass0 == 2:
                ka.set_config(from_remote_only=False, to_remote_only=False)
            _test_store_text('abctest2')
            _test_store_object(dict(a=1, b=2, c=[1, 2, 3, 4]))
            _test_store_npy(np.ones((12, 14)))

    a = ka.load_text('sha1://906faceaf874dd64e81de0048f36f4bab0f1f171')
    print(a)
Example #18
def spykingcircus(recording, sorting_out):
    import spiketoolkit as st
    import spikesorters as ss
    from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor
    import kachery as ka

    # TODO: need to think about how to deal with this
    ka.set_config(fr='default_readonly')

    recording = AutoRecordingExtractor(dict(path=recording), download=True)

    # Sorting
    print('Sorting...')
    sorter = ss.SpykingcircusSorter(recording=recording,
                                    output_folder='/tmp/tmp_spykingcircus_' +
                                    _random_string(8),
                                    delete_output_folder=True)

    sorter.set_params()
    timer = sorter.run()
    print('#SF-SORTER-RUNTIME#{:.3f}#'.format(timer))
    sorting = sorter.get_result()

    AutoSortingExtractor.write_sorting(sorting=sorting, save_path=sorting_out)
Example #19
from pathlib import Path

import numpy as np

import kachery as ka
from pykilosort import Bunch, add_default_handler, run
from spikeextractors.extractors import bindatrecordingextractor as dat
from spikeforest2_utils import AutoRecordingExtractor, AutoSortingExtractor

dat_path = Path("test/test.bin").absolute()
dir_path = dat_path.parent

ka.set_config(fr="default_readonly")
recording_path = "sha1dir://c0879a26f92e4c876cd608ca79192a84d4382868.manual_franklab/tetrode_600s/sorter1_1"
recording = AutoRecordingExtractor(recording_path, download=True)
recording.write_to_binary_dat_format(str(dat_path))
n_channels = len(recording.get_channel_ids())

probe = Bunch()
probe.NchanTOT = n_channels
probe.chanMap = np.array(range(0, n_channels))
probe.kcoords = np.ones(n_channels)
probe.xc = recording.get_channel_locations()[:, 0]
probe.yc = recording.get_channel_locations()[:, 1]

add_default_handler(level="DEBUG")

params = {"nfilt_factor": 8, "AUCsplit": 0.85, "nskip": 5}

run(
    dat_path,
Example #20
recording_path = 'sha1dir://ed0fe4de4ef2c54b7c9de420c87f9df200721b24.synth_visapy/mea_c30/set4'
sorting_true_path = 'sha1dir://ed0fe4de4ef2c54b7c9de420c87f9df200721b24.synth_visapy/mea_c30/set4/firings_true.mda'

sorter_name = 'kilosort2'
sorter = getattr(sorters, sorter_name)
params = {}

# Determine whether we are going to use gpu based on the name of the sorter
gpu = sorter_name in ['kilosort2', 'kilosort', 'tridesclous', 'ironclust']

# In the future we will check whether we have the correct version of the wrapper here
# Version: 0.1.5-w1

# Download the data (if needed)
ka.set_config(fr='default_readonly')
ka.load_file(recording_path + '/raw.mda')

# Run the spike sorting
with hither.config(container='docker://magland/sf-kilosort2:0.1.5', gpu=gpu):
    sorting_result = sorter.run(recording_path=recording_path,
                                sorting_out=hither.File(),
                                **params)
assert sorting_result.success
sorting_path = sorting_result.outputs.sorting_out

# Compare with ground truth
with hither.config(container='default'):
    compare_result = processing.compare_with_truth.run(
        sorting_path=sorting_path,
        sorting_true_path=sorting_true_path,
        json_out=hither.File())
Example #21
def main():
    ka.set_config(fr='default_readonly')
    path2 = ka.load_file(path_data)
    data2 = _load_dataset_data(path2)
    labels2 = ALG_dpclus(data2)
    print(labels2)
Example #22
def register_recording(*, recdir, output_fname, label, to):
    # Head reconstructed from the call below (the original example is
    # truncated here); mirrors register_groundtruth.
    with ka.config(to=to):
        raw_path = ka.store_file(recdir + '/raw.mda')
        obj = dict(raw=raw_path)
        obj['self_reference'] = ka.store_object(
            obj, basename='{}.json'.format(label))
        with open(output_fname, 'w') as f:
            json.dump(obj, f, indent=4)


def register_groundtruth(*, recdir, output_fname, label, to):
    with ka.config(to=to):
        firings_path = ka.store_file(recdir + '/firings_true.mda')
        obj = dict(firings=firings_path)
        obj['self_reference'] = ka.store_object(
            obj, basename='{}.json'.format(label))
        with open(output_fname, 'w') as f:
            json.dump(obj, f, indent=4)


ka.set_config(fr='default_readwrite', to='default_readwrite')

list_rec = [
    str(f) for f in os.listdir(path_from)
    if os.path.isdir(os.path.join(path_from, f))
]

print('# files: {}'.format(len(list_rec)))
for rec1 in list_rec:
    print(f'Uploading {rec1}')
    path_rec1 = os.path.join(path_from, rec1)
    if False:  # set to True to actually register the recordings
        register_recording(recdir=path_rec1,
                           output_fname=os.path.join(path_to, rec1 + '.json'),
                           label=rec1,
                           to='default_readwrite')
Example #23
def main():
    parser = argparse.ArgumentParser(
        description="Prepare SpikeForest recordings (i.e., populate this repository)")
    parser.add_argument('output_dir',
                        help='The output directory (e.g., recordings)')
    parser.add_argument(
        '--upload',
        action='store_true',
        help='Whether to upload the recording objects to kachery (password required)')
    # parser.add_argument('--verbose', action='store_true', help='Turn on verbose output')

    args = parser.parse_args()
    output_dir = args.output_dir

    if args.upload:
        ka.set_config(fr='default_readwrite', to='default_readwrite')
    else:
        ka.set_config(fr='default_readonly')

    # geom_mearec_neuronexus = np.genfromtxt('mearec_neuronexus_geom.csv', delimiter=',').tolist()
    mearec_neuronexus_geom_fname = 'mearec_neuronexus_geom.csv'

    # Load a spikeforest analysis object
    X = ka.load_object(
        'sha1://b678d798d67b6faa3c6240aca52f3857c9e4b877/analysis.json')

    # the output directory on the local machine
    basedir = output_dir
    if os.path.exists(basedir):
        raise Exception('Directory already exists: {}'.format(basedir))
    os.mkdir(basedir)

    studysets_to_add = ['PAIRED_ENGLISH']
    studysets_to_include = [
        'PAIRED_BOYDEN', 'PAIRED_CRCNS_HC1', 'PAIRED_MEA64C_YGER',
        'PAIRED_KAMPFF', 'PAIRED_MONOTRODE', 'SYNTH_BIONET', 'SYNTH_MONOTRODE',
        'SYNTH_MAGLAND', 'SYNTH_MEAREC_NEURONEXUS', 'SYNTH_MEAREC_TETRODE',
        'SYNTH_MONOTRODE', 'SYNTH_VISAPY', 'HYBRID_JANELIA', 'MANUAL_FRANKLAB'
    ]
    # studysets_to_include = ['PAIRED_CRCNS_HC1', 'PAIRED_MEA64C_YGER', 'PAIRED_KAMPFF', 'PAIRED_MONOTRODE', 'SYNTH_MONOTRODE', 'SYNTH_MAGLAND', 'SYNTH_MEAREC_NEURONEXUS', 'SYNTH_MEAREC_TETRODE', 'SYNTH_MONOTRODE', 'SYNTH_VISAPY', 'HYBRID_JANELIA', 'MANUAL_FRANKLAB']

    listdir_ = lambda _path: [
        x for x in os.listdir(_path) if os.path.isdir(os.path.join(_path, x))
    ]
    listfile_ = lambda _path: [
        x for x in os.listdir(_path) if os.path.isfile(os.path.join(_path, x))
    ]

    # These are the files to download within each recording
    fnames = ['geom.csv', 'params.json', 'raw.mda', 'firings_true.mda']
    # fnames = ['geom.csv', 'params.json']
    StudySets_add = []
    for studyset_name in studysets_to_add:
        studyset = dict(name=studyset_name,
                        info=studyset_name,
                        description=studyset_name)
        print('STUDYSET: {}'.format(studyset_name))
        studysetdir_local = os.path.join(basedir, studyset_name)
        assert os.path.exists(studysetdir_local)
        list_study = []
        list_study_name = listdir_(studysetdir_local)
        for study_name in list_study_name:
            study = dict(name=study_name, studySetName=studyset_name)
            print('STUDY: {}/{}'.format(studyset_name, study_name))
            studydir_local = os.path.join(studysetdir_local, study_name)
            assert os.path.exists(studydir_local)
            list_recname = listfile_(studydir_local)
            list_recname = [
                x.replace('.json', '') for x in list_recname
                if 'firings_true.json' not in x
            ]
            list_recording = []
            for recname in list_recname:
                print('RECORDING: {}/{}/{}'.format(studyset_name, study_name,
                                                   recname))
                with open(os.path.join(studydir_local, recname + '.json'),
                          'r') as f:
                    recording = json.load(f)
                recording['name'] = recname
                recording['studyName'] = study_name
                recording['studySetName'] = studyset_name
                recording['directory'] = recdir  # FIXME: recdir is undefined in this scope
                list_recording.append(recording)
            study['recordings'] = list_recording
            study['self_reference'] = ka.store_object(
                study, basename='{}.json'.format(study_name))
            list_study.append(study)
            with open(os.path.join(studydir_local, study_name + '.json'),
                      'w') as f:
                json.dump(study, f, indent=4)
        studyset['studies'] = list_study
        studyset['self_reference'] = ka.store_object(
            studyset, basename='{}.json'.format(studyset_name))
        with open(os.path.join(studysetdir_local, studyset_name + '.json'),
                  'w') as f:
            json.dump(studyset, f, indent=4)
        StudySets_add.append(studyset)

    # Combine the study sets from the analysis object with the newly added ones
    StudySets = X['StudySets'] + StudySets_add
    studysets_obj = dict(StudySets=StudySets)
    studysets_path = ka.store_object(studysets_obj, basename='studysets.json')
    with open(os.path.join(basedir, 'studysets'), 'w') as f:
        f.write(studysets_path)
Example #24
def main():
    from spikeforest2 import sorters
    from spikeforest2 import processing

    parser = argparse.ArgumentParser(
        description='Run the SpikeForest2 main analysis')
    # parser.add_argument('analysis_file', help='Path to the analysis specification file (.json format).')
    # parser.add_argument('--config', help='Configuration file', required=True)
    # parser.add_argument('--output', help='Analysis output file (.json format)', required=True)
    # parser.add_argument('--slurm', help='Optional SLURM configuration file (.json format)', required=False, default=None)
    # parser.add_argument('--verbose', help='Provide some additional verbose output.', action='store_true')
    parser.add_argument(
        'spec',
        help='Path to the .json file containing the analysis specification')
    parser.add_argument('--output',
                        '-o',
                        help='The output .json file',
                        required=True)
    parser.add_argument('--force-run',
                        help='Force rerunning of all spike sorting',
                        action='store_true')
    parser.add_argument(
        '--force-run-all',
        help='Force rerunning of all spike sorting and other processing',
        action='store_true')
    parser.add_argument('--parallel',
                        help='Optional number of parallel jobs',
                        required=False,
                        default='0')
    parser.add_argument('--slurm',
                        help='Path to slurm config file',
                        required=False,
                        default=None)
    parser.add_argument('--cache',
                        help='The cache database to use',
                        required=False,
                        default=None)
    parser.add_argument('--rerun-failing',
                        help='Rerun sorting jobs that previously failed',
                        action='store_true')
    parser.add_argument('--test', help='Only run a few.', action='store_true')
    parser.add_argument('--job-timeout',
                        help='Timeout for sorting jobs',
                        required=False,
                        default=600)
    parser.add_argument('--log-file',
                        help='Log file for analysis progress',
                        required=False,
                        default=None)

    args = parser.parse_args()
    force_run_all = args.force_run_all

    # the following apply to sorting jobs only
    force_run = args.force_run or args.force_run_all
    job_timeout = float(args.job_timeout)
    cache_failing = True
    rerun_failing = args.rerun_failing

    with open(args.spec, 'r') as f:
        spec = json.load(f)

    # clear the log file
    if args.log_file is not None:
        with open(args.log_file, 'w'):
            pass

    studysets_path = spec['studysets']
    studyset_names = spec['studyset_names']
    spike_sorters = spec['spike_sorters']

    ka.set_config(fr='default_readonly')

    print(f'Loading study sets object from: {studysets_path}')
    studysets_obj = ka.load_object(studysets_path)
    if not studysets_obj:
        raise Exception(f'Unable to load: {studysets_path}')

    all_study_sets = studysets_obj['StudySets']
    study_sets = []
    for studyset in all_study_sets:
        if studyset['name'] in studyset_names:
            study_sets.append(studyset)

    if int(args.parallel) > 0:
        job_handler = hither.ParallelJobHandler(int(args.parallel))
        job_handler_gpu = job_handler
        job_handler_ks = job_handler
    elif args.slurm:
        with open(args.slurm, 'r') as f:
            slurm_config = json.load(f)
        job_handler = hither.SlurmJobHandler(working_dir='tmp_slurm',
                                             **slurm_config['cpu'])
        job_handler_gpu = hither.SlurmJobHandler(working_dir='tmp_slurm',
                                                 **slurm_config['gpu'])
        job_handler_ks = hither.SlurmJobHandler(working_dir='tmp_slurm',
                                                **slurm_config['ks'])
    else:
        job_handler = None
        job_handler_gpu = None
        job_handler_ks = None

    with hither.config(container='default',
                       cache=args.cache,
                       force_run=force_run_all,
                       job_handler=job_handler,
                       log_path=args.log_file), hither.job_queue():
        studies = []
        recordings = []
        for studyset in study_sets:
            studyset_name = studyset['name']
            print(f'================ STUDY SET: {studyset_name}')
            studies0 = studyset['studies']
            if args.test:
                studies0 = studies0[:1]
                studyset['studies'] = studies0
            for study in studies0:
                study['study_set'] = studyset_name
                study_name = study['name']
                print(f'======== STUDY: {study_name}')
                recordings0 = study['recordings']
                if args.test:
                    recordings0 = recordings0[:2]
                    study['recordings'] = recordings0
                for recording in recordings0:
                    recording['study'] = study_name
                    recording['study_set'] = studyset_name
                    recording['firings_true'] = recording['firingsTrue']
                    recordings.append(recording)
                studies.append(study)

        # Download recordings
        for recording in recordings:
            ka.load_file(recording['directory'] + '/raw.mda')
            ka.load_file(recording['directory'] + '/firings_true.mda')

        # Attach results objects
        for recording in recordings:
            recording['results'] = dict()

        # Summarize recordings
        for recording in recordings:
            recording_path = recording['directory']
            sorting_true_path = recording['firingsTrue']
            recording['results'][
                'computed-info'] = processing.compute_recording_info.run(
                    _label=
                    f'compute-recording-info:{recording["study"]}/{recording["name"]}',
                    recording_path=recording_path,
                    json_out=hither.File())
            recording['results'][
                'true-units-info'] = processing.compute_units_info.run(
                    _label=
                    f'compute-units-info:{recording["study"]}/{recording["name"]}',
                    recording_path=recording_path,
                    sorting_path=sorting_true_path,
                    json_out=hither.File())

        # Spike sorting
        for sorter in spike_sorters:
            for recording in recordings:
                if recording['study_set'] in sorter['studysets']:
                    recording_path = recording['directory']
                    sorting_true_path = recording['firingsTrue']

                    algorithm = sorter['processor_name']
                    if not hasattr(sorters, algorithm):
                        raise Exception(
                            f'No such sorting algorithm: {algorithm}')
                    Sorter = getattr(sorters, algorithm)

                    if algorithm in ['ironclust']:
                        gpu = True
                        jh = job_handler_gpu
                    elif algorithm in ['kilosort', 'kilosort2']:
                        gpu = True
                        jh = job_handler_ks
                    else:
                        gpu = False
                        jh = job_handler
                    with hither.config(gpu=gpu,
                                       force_run=force_run,
                                       exception_on_fail=False,
                                       cache_failing=cache_failing,
                                       rerun_failing=rerun_failing,
                                       job_handler=jh,
                                       job_timeout=job_timeout):
                        sorting_result = Sorter.run(
                            _label=
                            f'{algorithm}:{recording["study"]}/{recording["name"]}',
                            recording_path=recording['directory'],
                            sorting_out=hither.File())
                        recording['results']['sorting-' +
                                             sorter['name']] = sorting_result
                    recording['results'][
                        'comparison-with-truth-' +
                        sorter['name']] = processing.compare_with_truth.run(
                            _label=
                            f'comparison-with-truth:{algorithm}:{recording["study"]}/{recording["name"]}',
                            sorting_path=sorting_result.outputs.sorting_out,
                            sorting_true_path=sorting_true_path,
                            json_out=hither.File())
                    recording['results'][
                        'units-info-' +
                        sorter['name']] = processing.compute_units_info.run(
                            _label=
                            f'units-info:{algorithm}:{recording["study"]}/{recording["name"]}',
                            recording_path=recording_path,
                            sorting_path=sorting_result.outputs.sorting_out,
                            json_out=hither.File())

    # Assemble all of the results
    print('')
    print('=======================================================')
    print('Assembling results...')
    for recording in recordings:
        print(
            f'Assembling recording: {recording["study"]}/{recording["name"]}')
        recording['summary'] = dict(
            plots=dict(),
            computed_info=ka.load_object(
                recording['results']['computed-info'].outputs.json_out._path),
            true_units_info=ka.store_file(
                recording['results']
                ['true-units-info'].outputs.json_out._path))
    sorting_results = []
    for sorter in spike_sorters:
        for recording in recordings:
            if recording['study_set'] in sorter['studysets']:
                print(
                    f'Assembling sorting: {sorter["processor_name"]} {recording["study"]}/{recording["name"]}'
                )
                sorting_result = recording['results']['sorting-' +
                                                      sorter['name']]
                comparison_result = recording['results'][
                    'comparison-with-truth-' + sorter['name']]
                units_info_result = recording['results']['units-info-' +
                                                         sorter['name']]
                console_out_str = _console_out_to_str(
                    sorting_result.runtime_info['console_out'])
                console_out_path = ka.store_text(console_out_str)
                sr = dict(
                    recording=recording,
                    sorter=sorter,
                    firings_true=recording['directory'] + '/firings_true.mda',
                    processor_name=sorter['processor_name'],
                    processor_version=sorting_result.version,
                    execution_stats=dict(
                        start_time=sorting_result.runtime_info['start_time'],
                        end_time=sorting_result.runtime_info['end_time'],
                        elapsed_sec=sorting_result.runtime_info['end_time'] -
                        sorting_result.runtime_info['start_time'],
                        retcode=0 if sorting_result.success else -1,
                        timed_out=sorting_result.runtime_info.get(
                            'timed_out', False)),
                    container=sorting_result.container,
                    console_out=console_out_path)
                if sorting_result.success:
                    sr['firings'] = ka.store_file(
                        sorting_result.outputs.sorting_out._path)
                    sr['comparison_with_truth'] = dict(json=ka.store_file(
                        comparison_result.outputs.json_out._path))
                    sr['sorted_units_info'] = ka.store_file(
                        units_info_result.outputs.json_out._path)
                else:
                    sr['firings'] = None
                    sr['comparison_with_truth'] = None
                    sr['sorted_units_info'] = None
                sorting_results.append(sr)

    # Delete results from recordings
    for recording in recordings:
        del recording['results']

    # Aggregate sorting results
    print('')
    print('=======================================================')
    print('Aggregating sorting results...')
    aggregated_sorting_results = aggregate_sorting_results(
        studies, recordings, sorting_results)

    # Show output summary
    for sr in aggregated_sorting_results['study_sorting_results']:
        study_name = sr['study']
        sorter_name = sr['sorter']
        n1 = np.array(sr['num_matches'])
        n2 = np.array(sr['num_false_positives'])
        n3 = np.array(sr['num_false_negatives'])
        accuracies = n1 / (n1 + n2 + n3)
        avg_accuracy = np.mean(accuracies)
        txt = 'STUDY: {}, SORTER: {}, AVG ACCURACY: {}'.format(
            study_name, sorter_name, avg_accuracy)
        print(txt)

    output_object = dict(studies=studies,
                         recordings=recordings,
                         study_sets=study_sets,
                         sorting_results=sorting_results,
                         aggregated_sorting_results=ka.store_object(
                             aggregated_sorting_results,
                             basename='aggregated_sorting_results.json'))

    print(f'Writing output to {args.output}...')
    with open(args.output, 'w') as f:
        json.dump(output_object, f, indent=4)
    print('Done.')
Example #25
def main():
    ka.set_config(**kachery_config)
    path2 = ka.load_file(path_data)
    data2 = _load_dataset_data(path2)
    labels2 = ALG_dpclus(data2)
    print(labels2)
Example #26
def main():
    parser = argparse.ArgumentParser(
        description=help_txt, formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        'analysis_files',
        nargs='+',
        help='The files generated by the main spikeforest analysis')
    parser.add_argument('--output',
                        '-o',
                        help='The output .json file',
                        required=True)

    args = parser.parse_args()

    ka.set_config(fr='default_readonly')

    print(
        '******************************** LOADING ANALYSIS OUTPUT OBJECTS...')
    studies = []
    study_sets = []
    recordings = []
    sorting_results = []
    for analysis_file in args.analysis_files:
        print('Loading: {}'.format(analysis_file))
        obj = ka.load_object(analysis_file)
        if obj is not None:
            studies = studies + obj['studies']
            study_sets = study_sets + obj.get('study_sets', [])
            recordings = recordings + obj['recordings']
            sorting_results = sorting_results + obj['sorting_results']
        else:
            raise Exception('Unable to load: {}'.format(analysis_file))

    # ALGORITHMS
    print('******************************** ASSEMBLING ALGORITHMS...')
    algorithms_by_processor_name = dict()
    Algorithms = []
    basepath = '../../spikeforest2/sorters/descriptions'
    repo_base_url = 'https://github.com/flatironinstitute/spikeforest/blob/master'
    for item in os.listdir(basepath):
        if item.endswith('.md'):
            alg = frontmatter.load(basepath + '/' + item).to_dict()
            alg['markdown_link'] = repo_base_url + '/spikeforest/spikeforestsorters/descriptions/' + item
            alg['markdown'] = alg['content']
            del alg['content']
            if 'processor_name' in alg:
                algorithms_by_processor_name[alg['processor_name']] = alg
            Algorithms.append(alg)
    print([alg['label'] for alg in Algorithms])

    Studies = []
    for study in studies:
        Studies.append(
            dict(
                name=study['name'],
                studySet=study['study_set'],
                description=study.get('description', ''),
                recordings=[]
                # the following can be obtained from the other collections
                # numRecordings, sorters, etc...
            ))
    print([S['name'] for S in Studies])

    print('******************************** ASSEMBLING STUDY SETS...')
    study_sets_by_name = dict()
    for study_set in study_sets:
        study_sets_by_name[study_set['name']] = study_set
        study_set['studies'] = []
    studies_by_name = dict()
    for study in studies:
        study0 = dict(name=study['name'],
                      studySetName=study['study_set'],
                      recordings=[])
        study_sets_by_name[study['study_set']]['studies'].append(study0)
        studies_by_name[study0['name']] = study0
    for recording in recordings:
        true_units_info = ka.load_object(
            recording['summary']['true_units_info'])
        if not true_units_info:
            print(recording['summary']['true_units_info'])
            raise Exception(
                'Unable to load true_units_info for recording {}'.format(
                    recording['name']))
        recording0 = dict(
            name=recording['name'],
            studyName=recording['study'],
            studySetName=studies_by_name[recording['study']]['studySetName'],
            directory=recording['directory'],
            firingsTrue=recording['firings_true'],
            sampleRateHz=recording['summary']['computed_info']['samplerate'],
            numChannels=recording['summary']['computed_info']['num_channels'],
            durationSec=recording['summary']['computed_info']['duration_sec'],
            numTrueUnits=len(true_units_info),
            spikeSign=-1  # TODO: set this properly
        )
        studies_by_name[recording0['studyName']]['recordings'].append(
            recording0)
    StudySets = []
    for study_set in study_sets:
        StudySets.append(study_set)

    # SORTING RESULTS
    print('******************************** SORTING RESULTS...')
    SortingResults = []
    for sr in sorting_results:
        SR = dict(
            recordingName=sr['recording']['name'],
            studyName=sr['recording']['study'],
            sorterName=sr['sorter']['name'],
            recordingDirectory=sr['recording']['directory'],
            firingsTrue=sr['recording']['firings_true'],
            consoleOut=sr['console_out'],
            container=sr['container'],
            cpuTimeSec=sr['execution_stats'].get('elapsed_sec', None),
            returnCode=sr['execution_stats'].get(
                'retcode', 0
            ),  # TODO: in future, the default should not be 0 -- rather it should be a required field of execution_stats
            timedOut=sr['execution_stats'].get('timed_out', False),
            startTime=datetime.fromtimestamp(
                sr['execution_stats'].get('start_time')).isoformat(),
            endTime=datetime.fromtimestamp(
                sr['execution_stats'].get('end_time')).isoformat())
        if sr.get('firings', None):
            SR['firings'] = sr['firings']
            if not sr.get('comparison_with_truth', None):
                print(
                    'Warning: comparison with truth not found for sorting result: {} {}/{}'
                    .format(sr['sorter']['name'], sr['recording']['study'],
                            sr['recording']['name']))
                print('Console output is here: ' + sr['console_out'])
        else:
            print('Warning: firings not found for sorting result: {} {}/{}'.
                  format(sr['sorter']['name'], sr['recording']['study'],
                         sr['recording']['name']))
            print('Console output is here: ' + sr['console_out'])
        SortingResults.append(SR)
    # print('Num unit results:', len(UnitResults))

    # SORTERS
    print('******************************** ASSEMBLING SORTERS...')
    sorters_by_name = dict()
    for sr in sorting_results:
        sorters_by_name[sr['sorter']['name']] = sr['sorter']
    Sorters = []
    sorter_names = sorted(sorters_by_name.keys())
    for sorter_name in sorter_names:
        sorter = sorters_by_name[sorter_name]
        alg = algorithms_by_processor_name.get(sorter['processor_name'],
                                               dict())
        alg_label = alg.get('label', sorter['processor_name'])
        Sorters.append(
            dict(
                name=sorter['name'],
                algorithmName=alg_label,
                processorName=sorter['processor_name'],
                processorVersion='0',  # jfm to provide this
                sortingParameters=sorter['params']))
    print([S['name'] + ':' + S['algorithmName'] for S in Sorters])

    # STUDY ANALYSIS RESULTS
    print(
        '******************************** ASSEMBLING STUDY ANALYSIS RESULTS...'
    )
    StudyAnalysisResults = [
        _assemble_study_analysis_result(study_name=study['name'],
                                        study_set_name=study['study_set'],
                                        recordings=recordings,
                                        sorting_results=sorting_results,
                                        sorter_names=sorter_names)
        for study in studies
    ]

    # GENERAL
    print('******************************** ASSEMBLING GENERAL INFO...')
    General = [
        dict(dateUpdated=datetime.now().isoformat(),
             packageVersions=dict(spikeforest2=pkg_resources.get_distribution(
                 "spikeforest2").version))
    ]

    obj = dict(mode='spike-front',
               StudySets=StudySets,
               SortingResults=SortingResults,
               Sorters=Sorters,
               Algorithms=Algorithms,
               StudyAnalysisResults=StudyAnalysisResults,
               General=General)
    print(f'Writing to {args.output}...')
    with open(args.output, 'w') as f:
        json.dump(obj, f, indent=4)
    print('Done.')
Example #27
def main():
    parser = argparse.ArgumentParser(
        description="Prepare SpikeForest recordings (i.e., populate this repository)")
    parser.add_argument('output_dir', help='The output directory (e.g., recordings)')
    parser.add_argument('--upload', action='store_true', help='Whether to upload the recording objects to kachery (password required)')
    # parser.add_argument('--verbose', action='store_true', help='Turn on verbose output')

    args = parser.parse_args()
    output_dir = args.output_dir

    if args.upload:
        ka.set_config(
            fr='default_readwrite',
            to='default_readwrite'
        )
    else:
        ka.set_config(
            fr='default_readonly',
        )
    
    # geom_mearec_neuronexus = np.genfromtxt('mearec_neuronexus_geom.csv', delimiter=',').tolist()
    mearec_neuronexus_geom_fname = 'mearec_neuronexus_geom.csv'

    # Load a spikeforest analysis object
    X = ka.load_object('sha1://b678d798d67b6faa3c6240aca52f3857c9e4b877/analysis.json')

    # the output directory on the local machine
    basedir = output_dir
    if os.path.exists(basedir):
        raise Exception('Directory already exists: {}'.format(basedir))
    os.mkdir(basedir)

    studysets_to_include = ['PAIRED_BOYDEN', 'PAIRED_CRCNS_HC1', 'PAIRED_MEA64C_YGER', 'PAIRED_KAMPFF', 'PAIRED_MONOTRODE', 'SYNTH_BIONET', 'SYNTH_MONOTRODE', 'SYNTH_MAGLAND', 'SYNTH_MEAREC_NEURONEXUS', 'SYNTH_MEAREC_TETRODE', 'SYNTH_MONOTRODE', 'SYNTH_VISAPY', 'HYBRID_JANELIA', 'MANUAL_FRANKLAB']
    # studysets_to_include = ['PAIRED_CRCNS_HC1', 'PAIRED_MEA64C_YGER', 'PAIRED_KAMPFF', 'PAIRED_MONOTRODE', 'SYNTH_MONOTRODE', 'SYNTH_MAGLAND', 'SYNTH_MEAREC_NEURONEXUS', 'SYNTH_MEAREC_TETRODE', 'SYNTH_MONOTRODE', 'SYNTH_VISAPY', 'HYBRID_JANELIA', 'MANUAL_FRANKLAB']

    # These are the files to download within each recording
    fnames = ['geom.csv', 'params.json', 'raw.mda', 'firings_true.mda']
    # fnames = ['geom.csv', 'params.json']
    for studyset in X['StudySets']:
        studyset_name = studyset['name']
        if studyset_name in studysets_to_include:
            print('STUDYSET: {}'.format(studyset['name']))
            studysetdir_local = os.path.join(basedir, studyset_name)
            if not os.path.exists(studysetdir_local):
                os.mkdir(studysetdir_local)
            for study in studyset['studies']:
                study_name = study['name']
                print('STUDY: {}/{}'.format(studyset_name, study_name))
                studydir_local = os.path.join(studysetdir_local, study_name)
                if not os.path.exists(studydir_local):
                    os.mkdir(studydir_local)
                for recording in study['recordings']:
                    if studyset_name == 'SYNTH_MEAREC_NEURONEXUS':
                        patch_recording_geom(recording, mearec_neuronexus_geom_fname)
                    recname = recording['name']
                    print('RECORDING: {}/{}/{}'.format(studyset_name, study_name, recname))
                    recdir = recording['directory']
                    recfile = os.path.join(studydir_local, recname + '.json')
                    obj = dict(
                        raw=recdir + '/raw.mda',
                        params=ka.load_object(recdir + '/params.json'),
                        geom=np.genfromtxt(ka.load_file(recdir + '/geom.csv'), delimiter=',').T
                    )
                    obj = _json_serialize(obj)
                    obj['self_reference'] = ka.store_object(obj, basename='{}/{}/{}.json'.format(studyset_name, study_name, recname))
                    with open(recfile, 'w') as f:
                        json.dump(obj, f, indent=4)
                    firings_true_file = os.path.join(studydir_local, recname + '.firings_true.json')
                    obj2 = dict(
                        firings=recdir + '/firings_true.mda'
                    )
                    obj2['self_reference'] = ka.store_object(obj2, basename='{}/{}/{}.firings_true.json'.format(studyset_name, study_name, recname))
                    with open(firings_true_file, 'w') as f:
                        json.dump(obj2, f, indent=4)
                study['self_reference'] = ka.store_object(study, basename='{}.json'.format(study_name))
                with open(os.path.join(studydir_local, study_name + '.json'), 'w') as f:
                    json.dump(study, f, indent=4)
            studyset['self_reference'] = ka.store_object(studyset, basename='{}.json'.format(studyset_name))
            with open(os.path.join(studysetdir_local, studyset_name + '.json'), 'w') as f:
                json.dump(studyset, f, indent=4)
    studysets_obj = dict(
        StudySets=X['StudySets']
    )
    studysets_path = ka.store_object(studysets_obj, basename='studysets.json')
    with open(os.path.join(basedir, 'studysets'), 'w') as f:
        f.write(studysets_path)