def test1():
    import kachery_p2p as kp
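    # Assumes module-level imports: hither (imported as hi), numpy as np,
    # typing.cast, the workspace/FieldModel module imported as fs, and the
    # miniwasp_hither hither function.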

    w = fs.load_workspace()
    print(w.get_uri())
    
    geom_uri = 'sha1://fce1fb4c8637a36edb34669e1ac612700ce7151e/lens_r01.go3'
    job_cache = hi.JobCache(feed_name='job-cache')
    with hi.Config(use_container=True, show_console=True, job_cache=job_cache):
        j: hi.Job = miniwasp_hither.run(geom_uri=geom_uri, omega=3.141592*2/330.0, ppw=50)
        H, E = j.wait().return_value
        H = cast(np.ndarray, H)
        E = cast(np.ndarray, E)
    
    print(H.shape, E.shape)

    [nc, N1, N2, N3] = H.shape
    transformation = np.array([
        [1/N1, 0, 0, 0],
        [0, 1/N2, 0, 0],
        [0, 0, 1/N3, 0]
    ])
    f = fs.FieldModel(label='miniwasp-H', data=H, components=['x', 'y', 'z'], transformation=transformation)
    w.add_field_model(f)
    f = fs.FieldModel(label='miniwasp-E', data=E, components=['x', 'y', 'z'], transformation=transformation)
    w.add_field_model(f)
Example #2
def createjob_get_firing_data(labbox, sorting_object, recording_object,
                              configuration):
    jh = labbox.get_job_handler('partition1')
    jc = labbox.get_job_cache()
    with hi.Config(job_cache=jc, job_handler=jh, use_container=jh.is_remote()):
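        # Return the hither Job itself (rather than waiting on it) so the
        # caller decides when to collect the result; the job uses the
        # 'partition1' handler and runs in a container when that handler is
        # remote.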
        return get_firing_data.run(sorting_object=sorting_object,
                                   recording_object=recording_object,
                                   configuration=configuration)
def example4():
    import kachery_p2p as kp
    geom_uri = 'sha1://fce1fb4c8637a36edb34669e1ac612700ce7151e/lens_r01.go3'
    job_cache = hi.JobCache(feed_name='job-cache')
    with hi.Config(use_container=True, show_console=True, job_cache=job_cache):
        j: hi.Job = example4_hither.run(geom_uri=geom_uri, omega=3.141592*2/330.0, ppw=50)
        x = j.wait().return_value
        return x
def preload_extract_snippets(labbox, recording_object, sorting_object):
    from labbox_ephys import prepare_snippets_h5
    jh = labbox.get_job_handler('partition2')
    jc = labbox.get_job_cache()
    with hi.Config(job_cache=jc, job_handler=jh, use_container=jh.is_remote()):
        snippets_h5 = prepare_snippets_h5.run(
            recording_object=recording_object, sorting_object=sorting_object)
        return snippets_h5
def createjob_get_unit_snrs(labbox,
                            sorting_object,
                            recording_object,
                            configuration={}):
    from labbox_ephys import prepare_snippets_h5
    jh = labbox.get_job_handler('partition1')
    jc = labbox.get_job_cache()
    with hi.Config(job_cache=jc, job_handler=jh, use_container=jh.is_remote()):
        snippets_h5 = prepare_snippets_h5.run(
            recording_object=recording_object, sorting_object=sorting_object)
        return get_unit_snrs.run(snippets_h5=snippets_h5)
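
A minimal driver sketch (not from the source) for the helper above, assuming `labbox`, `recording_object`, and `sorting_object` are already available as in the surrounding snippets; hither Jobs expose wait()/return_value as used elsewhere in these examples:

def example_drive_get_unit_snrs(labbox, recording_object, sorting_object):
    # Hypothetical driver: createjob_get_unit_snrs (defined above) returns a
    # hither Job; wait() blocks until it completes.
    job = createjob_get_unit_snrs(labbox, sorting_object, recording_object)
    return job.wait().return_value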
def createjob_individual_cluster_features(labbox, recording_object,
                                          sorting_object, unit_id):
    from labbox_ephys import prepare_snippets_h5
    jh = labbox.get_job_handler('partition1')
    jc = labbox.get_job_cache()
    with hi.Config(job_cache=jc, job_handler=jh, use_container=jh.is_remote()):
        snippets_h5 = prepare_snippets_h5.run(
            recording_object=recording_object, sorting_object=sorting_object)
        return individual_cluster_features.run(snippets_h5=snippets_h5,
                                               unit_id=unit_id)
def createjob_fetch_average_waveform_plot_data(labbox, recording_object,
                                               sorting_object, unit_id):
    from labbox_ephys import prepare_snippets_h5
    jh = labbox.get_job_handler('partition1')
    jc = labbox.get_job_cache()
    with hi.Config(job_cache=jc, job_handler=jh, use_container=jh.is_remote()):
        snippets_h5 = prepare_snippets_h5.run(
            recording_object=recording_object, sorting_object=sorting_object)
        return fetch_average_waveform_plot_data.run(snippets_h5=snippets_h5,
                                                    unit_id=unit_id)
Example #8
def createjob_sample_data_object_slices(labbox, data_uri, slices,
                                        component_indices: List[int],
                                        mode: str):
    # Wraps sample_data_object_slices(data_uri, slices, component_indices, mode) with job caching.
    with hi.Config(job_cache=labbox.get_job_cache()):
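        # Only the job cache is configured here (no job handler), so repeated
        # calls with the same arguments can be served from the cache while the
        # job itself runs under whatever handler is otherwise in effect.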
        return sample_data_object_slices.run(
            data_uri=data_uri,
            slices=slices,
            component_indices=component_indices,
            mode=mode)
def createjob_get_sorting_unit_snippets(labbox, recording_object,
                                        sorting_object, unit_id, time_range,
                                        max_num_snippets):
    from labbox_ephys import prepare_snippets_h5
    jh = labbox.get_job_handler('partition1')
    jc = labbox.get_job_cache()
    with hi.Config(job_cache=jc, job_handler=jh, use_container=jh.is_remote()):
        snippets_h5 = prepare_snippets_h5.run(
            recording_object=recording_object, sorting_object=sorting_object)
        return get_sorting_unit_snippets.run(snippets_h5=snippets_h5,
                                             unit_id=unit_id,
                                             time_range=time_range,
                                             max_num_snippets=max_num_snippets)
def main():
    (params, std_args) = init_configuration()
    study_sets = load_study_records(params.study_source_file)
    study_matrix = parse_sorters(params.sorter_spec_file,
                                 list(study_sets.keys()))
    sorting_matrix = populate_sorting_matrix(study_matrix, study_sets)
    sorting_matrix = remove_preexisting_records(sorting_matrix,
                                                params.workspace_uri)
    hither_config = extract_hither_config(std_args)
    jobs: List[hi.Job] = []

    try:
        with hi.Config(**hither_config):
            sortings = list(sorting_loop(sorting_matrix))
            with hi.Config(job_handler=None, job_cache=None):
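                # This nested Config clears the job handler and cache from the
                # outer hither_config, so the post-to-workspace jobs below are
                # created without them.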
                for sorting in sortings:
                    p = {
                        'sorting_entry': sorting,
                        'workspace_uri': params.workspace_uri
                    }
                    jobs.append(hi.Job(hi_post_result_to_workspace, p))
        hi.wait(None)
    finally:
        call_cleanup(hither_config)
def main():
    (args, std_args) = init_configuration()
    study_sets = load_study_records(args['study_source_file'])
    study_matrix = parse_sorters(args['sorter_spec_file'],
                                 list(study_sets.keys()))
    sorting_matrix = populate_sorting_matrix(study_matrix, study_sets)

    hither_config = extract_hither_config(std_args)
    try:
        with hi.Config(**hither_config):
            sortings = list(sorting_loop(sorting_matrix))
        hi.wait(None)
    finally:
        call_cleanup(hither_config)
    results: List[OutputRecord] = [make_output_record(job) for job in sortings]
    output_records(results, std_args)
Example #12
def main():
    (args, std_args) = init_configuration()
    sortings = load_sortings(args['sortingsfile'])
    if args['recordingset'] is not None and args['recordingset'] != '':
        sortings = [
            s for s in sortings if s['studyName'] == args['recordingset']
        ]
    hither_config = extract_hither_config(std_args)
    comparison_list = []
    try:
        print(f"\t\tScript execution beginning at {time.ctime()}")
        start_time = time.time()
        with hi.Config(**hither_config):
            extraction_loop(sortings, comparison_list, std_args['test'])
        hi.wait(None)
    finally:
        call_cleanup(hither_config)

    output_results(comparison_list, std_args['outfile'])
    print(f"\n\n\t\tElapsed time: {time.time() - start_time:.3f} sec")
    print(f"\t\tScript execution complete at {time.ctime()}")
def test_sorting(sorter_func):
    import sortingview as sv

    recording_name = 'paired_kampff/2014_11_25_Pair_3_0'
    recording_uri = 'sha1://a205f87cef8b7f86df7a09cddbc79a1fbe5df60f/2014_11_25_Pair_3_0.json'
    sorting_uri = 'sha1://c656add63d85a17840980084a1ff1cdc662a2cd5/2014_11_25_Pair_3_0.firings_true.json'

    recording = sv.LabboxEphysRecordingExtractor(recording_uri, download=True)
    sorting_true = sv.LabboxEphysSortingExtractor(sorting_uri)

    channel_ids = recording.get_channel_ids()
    samplerate = recording.get_sampling_frequency()
    num_timepoints = recording.get_num_frames()
    print(f'{recording_name}')
    print(
        f'Recording has {len(channel_ids)} channels and {num_timepoints} timepoints (samplerate: {samplerate})'
    )

    unit_ids = sorting_true.get_unit_ids()
    spike_train = sorting_true.get_unit_spike_train(unit_id=unit_ids[0])
    print(f'Unit {unit_ids[0]} has {len(spike_train)} events')

    jh = hi.ParallelJobHandler(num_workers=4)
    # jh = hi.SlurmJobHandler(num_jobs_per_allocation=4, max_simultaneous_allocations=4, srun_command='')
    log = hi.Log()
    with hi.Config(use_container=True,
                   job_handler=jh,
                   log=log,
                   show_console=True):
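        # hi.Job(func, kwargs) constructs a job from a function plus a kwargs
        # dict; wait() blocks until it finishes and return_value holds its
        # result (here, the sorting object).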
        sorting_object = hi.Job(sorter_func, {
            'recording_object': recording.object()
        }).wait().return_value
        sorting = sv.LabboxEphysSortingExtractor(sorting_object)

    unit_ids = sorting.get_unit_ids()
    spike_train = sorting.get_unit_spike_train(unit_id=unit_ids[0])
    print(f'Unit {unit_ids[0]} has {len(spike_train)} events')
Example #14
def test_sorting(sorter_func,
                 *,
                 show_console=True,
                 job_handler: Union[None, hi.JobHandler] = None):
    import sortingview as sv

    recording_name = 'paired_kampff/2014_11_25_Pair_3_0'
    recording_uri = 'sha1://a205f87cef8b7f86df7a09cddbc79a1fbe5df60f/2014_11_25_Pair_3_0.json'
    sorting_uri = 'sha1://c656add63d85a17840980084a1ff1cdc662a2cd5/2014_11_25_Pair_3_0.firings_true.json'

    recording = sv.LabboxEphysRecordingExtractor(recording_uri, download=True)
    sorting_true = sv.LabboxEphysSortingExtractor(sorting_uri)

    channel_ids = recording.get_channel_ids()
    samplerate = recording.get_sampling_frequency()
    num_timepoints = recording.get_num_frames()
    print(f'{recording_name}')
    print(
        f'Recording has {len(channel_ids)} channels and {num_timepoints} timepoints (samplerate: {samplerate})'
    )

    unit_ids = sorting_true.get_unit_ids()
    spike_train = sorting_true.get_unit_spike_train(unit_id=unit_ids[0])
    print(f'Unit {unit_ids[0]} has {len(spike_train)} events')

    with hi.Config(use_container=True,
                   show_console=show_console,
                   job_handler=job_handler):
        sorting_object = hi.Job(sorter_func, {
            'recording_object': recording.object()
        }).wait().return_value
        sorting = sv.LabboxEphysSortingExtractor(sorting_object)

    unit_ids = sorting.get_unit_ids()
    spike_train = sorting.get_unit_spike_train(unit_id=unit_ids[0])
    print(f'Unit {unit_ids[0]} has {len(spike_train)} events')
Example #15
def handle_message(self, msg):
    type0 = msg.get('type')
    if type0 == 'hitherCreateJob':
        functionName = msg['functionName']
        kwargs = msg['kwargs']
        client_job_id = msg['clientJobId']
        self._log(f'hitherCreateJob-1 {functionName} {client_job_id}')
        try:
            f = hi2.get_function(functionName)
            if f is not None:
                with hi2.Config(log=self._hither_log):
                    job_or_result = f(**kwargs, labbox=self._labbox_context)
            else:
                raise Exception(
                    f'Hither function not registered: {functionName}')
        except Exception as err:
            self._send_message({
                'type': 'hitherJobError',
                'job_id': client_job_id,
                'client_job_id': client_job_id,
                'error_message': f'Error creating outer job: {str(err)}',
                'runtime_info': None
            })
            return
        if isinstance(job_or_result, hi2.Job):
            job: hi2.Job = job_or_result
            setattr(job, '_client_job_id', client_job_id)
            job_id = job.job_id
            self._jobs_by_id[job_id] = job
            print(
                f'======== Created hither job (2): {job_id} {functionName}'
            )
            self._log(
                f'hitherCreateJob-2 {functionName} {client_job_id} {job_id}'
            )
            self._send_message({
                'type': 'hitherJobCreated',
                'job_id': job_id,
                'client_job_id': client_job_id
            })
        else:
            self._log(f'hitherCreateJob-3 {functionName} {client_job_id}')
            result = job_or_result
            # The function returned an immediate result rather than a Job, so
            # report it as finished right away.
            finished_msg = {
                'type': 'hitherJobFinished',
                'client_job_id': client_job_id,
                'job_id': client_job_id,
                # 'result': _make_json_safe(result),
                'result_sha1': _get_sha1_from_uri(
                    kp.store_json(_make_json_safe(result))),
                'runtime_info': {}
            }
            self._send_message(finished_msg)
    elif type0 == 'hitherCancelJob':
        job_id = msg['job_id']
        self._log(f'hitherCancelJob-1 {job_id}')
        assert job_id, 'Missing job_id'
        assert job_id in self._jobs_by_id, f'No job with id: {job_id}'
        job = self._jobs_by_id[job_id]
        job.cancel()
    elif type0 == 'subfeedMessageRequest':
        request_id = msg['requestId']
        feed_uri = msg['feedUri']
        if not feed_uri:
            feed_uri = 'feed://' + self._default_feed_id
        subfeed_name = msg['subfeedName']
        position = msg['position']
        wait_msec = msg['waitMsec']
        self._log(
            f'subfeed_message_request-1 {request_id} {feed_uri} {subfeed_name} {position} {wait_msec}'
        )
        self._subfeed_message_requests[request_id] = {
            'feed_uri': feed_uri,
            'subfeed_name': subfeed_name,
            'position': position,
            'wait_msec': wait_msec,
            'timestamp': time.time()
        }
Example #16
def run_sorter_docker(sorter_name,
                      recording,
                      output_folder,
                      delete_output_folder=False,
                      grouping_property=None,
                      parallel=False,
                      verbose=False,
                      raise_error=True,
                      n_jobs=-1,
                      joblib_backend='loky',
                      use_docker=True,
                      container=None,
                      **params):
    if use_docker:
        # if container is None:
        #     assert sorter_name in default_docker_images, f"Default docker image for {sorter_name} not found"
        #     docker_image = default_docker_images[sorter_name]
        #
        # print(f"Running in docker image {docker_image.get_name()}")
        output_folder = Path(output_folder).absolute()
        output_folder.mkdir(exist_ok=True, parents=True)

        # dump recording with relative file paths to docker container /input folder
        dump_dict_container, input_directory = modify_input_folder(
            recording.dump_to_dict(), '/input')

        with hither.Config(use_container=False, show_console=True):
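            # hither itself runs without a container here; any docker handling
            # is presumably done inside run_sorter_docker_with_container.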
            kwargs = dict(recording_dict=dump_dict_container,
                          sorter_name=sorter_name,
                          output_folder=str(output_folder),
                          delete_output_folder=False,
                          grouping_property=grouping_property,
                          parallel=parallel,
                          verbose=verbose,
                          raise_error=raise_error,
                          n_jobs=n_jobs,
                          joblib_backend=joblib_backend)
            kwargs.update(params)
            kwargs.update({
                'input_directory': str(input_directory),
                'output_directory': str(output_folder)
            })

            sorting_job = hither.Job(run_sorter_docker_with_container, kwargs)
            sorting_job.wait()
        sorting = se.NpzSortingExtractor(output_folder / "sorting_docker.npz")
    else:
        # standard call
        sorting = ss.run_sorter(sorter_name,
                                recording,
                                output_folder=output_folder,
                                delete_output_folder=delete_output_folder,
                                grouping_property=grouping_property,
                                parallel=parallel,
                                verbose=verbose,
                                raise_error=raise_error,
                                n_jobs=n_jobs,
                                joblib_backend=joblib_backend,
                                **params)

    return sorting
def preload_download_recording(labbox, recording_object):
    jh = labbox.get_job_handler('partition1')
    with hi.Config(job_handler=jh, use_container=jh.is_remote()):
        return preload_download_recording_2.run(
            recording_object=recording_object)
def preload_check_sorting_downloaded(labbox, sorting_object):
    jh = labbox.get_job_handler('partition1')
    with hi.Config(job_handler=jh, use_container=jh.is_remote()):
        return preload_check_sorting_downloaded_2.run(
            sorting_object=sorting_object)