def createjob_get_similar_units(labbox, recording_object, sorting_object):
    from labbox_ephys import prepare_snippets_h5
    jh = labbox.get_job_handler('partition3')
    jc = labbox.get_default_job_cache()
    with hi.Config(job_cache=jc, job_handler=jh, container=jh.is_remote):
        snippets_h5 = prepare_snippets_h5.run(
            recording_object=recording_object,
            sorting_object=sorting_object
        )
        return get_similar_units.run(snippets_h5=snippets_h5)
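# All of these createjob_* wrappers share the same shape: resolve a job
# handler and job cache from the labbox context, enter a hi.Config scope,
# and return a hither job. A hedged sketch of that shared pattern factored
# into a helper -- the name run_in_partition is illustrative, not part of
# the labbox-ephys API:

import hither as hi

def run_in_partition(labbox, partition, hither_function, **kwargs):
    # Resolve handler and cache from the labbox context, then run the
    # hither function inside a matching hi.Config scope, running in a
    # container when the handler is remote (same logic as the wrappers here).
    jh = labbox.get_job_handler(partition)
    jc = labbox.get_default_job_cache()
    with hi.Config(job_cache=jc, job_handler=jh, container=jh.is_remote):
        return hither_function.run(**kwargs)

# A single-step wrapper could then be written as, e.g.:
# def createjob_get_recording_info(labbox, recording_object):
#     return run_in_partition(labbox, 'partition1', get_recording_info,
#                             recording_object=recording_object)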
def preload_extract_snippets(labbox, recording_object, sorting_object):
    from labbox_ephys import prepare_snippets_h5
    jh = labbox.get_job_handler('partition2')
    jc = labbox.get_job_cache()
    with hi.Config(job_cache=jc, job_handler=jh, container=jh.is_remote):
        snippets_h5 = prepare_snippets_h5.run(
            recording_object=recording_object,
            sorting_object=sorting_object
        )
        return snippets_h5
def createjob_get_firing_data(labbox, sorting_object, recording_object, configuration):
    jh = labbox.get_job_handler('partition3')
    jc = labbox.get_default_job_cache()
    with hi.Config(job_cache=jc, job_handler=jh, container=jh.is_remote):
        return get_firing_data.run(
            sorting_object=sorting_object,
            recording_object=recording_object,
            configuration=configuration
        )
def createjob_calculate_timeseries_info(labbox, recording_object):
    jh = labbox.get_job_handler('timeseries')
    jc = labbox.get_job_cache()
    with hi.Config(job_cache=jc, job_handler=jh, container=jh.is_remote):
        return calculate_timeseries_info.run(recording_object=recording_object)
def createjob_fetch_pca_features(labbox, recording_object, sorting_object, unit_ids):
    from labbox_ephys import prepare_snippets_h5
    jh = labbox.get_job_handler('partition2')
    jc = labbox.get_default_job_cache()
    with hi.Config(job_cache=jc, job_handler=jh, container=jh.is_remote):
        snippets_h5 = prepare_snippets_h5.run(
            recording_object=recording_object,
            sorting_object=sorting_object
        )
        return fetch_pca_features.run(snippets_h5=snippets_h5, unit_ids=unit_ids)
def createjob_get_timeseries_segment(labbox, recording_object, ds_factor, segment_num, segment_size):
    jh = labbox.get_job_handler('timeseries')
    jc = labbox.get_job_cache()
    with hi.Config(job_cache=jc, job_handler=jh, container=jh.is_remote):
        return get_timeseries_segment.run(
            recording_object=recording_object,
            ds_factor=ds_factor,
            segment_num=segment_num,
            segment_size=segment_size
        )
def createjob_fetch_spike_waveforms(labbox, recording_object, sorting_object, unit_ids, spike_indices):
    from labbox_ephys import prepare_snippets_h5
    jh = labbox.get_job_handler('partition1')
    jc = labbox.get_job_cache()
    with hi.Config(job_cache=jc, job_handler=jh, container=jh.is_remote):
        snippets_h5 = prepare_snippets_h5.run(
            recording_object=recording_object,
            sorting_object=sorting_object
        )
        return fetch_spike_waveforms.run(
            snippets_h5=snippets_h5,
            unit_ids=unit_ids,
            spike_indices=spike_indices
        )
def createjob_fetch_average_waveform_plot_data(labbox, recording_object, sorting_object, unit_id):
    from labbox_ephys import prepare_snippets_h5
    jh = labbox.get_job_handler('partition2')
    jc = labbox.get_default_job_cache()
    with hi.Config(job_cache=jc, job_handler=jh, container=jh.is_remote):
        snippets_h5 = prepare_snippets_h5.run(
            recording_object=recording_object,
            sorting_object=sorting_object
        )
        return fetch_average_waveform_plot_data.run(snippets_h5=snippets_h5, unit_id=unit_id)
def createjob_get_isi_violation_rates(labbox, sorting_object, recording_object, configuration={}):
    jh = labbox.get_job_handler('partition1')
    jc = labbox.get_job_cache()
    with hi.Config(job_cache=jc, job_handler=jh, container=jh.is_remote):
        return get_isi_violation_rates.run(
            sorting_object=sorting_object,
            recording_object=recording_object,
            configuration=configuration
        )
def createjob_get_peak_channels(labbox, sorting_object, recording_object, configuration={}):
    from labbox_ephys import prepare_snippets_h5
    jh = labbox.get_job_handler('partition1')
    jc = labbox.get_job_cache()
    with hi.Config(job_cache=jc, job_handler=jh, container=jh.is_remote):
        snippets_h5 = prepare_snippets_h5.run(
            recording_object=recording_object,
            sorting_object=sorting_object
        )
        return get_peak_channels.run(snippets_h5=snippets_h5)
def createjob_get_sorting_unit_snippets(labbox, recording_object, sorting_object, unit_id, time_range, max_num_snippets):
    from labbox_ephys import prepare_snippets_h5
    jh = labbox.get_job_handler('partition1')
    jc = labbox.get_job_cache()
    with hi.Config(job_cache=jc, job_handler=jh, container=jh.is_remote):
        snippets_h5 = prepare_snippets_h5.run(
            recording_object=recording_object,
            sorting_object=sorting_object
        )
        return get_sorting_unit_snippets.run(
            snippets_h5=snippets_h5,
            unit_id=unit_id,
            time_range=time_range,
            max_num_snippets=max_num_snippets
        )
def createjob_fetch_correlogram_plot_data(labbox, sorting_object, unit_x, unit_y=None):
    jh = labbox.get_job_handler('partition1')
    jc = labbox.get_default_job_cache()
    with hi.Config(job_cache=jc, job_handler=jh, container=jh.is_remote):
        return fetch_correlogram_plot_data.run(
            sorting_object=sorting_object,
            unit_x=unit_x,
            unit_y=unit_y
        )
def createjob_get_recording_info(labbox, recording_object):
    jc = labbox.get_default_job_cache()
    with hi.Config(job_cache=jc):
        return get_recording_info.run(recording_object=recording_object)
import os
import sys
from pathlib import Path

import hither as hi
import labbox_ephys as le
from neuropixels_data_sep_2020 import (
    prepare_cortexlab_datasets,
    prepare_cortexlab_drift_datasets,
    prepare_allen_datasets,
    prepare_svoboda_datasets,
    prepare_contributed_sortings
)
from neuropixels_data_sep_2020.uploader import upload_files_to_compute_resource

aws_url = 'http://ephys1.laboratorybox.org'
compute_resource_uri = 'feed://1afa93d013bb6a5f68e87186c6bd43e11cefb9da2fddc8837c30a47c1a7bf72f?name=ephys1'

# jc = hi.JobCache(use_tempdir=True)
jc = None

with hi.RemoteJobHandler(compute_resource_uri=compute_resource_uri) as jh:
    with hi.Config(job_handler=jh, container=True, job_cache=jc):
        le_recordings1, le_sortings1 = prepare_cortexlab_datasets()
        le_recordings2 = prepare_cortexlab_drift_datasets()
        le_recordings3, le_sortings3 = prepare_allen_datasets()
        le_recordings4, le_sortings4, le_curation_actions4 = prepare_svoboda_datasets()
        hi.wait()

le_recordings = le_recordings1 + le_recordings2 + le_recordings3 + le_recordings4
le_sortings = le_sortings1 + le_sortings3 + le_sortings4
le_curation_actions = le_curation_actions4

le_recordings_by_id = {}
for r in le_recordings:
    le_recordings_by_id[r['recordingId']] = r

contributed_sortings = prepare_contributed_sortings(le_recordings_by_id)
le_sortings = le_sortings + contributed_sortings
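# The entries assembled above are plain dicts (see the test-recording
# example further below for the full shape). A hedged sketch of persisting
# each assembled recording object to kachery by content address: it assumes
# every entry carries 'recordingId' (as the lookup loop above requires) and
# a 'recordingObject' key, and ka.store_object mirrors the call used for
# the test recording further below; the basename is illustrative.

import kachery as ka

for r in le_recordings:
    # Store the recording object so a client can load it by content hash.
    r['recordingPath'] = ka.store_object(
        r['recordingObject'],
        basename='{}.json'.format(r['recordingId'])
    )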
def handle_message(self, msg):
    type0 = msg.get('type')
    if type0 == 'reportClientInfo':
        print('reported client info:', msg)
        self._feed_uri = msg['clientInfo']['feedUri']
        self._document_id = msg['clientInfo']['documentId']
        self._readonly = msg['clientInfo']['readOnly']
        if not self._feed_uri:
            self._feed_uri = 'feed://' + self._default_feed_id
            # self._feed_uri = kp.create_feed(feed_name='labbox-ephys-default').get_uri()
        # assert self._feed_uri.startswith('sha1://'), 'For now, feedUri must start with sha1://'
        self._feed = kp.load_feed(self._feed_uri)
        for key in ['recordings', 'sortings']:
            self._subfeed_positions[key] = 0
            subfeed_name = dict(key=key, documentId=self._document_id)
            subfeed = self._feed.get_subfeed(subfeed_name)
            for m in subfeed.get_next_messages(wait_msec=10):
                self._send_message({
                    'type': 'action',
                    'action': m['action']
                })
                self._subfeed_positions[key] = self._subfeed_positions[key] + 1
        self._send_message({'type': 'reportInitialLoadComplete'})
        if self._feed:
            qm = self._queued_document_action_messages
            self._queued_document_action_messages = []
            for m in qm:
                self.handle_message(m)
    elif type0 == 'appendDocumentAction':
        if self._readonly:
            print('Cannot append document action. This is a readonly feed.')
            return
        if self._feed is None:
            self._queued_document_action_messages.append(msg)
        else:
            subfeed_name = dict(key=msg['key'], documentId=self._document_id)
            subfeed = self._feed.get_subfeed(subfeed_name)
            subfeed.append_message({'action': msg['action']})
    elif type0 == 'hitherCreateJob':
        functionName = msg['functionName']
        kwargs = msg['kwargs']
        opts = msg['opts']
        client_job_id = msg['clientJobId']
        if opts.get('newHitherJobMethod', False):
            try:
                job = hi.run(functionName, **kwargs, labbox=self._labbox_context)
            except Exception as err:
                self._send_message({
                    'type': 'hitherJobCreationError',
                    'client_job_id': client_job_id,
                    'error': str(err) + ' (new method)'
                })
                return
            setattr(job, '_client_job_id', client_job_id)
            job_id = job._job_id
            self._jobs_by_id[job_id] = job
            print(f'======== Created hither job (2): {job_id} {functionName}')
            self._send_message({
                'type': 'hitherJobCreated',
                'job_id': job_id,
                'client_job_id': client_job_id
            })
        else:
            hither_config = opts.get('hither_config', {})
            job_handler_name = opts.get('job_handler_name', 'default')
            required_files = opts.get('required_files', {})
            jh = self._get_job_handler_from_name(job_handler_name)
            hither_config['job_handler'] = jh
            hither_config['required_files'] = required_files
            if hither_config['job_handler'].is_remote:
                hither_config['container'] = True
            if 'use_job_cache' in hither_config:
                if hither_config['use_job_cache']:
                    hither_config['job_cache'] = self._default_job_cache
                del hither_config['use_job_cache']
            with hi.Config(**hither_config):
                try:
                    job = hi.run(functionName, **kwargs)
                except Exception as err:
                    self._send_message({
                        'type': 'hitherJobCreationError',
                        'client_job_id': client_job_id,
                        'error': str(err) + ' (old method)'
                    })
                    return
            setattr(job, '_client_job_id', client_job_id)
            job_id = job._job_id
            self._jobs_by_id[job_id] = job
            print(f'======== Created hither job: {job_id} {functionName} ({job_handler_name})')
            self._send_message({
                'type': 'hitherJobCreated',
                'job_id': job_id,
                'client_job_id': client_job_id
            })
    elif type0 == 'hitherCancelJob':
        job_id = msg['job_id']
        assert job_id, 'Missing job_id'
        assert job_id in self._jobs_by_id, f'No job with id: {job_id}'
        job = self._jobs_by_id[job_id]
        job.cancel()
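# For reference, the message shapes this handler expects, reconstructed
# from the field accesses above; all concrete values are illustrative.

report_client_info_msg = {
    'type': 'reportClientInfo',
    'clientInfo': {
        'feedUri': '',          # empty string falls back to the default feed
        'documentId': 'default',
        'readOnly': False
    }
}

hither_create_job_msg = {
    'type': 'hitherCreateJob',
    'functionName': 'createjob_get_recording_info',
    'kwargs': {'recording_object': {}},   # placeholder kwargs
    'clientJobId': 'client-job-1',
    'opts': {
        'newHitherJobMethod': True
        # the old method instead reads 'hither_config', 'job_handler_name',
        # and 'required_files' from opts, with an optional 'use_job_cache'
        # flag inside hither_config
    }
}

hither_cancel_job_msg = {
    'type': 'hitherCancelJob',
    'job_id': '<job id reported in a hitherJobCreated message>'
}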
def preload_download_recording(labbox, recording_object):
    jh = labbox.get_job_handler('partition1')
    with hi.Config(job_handler=jh, container=jh.is_remote):
        return preload_download_recording_2.run(recording_object=recording_object)
def preload_check_sorting_downloaded(labbox, sorting_object):
    jh = labbox.get_job_handler('partition1')
    with hi.Config(job_handler=jh, container=jh.is_remote):
        return preload_check_sorting_downloaded_2.run(sorting_object=sorting_object)
recording_object = dict(
    recording_format='subrecording',
    data=dict(
        recording=recording_object,
        start_frame=0,
        end_frame=30000 * 60 * 10,
        channel_ids=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
    )
)

# jc = hi.JobCache(use_tempdir=True)
jc = None

with hi.RemoteJobHandler(
    # substitute your own compute resource URI here
    compute_resource_uri='feed://09b27ce6c71add9fe6effaf351fce98d867d6fa002333a8b06565b0a108fb0ba?name=ephys1'
    # compute_resource_uri='feed://644c145d5f6088623ee59f3437655e185657a6d9a9676294f26ae504423565fa?name=lke9849-12258-5f50fc6bb944'
) as jh:
    with hi.Config(container=True, job_cache=jc, job_handler=jh, required_files=recording_object):
        x = le.sorters.mountainsort4.run(recording_object=recording_object).wait()
        sorting_object = x['sorting_object']

le_recordings = []
le_sortings = []
le_curation_actions = []

le_recordings.append(dict(
    recordingId='test-recording-1',
    recordingLabel='test-recording-1',
    recordingPath=ka.store_object(recording_object, basename='test-recording-1.json'),
    recordingObject=recording_object,
    description='''
def createjob_get_recording_info(labbox, recording_object):
    jc = labbox.get_job_cache()
    jh = labbox.get_job_handler('partition1')
    with hi.Config(job_cache=jc, job_handler=jh, container=jh.is_remote):
        return get_recording_info.run(recording_object=recording_object)
def createjob_test_hello_ext1(labbox, x: float):
    jh = labbox.get_job_handler('partition1')
    jc = labbox.get_job_cache()
    with hi.Config(job_cache=jc, job_handler=jh, container=jh.is_remote):
        return test_hello_ext1.run(x=x)
import json

import hither as hi

def upload_files_to_compute_resource(x):
    print('Uploading to compute resource:')
    print(json.dumps(x, indent=4))
    # Run a no-op hither job with the files of x marked as required; the
    # active job handler transfers them to the compute resource as a side
    # effect.
    with hi.Config(required_files=[x], force_run=True):
        hi.noop.run()
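# A hedged usage sketch: the function relies on the enclosing hither
# configuration to know where the files should go, so a caller would
# typically invoke it inside a remote job handler scope. The compute
# resource URI below is a placeholder.

with hi.RemoteJobHandler(compute_resource_uri='feed://<your-compute-resource-uri>?name=example') as jh:
    with hi.Config(job_handler=jh):
        upload_files_to_compute_resource(recording_object)  # any kachery-backed object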