def _internal_serialize_result(result):
    """Serialize a hither result object into a plain dict suitable for logging.

    Stores console output and output files via kachery and records the
    result's metadata (retval, success, version, container, hashes, status).
    Does not mutate ``result`` — runtime_info is deep-copied before editing.
    """
    import kachery as ka
    # BUG FIX: the original annotation was Dict[Any], which is invalid
    # (typing.Dict requires key and value types) and raises TypeError when
    # the annotation expression is evaluated at runtime.
    ret: Dict[str, Any] = dict(
        output_files=dict()
    )
    ret['name'] = 'hither_result'
    # Deep-copy so storing console_out does not mutate the caller's object.
    ret['runtime_info'] = deepcopy(result.runtime_info)
    ret['runtime_info']['console_out'] = ka.store_object(
        ret['runtime_info'].get('console_out', ''))
    for oname in result._output_names:
        path = getattr(result.outputs, oname)._path
        if path is not None:
            ret['output_files'][oname] = ka.store_file(path)
        else:
            # Output was declared but never produced; record that explicitly.
            ret['output_files'][oname] = None
    ret['retval'] = result.retval
    ret['success'] = result.success
    ret['version'] = result.version
    ret['container'] = result.container
    ret['hash_object'] = result.hash_object
    ret['hash'] = ka.get_object_hash(result.hash_object)
    ret['status'] = result.status
    return ret
def __init__(self, *, recording_directory=None, timeseries_path=None, download=False, samplerate=None, geom=None, geom_path=None, params_path=None):
    """Initialize a recording extractor backed by an MDA timeseries file.

    Either pass ``recording_directory`` (expected to contain raw.mda,
    geom.csv and params.json) or pass ``timeseries_path`` (plus optionally
    ``geom``/``geom_path`` and ``params_path``/``samplerate``) directly.
    If ``download`` is True the timeseries file is realized locally via
    kachery before opening.
    """
    RecordingExtractor.__init__(self)
    # A recording directory overrides any individually supplied paths.
    # NOTE(review): plain '/' concatenation — presumably paths may be
    # kachery sha1dir:// URIs, where os.path.join would be wrong; confirm.
    if recording_directory:
        timeseries_path = recording_directory + '/raw.mda'
        geom_path = recording_directory + '/geom.csv'
        params_path = recording_directory + '/params.json'
    self._timeseries_path = timeseries_path
    if params_path:
        # params.json must provide the sampling rate.
        self._dataset_params = ka.load_object(params_path)
        self._samplerate = self._dataset_params['samplerate']
    else:
        self._dataset_params = dict(samplerate=samplerate)
        self._samplerate = samplerate
    if download:
        # Realize (download/cache) the file locally and use the local path.
        path0 = ka.load_file(path=self._timeseries_path)
        if not path0:
            raise Exception('Unable to realize file: ' + self._timeseries_path)
        self._timeseries_path = path0
    self._timeseries = DiskReadMda(self._timeseries_path)
    # NOTE(review): DiskReadMda(...) cannot normally evaluate to None, so
    # this guard looks unreachable; kept as defensive check.
    if self._timeseries is None:
        raise Exception('Unable to load timeseries: {}'.format(
            self._timeseries_path))
    X = self._timeseries
    # Geometry precedence: explicit array > geom.csv file > zeros.
    if geom is not None:
        self._geom = geom
    elif geom_path:
        geom_path2 = ka.load_file(geom_path)
        self._geom = np.genfromtxt(geom_path2, delimiter=',')
    else:
        # No geometry available: one 2-D zero coordinate per channel.
        self._geom = np.zeros((X.N1(), 2))
    # Channel-count mismatch between geometry and data: warn and fall back
    # to zero geometry rather than failing hard.
    if self._geom.shape[0] != X.N1():
        # raise Exception(
        #     'Incompatible dimensions between geom.csv and timeseries file {} <> {}'.format(self._geom.shape[0], X.N1()))
        print(
            'WARNING: Incompatible dimensions between geom.csv and timeseries file {} <> {}'
            .format(self._geom.shape[0], X.N1()))
        self._geom = np.zeros((X.N1(), 2))
    # Content hash of the recording (file hash + samplerate + geometry).
    self._hash = ka.get_object_hash(
        dict(timeseries=ka.get_file_hash(self._timeseries_path),
             samplerate=self._samplerate,
             geom=_json_serialize(self._geom)))
    # MDA convention here: N1 = channels, N2 = timepoints.
    self._num_channels = X.N1()
    self._num_timepoints = X.N2()
    for m in range(self._num_channels):
        self.set_channel_property(m, 'location', self._geom[m, :])
def _samplehash(recording):
    """Return a content hash summarizing a recording: its channel ids,
    total frame count, and a hash of sampled data."""
    summary = dict(
        channels=tuple(recording.get_channel_ids()),
        frames=recording.get_num_frames(),
        data=_samplehash_helper(recording),
    )
    return ka.get_object_hash(summary)
def hash(self):
    """Content hash of this filtered recording.

    Combines the subclass-provided filter params with the hash of the
    underlying recording. Raises if the subclass did not implement
    paramsForHash().
    """
    p = self.paramsForHash()  # pylint: disable=assignment-from-none
    if p is None:
        raise Exception(
            'Cannot compute hash. Params for hash not implemented.')
    obj = dict(
        name='FilterRecording',
        params=p,
        recording=self._recording.hash(),
    )
    return ka.get_object_hash(obj)
def _load_result(*, hash_object):
    """Look up a previously logged hither result by the hash of its
    hash_object; return the stored message dict, or None if absent."""
    import kachery as ka
    import loggery
    query = {
        'message.name': 'hither_result',
        'message.hash': ka.get_object_hash(hash_object),
    }
    doc = loggery.find_one(query)
    return doc['message'] if doc is not None else None
def _load_job_result_from_cache(*, hash_object, cache):
    """Look up a cached hither job result under the given loggery config.

    ``cache`` may be a preset name (str) or a loggery config dict.
    Returns the stored message dict, or None if no matching document exists.
    """
    import kachery as ka
    import loggery
    # isinstance rather than type(...) == str: also accepts str subclasses
    # and is the idiomatic type check.
    if isinstance(cache, str):
        cache = dict(preset=cache)
    with loggery.config(**cache):
        name0 = 'hither_result'
        hash0 = ka.get_object_hash(hash_object)
        doc = loggery.find_one({'message.name': name0, 'message.hash': hash0})
        if doc is None:
            return None
        return doc['message']
def hash(self):
    """Content hash of this downsampled recording extractor.

    Lazily computes and caches the underlying recording's hash: uses its
    ``hash`` attribute (string) or method if present, else falls back to
    sampling the data via _samplehash.
    """
    if not self._recording_hash:
        if hasattr(self._recording, 'hash'):
            # isinstance rather than type(...) == str: idiomatic, and also
            # accepts str subclasses.
            if isinstance(self._recording.hash, str):
                self._recording_hash = self._recording.hash
            else:
                self._recording_hash = self._recording.hash()
        else:
            self._recording_hash = _samplehash(self._recording)
    return ka.get_object_hash(
        dict(name='downsampled-recording-extractor',
             version=2,
             recording=self._recording_hash,
             ds_factor=self._ds_factor,
             input_has_minmax=self._input_has_minmax))
def _serialize_result(result):
    """Serialize a hither result object into a plain dict for logging.

    Stores stdout/stderr text and output files via kachery and records the
    result's retval and hashes. Does not mutate ``result``.
    """
    import kachery as ka
    from copy import deepcopy
    ret = dict(output_files=dict())
    ret['name'] = 'hither_result'
    # BUG FIX: copy runtime_info before replacing stdout/stderr with
    # kachery URIs — the original wrote the URIs back into the caller's
    # result.runtime_info dict in place.
    ret['runtime_info'] = deepcopy(result.runtime_info)
    ret['runtime_info']['stdout'] = ka.store_text(
        ret['runtime_info']['stdout'])
    ret['runtime_info']['stderr'] = ka.store_text(
        ret['runtime_info']['stderr'])
    for oname in result._output_names:
        path = getattr(result.outputs, oname)._path
        ret['output_files'][oname] = ka.store_file(path)
    ret['retval'] = result.retval
    ret['hash_object'] = result.hash_object
    ret['hash'] = ka.get_object_hash(result.hash_object)
    return ret
def hash(self):
    """Content hash of this sorting: the firings file hash combined with
    the sampling frequency."""
    obj = dict(
        firings=ka.get_file_hash(self._firings_path),
        samplerate=self._sampling_frequency,
    )
    return ka.get_object_hash(obj)
def hash(self) -> str:
    """Return the kachery hash of this object's serialized representation."""
    serialized = self.object()
    return ka.get_object_hash(serialized)