def javascript_state_changed(self, prev_state, state):
        self._set_status('running', 'Running TwoPhotonSeries')

        nwb_path = state.get('nwb_path', None)
        download_from = state.get('download_from', [])

        if not nwb_path:
            self._set_error('Missing nwb_path')
            return

        mt.configDownloadFrom(download_from)

        nwb_path2 = mt.realizeFile(nwb_path)
        if not nwb_path2:
            self._set_error('Unable to realize nwb file: {}'.format(nwb_path))
            return
        
        self._set_status('running', 'Extracting .mp4 data')
        outputs = ExtractTwoPhotonSeriesMp4.execute(nwb_in=nwb_path2, mp4_out={'ext': '.mp4'}).outputs

        self._set_status('running', 'Reading .mp4 data')
        mp4_fname = mt.realizeFile(outputs['mp4_out'])
        with open(mp4_fname, 'rb') as f:
            video_data = f.read()

        self._set_status('running', 'Encoding .mp4 data')
        video_data_b64 = base64.b64encode(video_data).decode()
        video_url = 'data:video/mp4;base64,{}'.format(video_data_b64)

        self._set_status('running', 'Setting .mp4 data to python state')
        self.set_python_state(dict(
            video_url=video_url,
            status='finished',
            status_message=''
        ))
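The _set_status and _set_error helpers are not defined in this snippet; a minimal sketch of what they might look like, inferred from the set_python_state(status=..., status_message=...) pattern in the later examples (the method bodies here are assumptions, not the project's actual implementation):

    def _set_status(self, status, status_message=''):
        # Assumed convention: mirror the status fields into the python state.
        self.set_python_state(dict(status=status, status_message=status_message))

    def _set_error(self, status_message):
        # Assumed convention: an error is a status of 'error' plus a message.
        self._set_status('error', status_message)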
Example #2
def _download_recordings(*, jobs):
    for job in jobs:
        val = mt.getValue(key=job)
        if not val:
            if 'recording' in job:
                if 'directory' in job['recording']:
                    dsdir = job['recording']['directory']
                    fname = dsdir + '/raw.mda'
                    print('REALIZING FILE: ' + fname)
                    mt.realizeFile(path=fname)
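A usage sketch; the job dictionaries below are hypothetical but follow the structure the function inspects:

    jobs = [
        dict(recording=dict(directory='sha1dir://.../recording1')),  # hypothetical paths
        dict(recording=dict(directory='sha1dir://.../recording2')),
    ]
    _download_recordings(jobs=jobs)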
Example #3
    def javascript_state_changed(self, prev_state, state):
        self.set_python_state(dict(status='running', status_message='Running'))

        # get javascript state
        download_from = state.get('download_from', [])
        path = state.get('path', None)
        name = state.get('name', None)

        if path and name:
            mt.configDownloadFrom(download_from)
            if path.endswith('.nwb'):
                self.set_python_state(
                    dict(status_message='Realizing object from nwb file: {}'.
                         format(path)))
                obj = nwb_to_dict(path, use_cache=True)
            else:
                self.set_python_state(
                    dict(status_message='Realizing object: {}'.format(path)))
                obj = mt.loadObject(path=path)
            if not obj:
                self.set_python_state(
                    dict(status='error',
                         status_message='Unable to realize object: {}'.format(
                             path)))
                return
            datasets = obj['general']['subject']['cortical_surfaces'][name][
                '_datasets']
            faces0 = np.load(mt.realizeFile(datasets['faces']['_data']))
            vertices = np.load(mt.realizeFile(datasets['vertices']['_data'])).T

            # there's a better way to do the following
            # (need to get it into a single vector format)
            faces = []
            for j in range(faces0.shape[0]):
                # 3 = #vertices in polygon (assume a triangulation)
                faces.extend([3, faces0[j, 0], faces0[j, 1], faces0[j, 2]])
            faces = np.array(faces)

            # return this python state to the javascript
            self.set_python_state(
                dict(faces=faces,
                     vertices=vertices,
                     status='finished',
                     status_message='Done.'))
        else:
            self.set_python_state(
                dict(status='error',
                     status_message='Missing path and/or name'))
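The face-flattening loop above (and again in example #14 below) can be vectorized, as the comment suggests; a minimal NumPy sketch, assuming faces0 is an (n, 3) integer array of triangle indices:

    import numpy as np

    def flatten_triangles(faces0):
        # Prepend the vertex count (3, triangulation assumed) to each row,
        # then flatten row-major into the single-vector format [3, i, j, k, 3, ...].
        counts = np.full((faces0.shape[0], 1), 3, dtype=faces0.dtype)
        return np.hstack([counts, faces0]).ravel()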
Example #4
def load_ntrode(path, *, name, epoch_name, processed_path=None):
    # use the .geom.csv if it exists (we assume path ends with .mda)
    geom_file = path[0:-4] + '.geom.csv'
    if mt.findFile(geom_file):
        print('Using geometry file: {}'.format(geom_file))
    else:
        # if it doesn't exist, we will create a trivial geom later
        geom_file = None
    
    path2 = mt.realizeFile(path)
    if not path2:
        raise Exception('Unable to realize file: ' + path)
    
    X = mdaio.DiskReadMda(path2)
    num_channels = X.N1()
    num_timepoints = X.N2()

    processed_info = load_ntrode_processed_info(processed_path, recording_path=path, epoch_name=epoch_name, ntrode_name=name)

    # here's the structure for representing ntrode information
    return dict(
        type='ntrode',
        name=name,
        epoch_name=epoch_name,
        path=path,
        processed_path=processed_path,
        recording_file=path,
        geom_file=geom_file,
        num_channels=num_channels,
        num_timepoints=num_timepoints,
        samplerate=30000,  # fix this
        processed_info=processed_info
    )
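A usage sketch with a hypothetical path and names:

    ntrode = load_ntrode(
        'sha1dir://.../epoch01/ntrode02.mda',  # hypothetical .mda path
        name='ntrode02',
        epoch_name='epoch01'
    )
    print(ntrode['num_channels'], ntrode['num_timepoints'])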
Example #5
def _load_nwb_json(path):
    path2 = mt.realizeFile(path=path)
    if not path2:
        raise Exception('Unable to realize file: {}'.format(path))
    with open(path2, 'r') as f:
        obj = json.load(f)
    return obj
Example #6
 def javascript_state_changed(self, prev_state, state):
     self.set_python_state(dict(status='running', status_message='Running'))
     mt.configDownloadFrom(state.get('download_from', []))
     path = state.get('path', None)
     if path:
         self.set_python_state(
             dict(status_message='Realizing file: {}'.format(path)))
         if path.endswith('.csv'):
             path2 = mt.realizeFile(path)
             if not path2:
                 self.set_python_state(
                     dict(
                         status='error',
                         status_message='Unable to realize file: {}'.format(
                             path)))
                 return
             self.set_python_state(dict(status_message='Loading locations'))
             x = np.genfromtxt(path2, delimiter=',')
             locations = x.T
             num_elec = x.shape[0]
             labels = ['{}'.format(a) for a in range(1, num_elec + 1)]
         else:
             raise Exception('Unexpected file type for {}'.format(path))
     else:
         locations = [[0, 0], [1, 0], [1, 1], [2, 1]]
         labels = ['1', '2', '3', '4']
     state = dict()
     state['locations'] = locations
     state['labels'] = labels
     state['status'] = 'finished'
     self.set_python_state(state)
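The .csv branch above expects one electrode per row with comma-separated coordinates; a minimal sketch of writing a compatible file (the filename is arbitrary):

    import numpy as np

    # One row per electrode: x,y (matches the default locations above)
    locations = np.array([[0, 0], [1, 0], [1, 1], [2, 1]], dtype=float)
    np.savetxt('geom.csv', locations, delimiter=',')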
Example #7
def test_mandelbrot():
    from mountaintools import client as mt
    import mlprocessors as mlpr
    from .mandelbrot import compute_mandelbrot, show_mandelbrot, combine_subsampled_mandelbrot, ComputeMandelbrot, compute_mandelbrot_parallel
    import numpy as np

    num_x = 4000

    result = ComputeMandelbrot.execute(
        num_iter=10,
        num_x=num_x,
        output_npy=dict(ext='.npy', upload=True)
    )
    X = np.load(mt.realizeFile(result.outputs['output_npy']))

    Y = compute_mandelbrot_parallel(
        num_iter=10,
        num_x=num_x,
        num_parallel=3,
        use_slurm=False,
        _force_run=True
    )

    Z = compute_mandelbrot_parallel(
        num_iter=10,
        num_x=num_x,
        num_parallel=3,
        use_slurm=False,
        _force_run=False
    )

    print(X.shape, Y.shape, Z.shape)

    assert np.all(np.isclose(X, Y))
    assert np.all(np.isclose(X, Z))
Example #8
    def __init__(self, dataset_directory, *, download=True, raw_fname='raw.mda', params_fname='params.json'):
        RecordingExtractor.__init__(self)
        self._dataset_directory = dataset_directory
        self._timeseries_path = dataset_directory + '/' + raw_fname
        self._dataset_params = read_dataset_params(dataset_directory, params_fname)
        self._samplerate = self._dataset_params['samplerate'] * 1.0
        if download:
            path0 = mt.realizeFile(path=self._timeseries_path)
            if not path0:
                raise Exception('Unable to realize file: ' + self._timeseries_path)
            self._timeseries_path = path0

        geom0 = dataset_directory + '/geom.csv'
        self._geom_fname = mt.realizeFile(path=geom0)
        self._geom = np.genfromtxt(self._geom_fname, delimiter=',')

        timeseries_path_or_url = self._timeseries_path
        if not mt.isLocalPath(timeseries_path_or_url):
            a = mt.findFile(timeseries_path_or_url)
            if not a:
                raise Exception('Cannot find timeseries file: ' + timeseries_path_or_url)
            timeseries_path_or_url = a

        # if is_kbucket_url(timeseries0):
        #     download_needed = is_url(ca.findFile(path=timeseries0))
        # else:
        #     download_needed = is_url(timeseries0)
        # if download and download_needed:
        #     print('Downloading file: ' + timeseries0)
        #     self._timeseries_path = ca.realizeFile(path=timeseries0)
        #     print('Done.')
        # else:
        #     self._timeseries_path = ca.findFile(path=timeseries0)

        X = DiskReadMda(timeseries_path_or_url)
        if self._geom.shape[0] != X.N1():
            # raise Exception(
            #    'Incompatible dimensions between geom.csv and timeseries file {} <> {}'.format(self._geom.shape[0], X.N1()))
            print('WARNING: Incompatible dimensions between geom.csv and timeseries file {} <> {}'.format(self._geom.shape[0], X.N1()))
            self._geom = np.zeros((X.N1(), 2))

        self._num_channels = X.N1()
        self._num_timepoints = X.N2()
        for m in range(self._num_channels):
            self.set_channel_property(m, 'location', self._geom[m, :])
Example #9
def read_dataset_params(dsdir, params_fname):
    fname1 = dsdir + '/' + params_fname
    fname2 = mt.realizeFile(path=fname1)
    if not fname2:
        raise Exception('Unable to find file: ' + fname1)
    if not os.path.exists(fname2):
        raise Exception('Dataset parameter file does not exist: ' + fname2)
    with open(fname2) as f:
        return json.load(f)
Example #10
    def run(self):
        nwb_obj = nwb_to_dict(self.nwb_in, use_cache=True)
        npy_path = nwb_obj['acquisition']['TwoPhotonSeries']['_datasets']['data']['_data']
        npy_path2 = mt.realizeFile(npy_path)
        if not npy_path2:
            nwb_obj = nwb_to_dict(self.nwb_in, use_cache=False)
            npy_path = nwb_obj['acquisition']['TwoPhotonSeries']['_datasets']['data']['_data']
            npy_path2 = mt.realizeFile(npy_path)
            if not npy_path2:
                self._set_error('Unable to realize npy file: {}'.format(npy_path))
                return
        X = np.load(npy_path2)

        # Note that there is a bug in imageio.mimwrite that prevents us from
        # writing to a memory buffer.
        # See: https://github.com/imageio/imageio/issues/157

        imageio.mimwrite(self.mp4_out, X, format='mp4', fps=10)
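Because of that imageio limitation, a file-based round trip is the usual workaround when the encoded bytes are needed in memory; a minimal sketch:

    import os
    import tempfile

    import imageio

    def mp4_bytes(frames, fps=10):
        # Write to a named temporary file and read the bytes back, since
        # imageio's mp4 writer cannot target an in-memory buffer.
        fd, tmp_path = tempfile.mkstemp(suffix='.mp4')
        os.close(fd)
        try:
            imageio.mimwrite(tmp_path, frames, format='mp4', fps=fps)
            with open(tmp_path, 'rb') as f:
                return f.read()
        finally:
            os.remove(tmp_path)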
Example #11
def test_mandelbrot_errors():
    from mountaintools import client as mt
    import mlprocessors as mlpr
    from .mandelbrot import ComputeMandelbrotWithError, combine_subsampled_mandelbrot
    import numpy as np

    num_iter = 10
    num_x = 50
    num_parallel = 1
    subsampling_factor = num_parallel

    job_args = [
        dict(
            num_x=num_x,
            num_iter=num_iter,
            subsampling_factor=subsampling_factor,
            subsampling_offset=offset,
            output_npy=dict(ext='.npy', upload=True),
            throw_error=(offset == 0),
            _force_run=False
            # _container='sha1://87319c2856f312ccc3187927ae899d1d67b066f9/03-20-2019/mountaintools_basic.simg'
        )
        for offset in range(subsampling_factor)
    ]

    jobs = ComputeMandelbrotWithError.createJobs(job_args)

    results = []
    for job in jobs:
        results.append(job.execute())

    X_list = []
    for result0 in results:
        if result0.retcode == 0:
            X0 = np.load(mt.realizeFile(result0.outputs['output_npy']))
            X_list.append(X0)
        else:
            print('Warning: retcode is non-zero for job.')
            print('============================================= BEGIN CONSOLE OUT ==========================================')
            print(mt.realizeFile(result0.console_out))
            print('============================================= END CONSOLE OUT ==========================================')

    if len(X_list) > 0:
        _ = combine_subsampled_mandelbrot(X_list)
Example #12
def read_dataset_params(dsdir):
    # ca = _load_required_modules()
    fname1 = dsdir + '/params.json'
    fname2 = mt.realizeFile(path=fname1)
    if not fname2:
        raise Exception('Unable to find file: ' + fname1)
    if not os.path.exists(fname2):
        raise Exception('Dataset parameter file does not exist: ' + fname2)
    with open(fname2) as f:
        return json.load(f)
Example #13
    def __init__(self, firings_file):
        SortingExtractor.__init__(self)
        if is_kbucket_url(firings_file):
            download_needed = is_url(mt.findFile(path=firings_file))
        else:
            download_needed = is_url(firings_file)
        if download_needed:
            print('Downloading file: ' + firings_file)
            self._firings_path = mt.realizeFile(path=firings_file)
            print('Done.')
        else:
            self._firings_path = mt.realizeFile(path=firings_file)
        if not self._firings_path:
            raise Exception('Unable to realize firings file: ' + firings_file)

        self._firings = readmda(self._firings_path)
        # firings.mda convention: row 0 holds primary channels, row 1 holds
        # event times (in samples), row 2 holds unit labels
        self._times = self._firings[1, :]
        self._labels = self._firings[2, :]
        self._unit_ids = np.unique(self._labels).astype(int)
Example #14
    def javascript_state_changed(self, prev_state, state):
        self._set_status('running', 'Running surface3D')
        mt.configDownloadFrom(state.get('download_from', []))

        python_state = dict()

        path0 = state.get('faces_path', None)
        if path0:
            x = mt.realizeFile(path0)
            if not x:
                self._set_error('Unable to load file: {}'.format(path0))
                return
            faces0 = np.load(x)
            # there's a better way to do the following
            # (need to get it into a single vector format)
            faces = []
            for j in range(faces0.shape[0]):
                # 3 = #vertices in polygon (assume a triangulation)
                faces.extend([3, faces0[j, 0], faces0[j, 1], faces0[j, 2]])
            faces = np.array(faces)
            python_state['faces'] = faces

        path0 = state.get('vertices_path', None)
        if path0:
            x = mt.realizeFile(path0)
            if not x:
                self._set_error('Unable to load file: {}'.format(path0))
                return
            vertices0 = np.load(x)
            python_state['vertices'] = vertices0.T

        path0 = state.get('scalars_path', None)
        if path0:
            x = mt.realizeFile(path0)
            if not x:
                self._set_error('Unable to load file: {}'.format(path0))
                return
            x = np.load(x)
            python_state['scalars'] = x

        python_state['status'] = 'finished'
        python_state['status_message'] = 'finished'
        self.set_python_state(python_state)
Example #15
    def javascript_state_changed(self, prev_state, state):
        self.set_python_state(dict(status='running', status_message='Running'))
        mt.configDownloadFrom(state.get('download_from', []))

        max_samples = state.get('max_samples')
        max_dt_msec = state.get('max_dt_msec')
        bin_size_msec = state.get('bin_size_msec')
        if not max_dt_msec:
            return

        firings_path = state.get('firingsPath', None)
        if not firings_path:
            self.set_python_state(dict(
                status='error',
                status_message='No firingsPath provided'
            ))
            return

        samplerate = state.get('samplerate', None)
        if not samplerate:
            self.set_python_state(dict(
                status='error',
                status_message='No samplerate provided'
            ))
            return

        self.set_python_state(dict(status_message='Realizing file: {}'.format(firings_path)))
        firings_path2 = mt.realizeFile(firings_path)
        if not firings_path2:
            self.set_python_state(dict(
                status='error',
                status_message='Unable to realize file: {}'.format(firings_path)
            ))
            return

        result = ComputeAutocorrelograms.execute(
            firings_path=firings_path2,
            samplerate=samplerate,
            max_samples=max_samples,
            bin_size_msec=bin_size_msec,
            max_dt_msec=max_dt_msec,
            json_out=dict(ext='.json')
        )
        if result.retcode != 0:
            self.set_python_state(dict(
                status='error',
                status_message='Error computing autocorrelogram.'
            ))
            return

        output = mt.loadObject(path=result.outputs['json_out'])
        self.set_python_state(dict(
            status='finished',
            output=output
        ))
Example #16
 def plot(self, name: str, format: str = 'image'):
     plots = self._obj['summary'].get('plots', dict())
     url = plots[name]
     if format == 'url':
         return url
     else:
         path = mt.realizeFile(url)
         if format == 'image':
             return Image.open(path)
         elif format == 'path':
             return path
         else:
             raise Exception('Invalid format: ' + format)
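A usage sketch, assuming result is an instance of the class this method belongs to and that its summary includes a plot named 'waveforms' (a hypothetical name):

    img = result.plot('waveforms')                        # PIL.Image (default)
    local_path = result.plot('waveforms', format='path')  # realized local path
    url = result.plot('waveforms', format='url')          # URL, no download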
Example #17
def h5_to_dict(fname, *, upload_to=None, use_cache=False):
    if use_cache:
        result = H5ToDict.execute(h5_in=fname,
                                  upload_to=upload_to or '',
                                  json_out={'ext': '.json'})
        if result.retcode != 0:
            raise Exception('Problem running H5ToDict.')
        return mt.loadObject(path=result.outputs['json_out'])

    fname = mt.realizeFile(path=fname)
    opts = dict(upload_to=upload_to)
    with h5py.File(fname, 'r') as f:
        opts['file'] = f
        return _h5_to_dict(f, opts=opts, name=None)
Example #18
 def plot(self, name: str, format: str = 'image'):
     assert self._summary_result is not None, 'No summary result found'
     plots = self._summary_result.get('plots', dict())
     url = plots[name]
     if format == 'url':
         return url
     else:
         path = mt.realizeFile(url)
         if format == 'image':
             return Image.open(path)
         elif format == 'path':
             return path
         else:
             raise Exception('Invalid format: ' + format)
Example #19
def _extract_time_ranges(obj, time_ranges):
    positions_path = obj['processing']['Behavior']['Position']['Position'][
        '_datasets']['data']['_data']
    timestamps_path = obj['processing']['Behavior']['Position']['Position'][
        '_datasets']['timestamps']['_data']
    spike_times_path = obj['units']['_datasets']['spike_times']['_data']
    spike_times_index = obj['units']['_datasets']['spike_times_index']['_data']
    positions = np.load(mt.realizeFile(path=positions_path))
    timestamps = np.load(mt.realizeFile(path=timestamps_path))
    spike_times = np.load(mt.realizeFile(path=spike_times_path))
    selector = np.full(timestamps.shape, False)
    for time_range in time_ranges:
        a = (time_range[0] <= timestamps) & (timestamps < time_range[1])
        selector = selector | a
    if np.all(selector):
        return obj
    timestamps = timestamps[selector]
    positions = positions[selector, :]

    selector2 = np.full(spike_times.shape, False)
    for time_range in time_ranges:
        a = (time_range[0] <= spike_times) & (spike_times < time_range[1])
        selector2 = selector2 | a
    for j in range(len(spike_times_index)):
        spike_times_index[j] = np.count_nonzero(
            selector2[:spike_times_index[j]])
    spike_times = spike_times[selector2]

    obj = deepcopy(obj)
    obj['processing']['Behavior']['Position']['Position']['_datasets']['data'][
        '_data'] = _np_snapshot(positions)
    obj['processing']['Behavior']['Position']['Position']['_datasets'][
        'timestamps']['_data'] = _np_snapshot(timestamps)
    obj['units']['_datasets']['spike_times']['_data'] = _np_snapshot(
        spike_times)
    obj['units']['_datasets']['spike_times_index']['_data'] = spike_times_index
    return obj
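The spike_times_index remapping above is the subtle step: each entry marks the exclusive end of one unit's block in spike_times, so after filtering it must become the count of kept events before that boundary. A toy sketch of the idea:

    import numpy as np

    spike_times = np.array([0.1, 0.5, 1.2, 2.0, 2.5])
    spike_times_index = np.array([3, 5])  # unit 1: events 0-2, unit 2: events 3-4

    keep = (0.0 <= spike_times) & (spike_times < 2.0)
    new_index = np.array([np.count_nonzero(keep[:i]) for i in spike_times_index])

    print(new_index)          # [3 3]: unit 1 keeps 3 events, unit 2 keeps none
    print(spike_times[keep])  # [0.1 0.5 1.2]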
Example #20
    def __init__(self,
                 *,
                 recording_directory=None,
                 timeseries_path=None,
                 download=True,
                 samplerate=None,
                 geom=None,
                 geom_path=None,
                 params_path=None):
        RecordingExtractor.__init__(self)
        if recording_directory:
            timeseries_path = recording_directory + '/raw.mda'
            geom_path = recording_directory + '/geom.csv'
            params_path = recording_directory + '/params.json'
        self._timeseries_path = timeseries_path
        if params_path:
            self._dataset_params = read_dataset_params(params_path)
            self._samplerate = self._dataset_params['samplerate']
        else:
            self._dataset_params = dict(samplerate=samplerate)
            self._samplerate = samplerate

        if download:
            path0 = mt.realizeFile(path=self._timeseries_path)
            if not path0:
                raise Exception('Unable to realize file: ' +
                                self._timeseries_path)
            self._timeseries_path = path0

        X = DiskReadMda(self._timeseries_path)
        if geom:
            self._geom = geom
        elif geom_path:
            self._geom = np.genfromtxt(geom_path, delimiter=',')
        else:
            self._geom = np.zeros((X.N1(), 2))

        if self._geom.shape[0] != X.N1():
            # raise Exception(
            #    'Incompatible dimensions between geom.csv and timeseries file {} <> {}'.format(self._geom.shape[0], X.N1()))
            print(
                'WARNING: Incompatible dimensions between geom.csv and timeseries file {} <> {}'
                .format(self._geom.shape[0], X.N1()))
            self._geom = np.zeros((X.N1(), 2))

        self._num_channels = X.N1()
        self._num_timepoints = X.N2()
        for m in range(self._num_channels):
            self.set_channel_property(m, 'location', self._geom[m, :])
Example #21
    def javascript_state_changed(self, prev_state, state):
        self.set_state(dict(status='running', status_message='Running'))

        vtp_path = state.get('vtp_path', None)
        download_from = state.get('download_from', None)
        scalar_info = state.get('scalar_info', None)
        vector_field_info = state.get('vector_field_info', None)
        arrow_subsample_factor = state.get('arrow_subsample_factor', None)

        if not vtp_path:
            self.set_state(dict(status='error', status_message='No vtp_path'))
            return

        if download_from:
            mt.configDownloadFrom(download_from)
        fname = mt.realizeFile(path=vtp_path)

        reader = vtkXMLPolyDataReader()
        reader.SetFileName(fname)
        reader.Update()
        X = reader.GetOutput()
        vertices = vtk_to_numpy(X.GetPoints().GetData()).T
        faces = vtk_to_numpy(X.GetPolys().GetData())

        if scalar_info:
            scalars = vtk_to_numpy(X.GetPointData().GetArray(
                scalar_info['name']))
            scalars = scalars[:, scalar_info['component']]
        else:
            scalars = None

        if vector_field_info:
            vector_field = vtk_to_numpy(X.GetPointData().GetArray(
                vector_field_info['name']))
            vector_field = vector_field[:, vector_field_info['components']]
            arrows = [
                dict(start=vertices[:, j] - vector_field[j, :].T / 2,
                     end=vertices[:, j] + vector_field[j, :].T / 2) for j in
                range(0, vector_field.shape[0], arrow_subsample_factor)
            ]
        else:
            arrows = None

        self.set_state(
            dict(status='finished',
                 vertices=vertices,
                 faces=faces,
                 scalars=scalars,
                 arrows=arrows))
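This snippet assumes vtkXMLPolyDataReader and vtk_to_numpy are already in scope; the standard imports for them are:

    from vtk import vtkXMLPolyDataReader
    from vtk.util.numpy_support import vtk_to_numpy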
Example #22
    def javascript_state_changed(self, prev_state, state):
        self.set_python_state(dict(status='running', status_message='Running'))
        mt.configDownloadFrom(state.get('download_from', []))
        nwb_path = state.get('nwb_path', None)
        downsample_factor = state.get('downsample_factor', 1)
        if nwb_path:
            if nwb_path.endswith('.nwb'):
                self.set_python_state(
                    dict(status_message='Realizing object from nwb file: {}'.
                         format(nwb_path)))
                obj = h5_to_dict(nwb_path, use_cache=True)
            else:
                self.set_python_state(
                    dict(status_message='Realizing object: {}'.format(
                        nwb_path)))
                obj = mt.loadObject(path=nwb_path)
            if not obj:
                self.set_python_state(
                    dict(status='error',
                         status_message='Unable to realize object: {}'.format(
                             nwb_path)))
                return
            try:
                positions_path = obj['processing']['Behavior']['Position'][
                    'Position']['_datasets']['data']['_data']
            except Exception:
                self.set_python_state(
                    dict(status='error',
                         status_message=
                         'Problem extracting behavior positions in file: {}'.
                         format(nwb_path)))
                return
            positions = np.load(mt.realizeFile(path=positions_path))
            positions = positions[::downsample_factor, :]

            self.set_python_state(
                dict(status_message='Finished loading positions'))
            state['positions'] = positions
            state['status'] = 'finished'
            self.set_python_state(state)
        else:
            self.set_python_state(
                dict(status='error',
                     status_message='Missing in state: nwb_path'))
Example #23
def _initialize(context, unit_id, connection_to_parent):
    with StdoutSender(connection=connection_to_parent):
        try:
            print('***** Preparing efficient access recording extractor...')
            earx = EfficientAccessRecordingExtractor(
                recording=context.recordingExtractor())
            print('***** Computing unit detail...')
            path0 = mt.realizeFile(path=ComputeUnitDetail.execute(
                recording=earx,
                sorting=context.trueSortingExtractor(),
                unit_id=unit_id,
                output=True).outputs['output'])
            with open(path0, 'rb') as f:
                result0 = pickle.load(f)
            print('*****')
        except:
            traceback.print_exc()
            raise
    connection_to_parent.send(dict(name='result', result=result0))
Example #24
def compute_units_info_b(*,
                         recording_dir,
                         firings,
                         channel_ids=[],
                         unit_ids=[],
                         return_format='json'):
    out = ComputeUnitsInfo.execute(recording_dir=recording_dir,
                                   firings=firings,
                                   unit_ids=unit_ids,
                                   channel_ids=channel_ids,
                                   json_out={
                                       'ext': '.json'
                                   },
                                   _container='default').outputs
    fname = out['json_out']
    if return_format == 'filename':
        return fname
    else:
        fname = mt.realizeFile(path=fname)
        with open(fname) as f:
            return json.load(f)
Example #25
    def __init__(self, *, path=None, recording=None, _dest_path=None):
        #import h5py
        se.RecordingExtractor.__init__(self)
        if path is not None:
            if recording is not None:
                raise Exception(
                    'Cannot pass both path and recording to EfficientAccessRecordingExtractor.'
                )
            self._path = path
        elif recording is not None:
            if not hasattr(recording, 'hash'):
                print('''
                Warning: Recording does not have the hash attribute.
                Using sampling method to compute a hash.''')
                setattr(recording, 'hash', _samplehash(recording))
            result = CreateEfficientAccessRecordingFile.execute(
                recording=recording,
                hdf5_out=dict(ext='.hdf5', dest_path=_dest_path))
            path0 = result.outputs['hdf5_out']
            self._path = mt.realizeFile(path=path0)
        else:
            raise Exception('Missing argument: path or recording')

        with h5py.File(self._path, "r") as f:
            self._num_segments = int(np.array(f.get('num_segments'))[0])
            self._segment_size = int(np.array(f.get('segment_size'))[0])
            self._num_channels = int(np.array(f.get('num_channels'))[0])
            self._channel_ids = np.array(f.get('channel_ids')).tolist()
            self._num_timepoints = int(np.array(f.get('num_timepoints'))[0])
            self._samplerate = np.array(f.get('samplerate'))[0]
            self._recording_hash = np.array(
                f.get('recording_hash'))[0].decode()
            assert type(self._recording_hash) == str
            geom = np.array(f.get('geom'))
            channel_locations = [
                geom[m, :].ravel() for m in range(self._num_channels)
            ]
            self.set_channel_locations(channel_ids=self._channel_ids,
                                       locations=channel_locations)
Example #26
File: Video.py (project: rgerkin/ephys-viz)
    def javascript_state_changed(self, prev_state, state):
        self._set_status('running', 'Running')

        path = state.get('path', None)
        download_from = state.get('download_from', [])
        mt.configDownloadFrom(download_from)

        if not path:
            self._set_status('finished')
            return

        path = mt.realizeFile(path)
        if not path:
            self._set_error('Unable to realize file.')
            return

        with open(path, 'rb') as f:
            video_data = f.read()
        video_data_b64 = base64.b64encode(video_data).decode()
        self.set_python_state(
            dict(video_data_b64=video_data_b64,
                 status='finished',
                 status_message=''))
Example #27
def nwb_to_dict(fname,
                *,
                upload_to=None,
                use_cache=False,
                exclude_data=False,
                verbose=False):
    if fname.endswith('.json'):
        with open(fname, 'r') as f:
            return simplejson.load(f)
    if use_cache:
        result = NWBToDict.execute(h5_in=fname,
                                   upload_to=upload_to or '',
                                   json_out={'ext': '.json'})
        if result.retcode != 0:
            raise Exception('Problem running NWBToDict.')
        return mt.loadObject(path=result.outputs['json_out'])

    fname = mt.realizeFile(path=fname)
    opts = dict(upload_to=upload_to,
                exclude_data=exclude_data,
                verbose=verbose)
    with h5py.File(fname, 'r') as f:
        opts['file'] = f
        return _nwb_to_dict(f, opts=opts, name=None, path='')
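A usage sketch with a hypothetical NWB path:

    obj = nwb_to_dict('/path/to/session.nwb', use_cache=True)
    print(list(obj.get('acquisition', {}).keys()))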
Example #28
    def javascript_state_changed(self, prev_state, state):
        self.set_status('running')
        self.set_status_message('Running')

        mt.configDownloadFrom(state.get('download_from', []))
        nwb_query = state.get('nwb_query', None)
        downsample_factor = state.get('downsample_factor', 1)

        if nwb_query:
            self.set_status_message('Loading nwb object')
            obj = _load_nwb_object(nwb_query)
            if not obj:
                self.set_error('Unable to load nwb object')
                return

            self.set_status_message('Loading positions and timestamps')
            try:
                positions_path = obj['processing']['Behavior']['Position'][
                    'Position']['_datasets']['data']['_data']
                timestamps_path = obj['processing']['Behavior']['Position'][
                    'Position']['_datasets']['timestamps']['_data']
            except Exception:
                self.set_error(
                    'Problem extracting behavior positions or timestamps')
                return
            positions = np.load(mt.realizeFile(path=positions_path))
            positions = positions[::downsample_factor, :]
            timestamps = np.load(mt.realizeFile(path=timestamps_path))
            timestamps = timestamps[::downsample_factor]

            self.set_status_message('Loading spike times')
            try:
                spike_times_path = obj['units']['_datasets']['spike_times'][
                    '_data']
                spike_times_index = obj['units']['_datasets'][
                    'spike_times_index']['_data']
                spike_times_index_id = obj['units']['_datasets']['id']['_data']
                if 'cluster_name' in obj['units']['_datasets']:
                    cluster_names = obj['units']['_datasets']['cluster_name'][
                        '_data']
                else:
                    cluster_names = []
            except Exception:
                self.set_error('Problem extracting spike times')
                return
            spike_times = np.load(mt.realizeFile(path=spike_times_path))

            spike_time_indices = _find_closest(timestamps, spike_times)
            spike_labels = np.zeros(spike_time_indices.shape)
            aa = 0
            for i, val in enumerate(spike_times_index):
                spike_labels[aa:val] = spike_times_index_id[i]
                aa = val

            all_unit_ids = sorted(list(set(spike_labels)))

            state['positions'] = positions
            state['status'] = 'finished'
            state['spike_time_indices'] = spike_time_indices
            state['spike_labels'] = spike_labels
            state['all_unit_ids'] = all_unit_ids
            state['cluster_names'] = cluster_names
            self.set_python_state(state)
            self.set_status('finished')
        else:
            self.set_error('Missing in state: nwb_query')
Example #29
        return None

# studysets_to_include = ['PAIRED_BOYDEN', 'PAIRED_CRCNS_HC1', 'PAIRED_MEA64C_YGER', 'PAIRED_KAMPFF', 'PAIRED_MONOTRODE', 'SYNTH_MONOTRODE', 'SYNTH_MAGLAND', 'SYNTH_MEAREC_NEURONEXUS', 'SYNTH_MEAREC_TETRODE', 'SYNTH_MONOTRODE', 'SYNTH_VISAPY', 'HYBRID_JANELIA', 'MANUAL_FRANKLAB']
studysets_to_include = ['SYNTH_BIONET']
fnames = ['geom.csv', 'params.json', 'raw.mda', 'firings_true.mda']
# fnames = ['geom.csv', 'params.json', 'firings_true.mda']
# fnames = ['geom.csv', 'params.json']
for studyset in X['StudySets']:
    print('STUDYSET: {}'.format(studyset['name']))
    if studyset['name'] in studysets_to_include:
        for study in studyset['studies']:
            study_name = study['name']
            print('STUDY: {}'.format(study_name))
            for recording in study['recordings']:
                recname = recording['name']
                recdir = recording['directory']
                print('RECORDING: {}'.format(recname), recdir)
                sha1 = get_sha1_part_of_sha1dir(recdir)
                if sha1:
                    ff = mt.realizeFile('sha1://' + sha1)
                    print('Storing directory index file: {} for sha1={}'.format(ff, sha1))
                    ka.store_file(ff)
                for fname in fnames:
                    print('Realizing file: {}'.format(recdir + '/' + fname))
                    ff = mt.realizeFile(path=recdir + '/' + fname)
                    if ff:
                        print('Storing file: {}'.format(ff))
                        ka.store_file(ff)
                    else:
                        print('WARNING: could not realize file: {}'.format(recdir + '/' + fname))
Example #30
def compute_mandelbrot_parallel(*,
                                xmin=-2,
                                xmax=0.5,
                                ymin=-1.25,
                                ymax=1.25,
                                num_x=1000,
                                num_iter=1000,
                                num_parallel=1,
                                _force_run=False,
                                _container=None,
                                srun_opts=None,
                                use_slurm=False):
    subsampling_factor = num_parallel
    jobs = []

    job_args = [
        dict(num_x=num_x,
             num_iter=num_iter,
             subsampling_factor=subsampling_factor,
             subsampling_offset=offset,
             output_npy=dict(ext='.npy', upload=True),
             _force_run=_force_run,
             _container=_container,
             _compute_requirements=dict(batch_type='cpu1'))
        for offset in range(subsampling_factor)
    ]

    jobs = ComputeMandelbrot.createJobs(job_args)

    working_dir = 'tmp_slurm_working_dir_' + _random_string(5)
    if use_slurm:
        H = mlpr.SlurmJobHandler(working_dir=working_dir)
        H.addBatchType(name='cpu1',
                       num_workers_per_batch=4,
                       num_cores_per_job=2,
                       use_slurm=False)
    else:
        H = mlpr.ParallelJobHandler(num_workers=num_parallel)
    with mlpr.JobQueue(job_handler=H) as jq:
        results = []
        for job in jobs:
            result0 = job.execute()
            results.append(result0)

        # results = mlpr.executeBatch(jobs=jobs, srun_opts=srun_opts)

        X_list = []
        for result0 in results:
            # result0.wait()
            # X0 = np.load(mt.realizeFile(result0.outputs['output_npy']))
            # X_list.append(X0)
            X_list.append(result0.outputs['output_npy'])
        # X = combine_subsampled_mandelbrot(X_list)
        A = CombineSubsampledMandelbrot.execute(
            num_x=num_x,
            X_list=X_list,
            X_out={'ext': '.npy'},
            _force_run=_force_run,
            _container=_container,
            _compute_requirements=dict(batch_type='cpu1'))

        jq.wait()
        X_path = A.outputs['X_out']
        X = np.load(mt.realizeFile(X_path))
        return X