Example #1
    def save(self,
             file_name,
             to32=True,
             order='F',
             imagej=False,
             bigtiff=True,
             excitation_lambda=488.0,
             compress=0,
             q_max=99.75,
             q_min=1,
             var_name_hdf5='mov',
             sess_desc='some_description',
             identifier='some identifier',
             imaging_plane_description='some imaging plane description',
             emission_lambda=520.0,
             indicator='OGB-1',
             location='brain',
             unit='some TwoPhotonSeries unit description',
             starting_time=0.,
             experimenter='Dr Who',
             lab_name=None,
             institution=None,
             experiment_description='Experiment Description',
             session_id='Session ID'):
        """
        Save the timeseries in single precision. Supported formats include
        TIFF, NPZ, AVI, MAT, HDF5/H5, MMAP, and NWB

        Args:
            file_name: str
                name of the output file; the extension selects the format:
                tif/tiff/btf, avi/mkv, npz, mat, hdf5/h5, mmap or nwb

            to32: Bool
                whether to convert the data to 32-bit float before saving

            order: 'F' or 'C'
                C or Fortran order

            var_name_hdf5: str
                name of the dataset (variable) under which the movie is stored
                in HDF5/NWB files

            q_max, q_min: float in [0, 100]
                percentile for maximum/minimum clipping value if saving as avi
                (If set to None, no automatic scaling to the dynamic range [0, 255] is performed)

        Raises:
            Exception 'Extension Unknown'

        """
        file_name = caiman.paths.fn_relocated(file_name)
        name, extension = os.path.splitext(
            file_name)[:2]  # name is only used by the memmap saver
        extension = extension.lower()
        logging.debug("Parsing extension " + str(extension))

        if extension in ['.tif', '.tiff', '.btf']:
            with tifffile.TiffWriter(file_name, bigtiff=bigtiff,
                                     imagej=imagej) as tif:
                if "%4d%02d%02d" % tuple(
                        map(int,
                            tifffile.__version__.split('.'))) >= '20200813':

                    def foo(i):
                        if i % 200 == 0:
                            logging.debug(str(i) + ' frames saved')
                        curfr = self[i].copy()
                        if to32 and not ('float32' in str(self.dtype)):
                            curfr = curfr.astype(np.float32)
                        return curfr

                    tif.save([foo(i) for i in range(self.shape[0])],
                             compress=compress)
                else:
                    for i in range(self.shape[0]):
                        if i % 200 == 0:
                            logging.debug(str(i) + ' frames saved')
                        curfr = self[i].copy()
                        if to32 and not ('float32' in str(self.dtype)):
                            curfr = curfr.astype(np.float32)
                        tif.save(curfr, compress=compress)
        elif extension == '.npz':
            if to32 and not ('float32' in str(self.dtype)):
                input_arr = self.astype(np.float32)
            else:
                input_arr = np.array(self)

            np.savez(file_name,
                     input_arr=input_arr,
                     start_time=self.start_time,
                     fr=self.fr,
                     meta_data=self.meta_data,
                     file_name=self.file_name)
        elif extension in ('.avi', '.mkv'):
            codec = None
            try:
                codec = cv2.FOURCC('I', 'Y', 'U', 'V')
            except AttributeError:
                codec = cv2.VideoWriter_fourcc(*'IYUV')
            if q_max is None or q_min is None:
                data = self.astype(np.uint8)
            else:
                if q_max < 100:
                    maxmov = np.nanpercentile(self[::max(1,
                                                         len(self) // 100)],
                                              q_max)
                else:
                    maxmov = np.nanmax(self)
                if q_min > 0:
                    minmov = np.nanpercentile(self[::max(1,
                                                         len(self) // 100)],
                                              q_min)
                else:
                    minmov = np.nanmin(self)
                data = 255 * (self - minmov) / (maxmov - minmov)
                np.clip(data, 0, 255, data)
                data = data.astype(np.uint8)

            y, x = data[0].shape
            vw = cv2.VideoWriter(file_name,
                                 codec,
                                 self.fr, (x, y),
                                 isColor=True)
            for d in data:
                vw.write(cv2.cvtColor(d, cv2.COLOR_GRAY2BGR))
            vw.release()

        elif extension == '.mat':
            if self.file_name[0] is not None:
                f_name = self.file_name
            else:
                f_name = ''

            if to32 and not ('float32' in str(self.dtype)):
                input_arr = self.astype(np.float32)
            else:
                input_arr = np.array(self)

            if self.meta_data[0] is None:
                savemat(
                    file_name, {
                        'input_arr': np.rollaxis(input_arr, axis=0, start=3),
                        'start_time': self.start_time,
                        'fr': self.fr,
                        'meta_data': [],
                        'file_name': f_name
                    })
            else:
                savemat(
                    file_name, {
                        'input_arr': np.rollaxis(input_arr, axis=0, start=3),
                        'start_time': self.start_time,
                        'fr': self.fr,
                        'meta_data': self.meta_data,
                        'file_name': f_name
                    })

        elif extension in ('.hdf5', '.h5'):
            with h5py.File(file_name, "w") as f:
                if to32 and not ('float32' in str(self.dtype)):
                    input_arr = self.astype(np.float32)
                else:
                    input_arr = np.array(self)

                dset = f.create_dataset(var_name_hdf5, data=input_arr)
                dset.attrs["fr"] = self.fr
                dset.attrs["start_time"] = self.start_time
                try:
                    dset.attrs["file_name"] = [
                        a.encode('utf8') for a in self.file_name
                    ]
                except Exception:
                    logging.warning('Could not store the file_name attribute')
                if self.meta_data[0] is not None:
                    logging.debug("Metadata for saved file: " +
                                  str(self.meta_data))
                    dset.attrs["meta_data"] = cpk.dumps(self.meta_data)
        elif extension == '.mmap':
            base_name = name

            T = self.shape[0]
            dims = self.shape[1:]
            if to32 and not ('float32' in str(self.dtype)):
                input_arr = self.astype(np.float32)
            else:
                input_arr = np.array(self)

            input_arr = np.transpose(input_arr,
                                     list(range(1,
                                                len(dims) + 1)) + [0])
            input_arr = np.reshape(input_arr, (np.prod(dims), T), order='F')

            fname_tot = caiman.paths.memmap_frames_filename(
                base_name, dims, T, order)
            fname_tot = os.path.join(os.path.split(file_name)[0], fname_tot)
            big_mov = np.memmap(fname_tot,
                                mode='w+',
                                dtype=np.float32,
                                shape=(np.uint64(np.prod(dims)), np.uint64(T)),
                                order=order)

            big_mov[:] = np.asarray(input_arr, dtype=np.float32)
            big_mov.flush()
            del big_mov, input_arr
            return fname_tot
        elif extension == '.nwb':
            if to32 and not ('float32' in str(self.dtype)):
                input_arr = self.astype(np.float32)
            else:
                input_arr = np.array(self)
            # Create NWB file
            nwbfile = NWBFile(sess_desc,
                              identifier,
                              datetime.now(tzlocal()),
                              experimenter=experimenter,
                              lab=lab_name,
                              institution=institution,
                              experiment_description=experiment_description,
                              session_id=session_id)
            # Get the device
            device = Device('imaging_device')
            nwbfile.add_device(device)
            # OpticalChannel
            optical_channel = OpticalChannel('OpticalChannel',
                                             'main optical channel',
                                             emission_lambda=emission_lambda)
            imaging_plane = nwbfile.create_imaging_plane(
                name='ImagingPlane',
                optical_channel=optical_channel,
                description=imaging_plane_description,
                device=device,
                excitation_lambda=excitation_lambda,
                imaging_rate=self.fr,
                indicator=indicator,
                location=location)
            # Images
            image_series = TwoPhotonSeries(name=var_name_hdf5,
                                           dimension=self.shape[1:],
                                           data=input_arr,
                                           unit=unit,
                                           imaging_plane=imaging_plane,
                                           starting_frame=[0],
                                           starting_time=starting_time,
                                           rate=self.fr)

            nwbfile.add_acquisition(image_series)

            with NWBHDF5IO(file_name, 'w') as io:
                io.write(nwbfile)

            return file_name

        else:
            logging.error("Extension " + str(extension) + " unknown")
            raise Exception('Extension Unknown')
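
A minimal usage sketch for this method (hypothetical file names; it assumes the
movie was loaded with caiman, e.g. via caiman.load, so that it exposes this
save method):

import caiman

m = caiman.load('example_movie.tif')               # hypothetical input file
m.save('example_movie.hdf5', var_name_hdf5='mov')  # dataset name inside the HDF5 file
fname_mmap = m.save('example_movie.mmap',          # the mmap saver returns the
                    order='C')                     # generated file name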
Example #2
    def write_segmentation(segext_obj,
                           save_path,
                           plane_num=0,
                           metadata=None,
                           overwrite=True):
        save_path = Path(save_path)
        assert save_path.suffix == '.nwb'
        if save_path.is_file() and not overwrite:
            nwbfile_exist = True
            file_mode = 'r+'
        else:
            if save_path.is_file():
                os.remove(save_path)
            if not save_path.parent.is_dir():
                save_path.parent.mkdir(parents=True)
            nwbfile_exist = False
            file_mode = 'w'

        # parse metadata correctly:
        if isinstance(segext_obj, MultiSegmentationExtractor):
            segext_objs = segext_obj.segmentations
            if metadata is not None:
                assert isinstance(metadata, list), "For MultiSegmentationExtractor enter 'metadata' as a list of " \
                                                   "SegmentationExtractor metadata"
                assert len(metadata) == len(segext_objs), "The 'metadata' argument should be a list with the same " \
                                                          "number of elements as the segmentations in the " \
                                                          "MultiSegmentationExtractor"
        else:
            segext_objs = [segext_obj]
            if metadata is not None and not isinstance(metadata, list):
                metadata = [metadata]
        metadata_base_list = [
            NwbSegmentationExtractor.get_nwb_metadata(sgobj)
            for sgobj in segext_objs
        ]

        print(f'writing nwb for {segext_obj.extractor_name}\n')
        # updating base metadata with new:
        for num, data in enumerate(metadata_base_list):
            metadata_input = metadata[num] if metadata else {}
            metadata_base_list[num] = dict_recursive_update(
                metadata_base_list[num], metadata_input)
        # loop for every plane:
        with NWBHDF5IO(str(save_path), file_mode) as io:
            metadata_base_common = metadata_base_list[0]
            if nwbfile_exist:
                nwbfile = io.read()
            else:
                nwbfile = NWBFile(**metadata_base_common['NWBFile'])
                # Subject:
                if metadata_base_common.get('Subject'):
                    nwbfile.subject = Subject(
                        **metadata_base_common['Subject'])

            # Processing Module:
            if 'ophys' not in nwbfile.processing:
                ophys = nwbfile.create_processing_module(
                    'ophys', 'contains optical physiology processed data')
            else:
                ophys = nwbfile.get_processing_module('ophys')

            for plane_no_loop, (segext_obj, metadata) in enumerate(
                    zip(segext_objs, metadata_base_list)):
                # Device:
                if metadata['Ophys']['Device'][0][
                        'name'] not in nwbfile.devices:
                    nwbfile.create_device(**metadata['Ophys']['Device'][0])

                # ImageSegmentation:
                image_segmentation_name = 'ImageSegmentation' if plane_no_loop == 0 else f'ImageSegmentation_Plane{plane_no_loop}'
                if image_segmentation_name not in ophys.data_interfaces:
                    image_segmentation = ImageSegmentation(
                        name=image_segmentation_name)
                    ophys.add_data_interface(image_segmentation)
                else:
                    image_segmentation = ophys.data_interfaces.get(
                        image_segmentation_name)

                # OpticalChannel:
                optical_channels = [
                    OpticalChannel(**i)
                    for i in metadata['Ophys']['ImagingPlane'][0]['optical_channel']
                ]

                # ImagingPlane:
                image_plane_name = 'ImagingPlane' if plane_no_loop == 0 else f'ImagePlane_{plane_no_loop}'
                if image_plane_name not in nwbfile.imaging_planes.keys():
                    input_kwargs = dict(
                        name=image_plane_name,
                        device=nwbfile.get_device(metadata_base_common['Ophys']
                                                  ['Device'][0]['name']),
                    )
                    metadata['Ophys']['ImagingPlane'][0][
                        'optical_channel'] = optical_channels
                    input_kwargs.update(**metadata['Ophys']['ImagingPlane'][0])
                    if 'imaging_rate' in input_kwargs:
                        input_kwargs['imaging_rate'] = float(
                            input_kwargs['imaging_rate'])
                    imaging_plane = nwbfile.create_imaging_plane(
                        **input_kwargs)
                else:
                    imaging_plane = nwbfile.imaging_planes[image_plane_name]

                # PlaneSegmentation:
                input_kwargs = dict(
                    description='output from segmenting imaging plane',
                    imaging_plane=imaging_plane)
                ps_metadata = metadata['Ophys']['ImageSegmentation'][
                    'plane_segmentations'][0]
                if ps_metadata[
                        'name'] not in image_segmentation.plane_segmentations:
                    input_kwargs.update(**ps_metadata)
                    ps = image_segmentation.create_plane_segmentation(
                        **input_kwargs)
                    ps_exist = False
                else:
                    ps = image_segmentation.get_plane_segmentation(
                        ps_metadata['name'])
                    ps_exist = True

                # ROI add:
                image_masks = segext_obj.get_roi_image_masks()
                roi_ids = segext_obj.get_roi_ids()
                accepted_list = segext_obj.get_accepted_list()
                accepted_list = [] if accepted_list is None else accepted_list
                rejected_list = segext_obj.get_rejected_list()
                rejected_list = [] if rejected_list is None else rejected_list
                accepted_ids = [
                    1 if k in accepted_list else 0 for k in roi_ids
                ]
                rejected_ids = [
                    1 if k in rejected_list else 0 for k in roi_ids
                ]
                roi_locations = np.array(segext_obj.get_roi_locations()).T
                if not ps_exist:
                    ps.add_column(
                        name='RoiCentroid',
                        description='x,y location of centroid of the ROI in image_mask')
                    ps.add_column(
                        name='Accepted',
                        description='1 if ROI was accepted or 0 if rejected as a cell during segmentation operation')
                    ps.add_column(
                        name='Rejected',
                        description='1 if ROI was rejected or 0 if accepted as a cell during segmentation operation')
                    for num, row in enumerate(roi_ids):
                        ps.add_roi(id=row,
                                   image_mask=image_masks[:, :, num],
                                   RoiCentroid=roi_locations[num, :],
                                   Accepted=accepted_ids[num],
                                   Rejected=rejected_ids[num])

                # Fluorescence Traces:
                if 'Fluorescence' not in ophys.data_interfaces:
                    fluorescence = Fluorescence()
                    ophys.add_data_interface(fluorescence)
                else:
                    fluorescence = ophys.data_interfaces['Fluorescence']
                roi_response_dict = segext_obj.get_traces_dict()
                roi_table_region = ps.create_roi_table_region(
                    description=f'region for Imaging plane{plane_no_loop}',
                    region=list(range(segext_obj.get_num_rois())))
                sampling_frequency = segext_obj.get_sampling_frequency()
                rate = float('nan') if sampling_frequency is None else float(sampling_frequency)
                for i, j in roi_response_dict.items():
                    data = getattr(segext_obj, f'_roi_response_{i}')
                    if data is not None:
                        data = np.asarray(data)
                        trace_name = 'RoiResponseSeries' if i == 'raw' else i.capitalize()
                        trace_name = trace_name if plane_no_loop == 0 else trace_name + f'_Plane{plane_no_loop}'
                        input_kwargs = dict(name=trace_name,
                                            data=data.T,
                                            rois=roi_table_region,
                                            rate=rate,
                                            unit='n.a.')
                        if trace_name not in fluorescence.roi_response_series:
                            fluorescence.create_roi_response_series(
                                **input_kwargs)

                # create Two Photon Series:
                if 'TwoPhotonSeries' not in nwbfile.acquisition:
                    warn(
                        'could not find TwoPhotonSeries, using ImagingExtractor to create an nwbfile'
                    )

                # adding images:
                images_dict = segext_obj.get_images_dict()
                if any([image is not None for image in images_dict.values()]):
                    images_name = 'SegmentationImages' if plane_no_loop == 0 else f'SegmentationImages_Plane{plane_no_loop}'
                    if images_name not in ophys.data_interfaces:
                        images = Images(images_name)
                        for img_name, img_no in images_dict.items():
                            if img_no is not None:
                                images.add_image(
                                    GrayscaleImage(name=img_name, data=img_no))
                        ophys.add(images)

            # saving NWB file:
            io.write(nwbfile)

        # test read
        with NWBHDF5IO(str(save_path), 'r') as io:
            io.read()
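
A minimal usage sketch, assuming this is the write_segmentation static method of
NwbSegmentationExtractor from roiextractors (the output path below is a placeholder
and seg stands for any already-constructed SegmentationExtractor):

from roiextractors import NwbSegmentationExtractor

# seg: any SegmentationExtractor instance, e.g. one produced by another
# roiextractors extractor class
NwbSegmentationExtractor.write_segmentation(seg, 'segmentation.nwb',
                                            metadata=None, overwrite=True)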
Example #3
File: ophys.py  Project: campagnola/pynwb
    datetime.now(),
    experimenter='Dr. Bilbo Baggins',
    lab='Bag End Laboratory',
    institution='University of Middle Earth at the Shire',
    experiment_description=('I went on an adventure with thirteen '
                            'dwarves to reclaim vast treasures.'),
    session_id='LONELYMTN')

####################
# Adding metadata about acquisition
# ---------------------------------
#
# Before you can add your data, you will need to provide some information about how that data was generated.
# This amounts to describing the imaging plane and the optical channel used.

optical_channel = OpticalChannel('my_optchan', 'Ca2+ imaging example',
                                 'pi wavelength', '3.14')
imaging_plane = nwbfile.create_imaging_plane(
    'my_imgpln', 'Ca2+ imaging example', optical_channel,
    'a very interesting part of the brain', 'imaging_device_1', '6.28',
    '2.718', 'GFP', 'my favorite brain location', (1, 2, 1, 2, 3), 4.0,
    'manifold unit', 'A frame to refer to')

####################
# Adding two-photon image data
# ----------------------------
#
# Now that you have your :py:class:`~pynwb.ophys.ImagingPlane`, you can create a
# :py:class:`~pynwb.ophys.TwoPhotonSeries` - the class representing two photon imaging data.
#
# From here you have two options. The first option is to supply the image data to PyNWB, using the `data` argument.
# The other option is to provide a path to the images. These two options have trade-offs, so it is worth spending
# time considering which approach is right for your use case.
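#
# A minimal sketch of both options (the array, file names and rates below are
# placeholders, not values from this tutorial):

import numpy as np
from pynwb.ophys import TwoPhotonSeries

# Option 1: store the pixel data directly in the NWB file via ``data``
image_series1 = TwoPhotonSeries(name='TwoPhotonSeries_internal',
                                data=np.ones((100, 512, 512)),
                                imaging_plane=imaging_plane,
                                rate=30.0,
                                unit='normalized amplitude')

# Option 2: reference externally stored image files via ``external_file``
image_series2 = TwoPhotonSeries(name='TwoPhotonSeries_external',
                                dimension=[512, 512],
                                external_file=['images.tiff'],
                                imaging_plane=imaging_plane,
                                starting_frame=[0],
                                format='external',
                                starting_time=0.0,
                                rate=30.0)

nwbfile.add_acquisition(image_series1)
nwbfile.add_acquisition(image_series2)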
Example #4
    def add_two_photon_series(imaging, nwbfile, metadata, num_chunks=10):
        """
        Auxiliary static method for nwbextractor.
        Adds two photon series from imaging object as TwoPhotonSeries to nwbfile object.
        """
        if 'Ophys' not in metadata:
            metadata['Ophys'] = {}

        if 'TwoPhotonSeries' not in metadata['Ophys']:
            metadata['Ophys']['TwoPhotonSeries'] = [{
                'name': 'TwoPhotonSeries',
                'description': 'optical_series_description'
            }]
        # Tests if the TwoPhotonSeries already exists in acquisition
        nwb_es_names = [ac for ac in nwbfile.acquisition]
        opts = metadata['Ophys']['TwoPhotonSeries'][0]
        if opts['name'] not in nwb_es_names:
            # retrieve device
            device = nwbfile.devices[list(nwbfile.devices.keys())[0]]

            # create optical channel
            if 'OpticalChannel' not in metadata['Ophys']:
                metadata['Ophys']['OpticalChannel'] = [{
                    'name': 'OpticalChannel',
                    'description': 'no description',
                    'emission_lambda': np.nan
                }]

            optical_channel = OpticalChannel(
                **metadata['Ophys']['OpticalChannel'][0])
            # sampling rate
            rate = float(imaging.get_sampling_frequency())

            if 'ImagingPlane' not in metadata['Ophys']:
                metadata['Ophys']['ImagingPlane'] = [{
                    'name': 'ImagingPlane',
                    'description': 'no description',
                    'excitation_lambda': np.nan,
                    'indicator': 'unknown',
                    'location': 'unknown'
                }]
            imaging_meta = {
                'optical_channel': optical_channel,
                'imaging_rate': rate,
                'device': device
            }
            metadata['Ophys']['ImagingPlane'][0] = update_dict(
                metadata['Ophys']['ImagingPlane'][0], imaging_meta)

            imaging_plane = nwbfile.create_imaging_plane(
                **metadata['Ophys']['ImagingPlane'][0])

            def data_generator(imaging, num_chunks):
                num_frames = imaging.get_num_frames()
                # split the movie into evenly sized chunks; add one extra chunk for any remainder
                chunk_size = num_frames // num_chunks
                if num_frames % chunk_size > 0:
                    num_chunks += 1
                for i in range(num_chunks):
                    video = imaging.get_video(start_frame=i * chunk_size,
                                              end_frame=min(
                                                  (i + 1) * chunk_size,
                                                  num_frames))
                    data = np.squeeze(video)
                    yield data

            data = H5DataIO(DataChunkIterator(
                data_generator(imaging, num_chunks)),
                            compression=True)
            acquisition_name = opts['name']

            # using internal data. this data will be stored inside the NWB file
            ophys_ts = TwoPhotonSeries(
                name=acquisition_name,
                data=data,
                imaging_plane=imaging_plane,
                rate=rate,
                unit='normalized amplitude',
                comments='Generated from RoiInterface::NwbImagingExtractor',
                description='no description')

            nwbfile.add_acquisition(ophys_ts)

        return nwbfile
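
A minimal usage sketch, assuming this is the add_two_photon_series static method of
NwbImagingExtractor from roiextractors; imaging stands for any ImagingExtractor, the
NWB file must already contain at least one device, and all names below are placeholders:

from datetime import datetime
from dateutil.tz import tzlocal
from pynwb import NWBFile, NWBHDF5IO
from pynwb.device import Device
from roiextractors import NwbImagingExtractor

nwbfile = NWBFile('session description', 'session-id-0', datetime.now(tzlocal()))
nwbfile.add_device(Device('Microscope'))   # the method uses the first device it finds

metadata = {}                              # missing entries are filled with defaults
nwbfile = NwbImagingExtractor.add_two_photon_series(imaging, nwbfile, metadata)

with NWBHDF5IO('imaging.nwb', 'w') as io:
    io.write(nwbfile)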
Example #5
                      tags='trials')

for _, row in epoch_table.iterrows():
    nwbfile.add_epoch(start_time=row.start,
                      stop_time=row.end,
                      timeseries=[running_speed],
                      tags='stimulus')

########################################
# 5) In the brain observatory, a two-photon microscope is used to acquire images of the calcium activity of neurons
# expressing a fluorescent protein indicator.  Essentially the microscope captures pictures (30 times a second) at a
# single depth in the visual cortex (the imaging plane).  Let's use pynwb to store the metadata associated with this
# hardware and experimental setup:
optical_channel = OpticalChannel(
    name='optical_channel',
    description='2P Optical Channel',
    emission_lambda=520.,
)

device = Device(metadata['device'])
nwbfile.add_device(device)

imaging_plane = nwbfile.create_imaging_plane(
    name='imaging_plane',
    optical_channel=optical_channel,
    description='Imaging plane ',
    device=device,
    excitation_lambda=float(metadata['excitation_lambda'].split(' ')[0]),
    imaging_rate=30.,
    indicator='GCaMP6f',
    location=metadata['targeted_structure'],
Example #6
def add_ophys_processing_from_suite2p(save_folder, nwbfile, xml, 
                                      device=None,
                                      optical_channel=None,
                                      imaging_plane=None,
                                      image_series=None):
    """ 
    adapted from suite2p/suite2p/io/nwb.py "save_nwb" function
    """

    plane_folders = natsorted([ f.path for f in os.scandir(save_folder) if f.is_dir() and f.name[:5]=='plane'])
    OPS = [np.load(os.path.join(f, 'ops.npy'), allow_pickle=True).item() for f in plane_folders]

    if len(OPS)>1:
        multiplane, nplanes = True, len(plane_folders)
        pData_folder = os.path.join(save_folder, 'combined') # processed data folder -> using the "combined output from suite2p"
    else:
        multiplane, nplanes = False, 1
        pData_folder = os.path.join(save_folder, 'plane0') # processed data folder

    # find time sampling per plane
    functional_chan = ('Ch1' if len(xml['Ch1']['relativeTime'])>1 else 'Ch2') # functional channel is one of the two !!
    CaImaging_timestamps = xml[functional_chan]['relativeTime']+float(xml['settings']['framePeriod'])/2.

    ops = np.load(os.path.join(pData_folder, 'ops.npy'), allow_pickle=True).item() 
    
    if device is None:
        device = nwbfile.create_device(
            name='Microscope', 
            description='My two-photon microscope',
            manufacturer='The best microscope manufacturer')
    if optical_channel is None:
        optical_channel = OpticalChannel(
            name='OpticalChannel', 
            description='an optical channel', 
            emission_lambda=500.)
    if imaging_plane is None:
        imaging_plane = nwbfile.create_imaging_plane(
            name='ImagingPlane',
            optical_channel=optical_channel,
            imaging_rate=ops['fs'],
            description='standard',
            device=device,
            excitation_lambda=600.,
            indicator='GCaMP',
            location='V1',
            grid_spacing=([2,2,30] if multiplane else [2,2]),
            grid_spacing_unit='microns')

    if image_series is None:
        # link to external data
        image_series = TwoPhotonSeries(
            name='TwoPhotonSeries', 
            dimension=[ops['Ly'], ops['Lx']],
            external_file=(ops['filelist'] if 'filelist' in ops else ['']), 
            imaging_plane=imaging_plane,
            starting_frame=[0], 
            format='external', 
            starting_time=0.0, 
            rate=ops['fs'] * ops['nplanes']
        )
        nwbfile.add_acquisition(image_series) # if an image_series was passed in, it is assumed to have been added already

    # processing
    img_seg = ImageSegmentation()
    ps = img_seg.create_plane_segmentation(
        name='PlaneSegmentation',
        description='suite2p output',
        imaging_plane=imaging_plane,
        reference_images=image_series
    )
    ophys_module = nwbfile.create_processing_module(
        name='ophys', 
        description='optical physiology processed data\n TSeries-folder=%s' % save_folder)
    ophys_module.add(img_seg)

    file_strs = ['F.npy', 'Fneu.npy', 'spks.npy']
    traces = []

    iscell = np.load(os.path.join(pData_folder, 'iscell.npy')).astype(bool)

    if ops['nchannels']>1:
        if os.path.isfile(os.path.join(pData_folder, 'redcell_manual.npy')):
            redcell = np.load(os.path.join(pData_folder, 'redcell_manual.npy'))[iscell[:,0], :]
        else:
            print('\n'+30*'--')
            print(' /!\ no file found for the manual labelling of red cells (generate it with the red-cell labelling GUI) /!\ ')
            print(' /!\ taking the raw suite2p output with the classifier settings /!\ ')
            print('\n'+30*'--')
            redcell = np.load(os.path.join(pData_folder, 'redcell.npy'))[iscell[:,0], :]
            
    for fstr in file_strs:
        traces.append(np.load(os.path.join(pData_folder, fstr))[iscell[:,0], :])
        
    stat = np.load(os.path.join(pData_folder, 'stat.npy'), allow_pickle=True)

    ncells = np.sum(iscell[:,0])
    plane_ID = np.zeros(ncells)
    for n in np.arange(ncells):
        pixel_mask = np.array([stat[iscell[:,0]][n]['ypix'], stat[iscell[:,0]][n]['xpix'], 
                               stat[iscell[:,0]][n]['lam']])
        ps.add_roi(pixel_mask=pixel_mask.T)
        if 'iplane' in stat[0]:
            plane_ID[n] = stat[iscell[:,0]][n]['iplane']

    if ops['nchannels']>1:
        ps.add_column('redcell', 'two columns - redcell & probcell', redcell)
    ps.add_column('plane', 'one column - plane ID', plane_ID)

    rt_region = ps.create_roi_table_region(
        region=list(np.arange(0, ncells)),
        description='all ROIs')

    # FLUORESCENCE (all are required)
    file_strs = ['F.npy', 'Fneu.npy', 'spks.npy']
    name_strs = ['Fluorescence', 'Neuropil', 'Deconvolved']

    for i, (fstr,nstr) in enumerate(zip(file_strs, name_strs)):
        roi_resp_series = RoiResponseSeries(
            name=nstr,
            data=traces[i],
            rois=rt_region,
            unit='lumens',
            timestamps=CaImaging_timestamps[::nplanes]) # ideally should be shifted for each ROI depending on the plane...
        fl = Fluorescence(roi_response_series=roi_resp_series, name=nstr)
        ophys_module.add(fl)

    # BACKGROUNDS
    # (meanImg, Vcorr and max_proj are REQUIRED)
    bg_strs = ['meanImg', 'meanImgE', 'Vcorr', 'max_proj', 'meanImg_chan2']
    nplanes = ops['nplanes']
    for iplane in range(nplanes):
        images = Images('Backgrounds_%d'%iplane)
        for bstr in bg_strs:
            if bstr in ops:
                if bstr=='Vcorr' or bstr=='max_proj':
                    img = np.zeros((ops['Ly'], ops['Lx']), np.float32)
                    img[ops['yrange'][0]:ops['yrange'][-1], 
                        ops['xrange'][0]:ops['xrange'][-1]] = ops[bstr]
                else:
                    img = ops[bstr]
                images.add_image(GrayscaleImage(name=bstr, data=img))

        ophys_module.add(images)
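
A minimal usage sketch (placeholders throughout: the suite2p save folder, the pre-built
nwbfile and the xml dict, whose structure is only assumed from what the function reads,
i.e. xml['Ch1']['relativeTime'], xml['Ch2']['relativeTime'] and xml['settings']['framePeriod']):

import numpy as np
from datetime import datetime
from dateutil.tz import tzlocal
from pynwb import NWBFile, NWBHDF5IO

nwbfile = NWBFile('suite2p session', 'session-id-0', datetime.now(tzlocal()))

xml = {'Ch1': {'relativeTime': np.arange(0, 100) * 0.033},   # functional channel
       'Ch2': {'relativeTime': np.array([0.0])},
       'settings': {'framePeriod': '0.033'}}

add_ophys_processing_from_suite2p('TSeries-folder/suite2p', nwbfile, xml)

with NWBHDF5IO('processed.nwb', 'w') as io:
    io.write(nwbfile)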
Example #7
 def set_up_dependencies(self):
     oc = OpticalChannel(name='test_optical_channel',
                         description='description',
                         emission_lambda=500.)
     device = Device(name='device_name')
     return oc, device
Example #8
 def addContainer(self, file):
     dev1 = file.create_device('imaging_device_1', 'dev1 description')
     oc = OpticalChannel('optchan1', 'a fake OpticalChannel', 3.14)
     ip = file.create_imaging_plane('imgpln1', oc, 'a fake ImagingPlane',
                                    dev1, 6.28, 2.718, 'GFP', 'somewhere in the brain')
     return ip