예제 #1
0
    def test_5_1_multifile(self):
        """Read a ScanImage 5.1 stack split across multiple files and check
        iteration, conversion to a numpy array, and single-index slicing along
        each of the 5 dimensions (expected shapes/sums are precomputed
        fixtures for this test file)."""
        scan = scanreader.read_scan(stack_file_5_1_multifiles)

        # Test it is iterable
        for i, field in enumerate(scan):
            self.assertEqual(field.shape, (512, 512, 2, 25))
        self.assertEqual(i, 69)  # 70 fields

        # Test it can be obtained as array
        scan_as_array = np.array(scan)
        self.assertEqualShapeAndSum(scan_as_array, (70, 512, 512, 2, 25),
                                    2021813090863)

        # Test indexation
        first_field = scan[0, :, :, :, :]
        self.assertEqualShapeAndSum(first_field, (512, 512, 2, 25),
                                    27836374986)
        first_row = scan[:, 0, :, :, :]
        self.assertEqualShapeAndSum(first_row, (70, 512, 2, 25), 3294545077)
        first_column = scan[:, :, 0, :, :]
        self.assertEqualShapeAndSum(first_column, (70, 512, 2, 25), 885838245)
        first_channel = scan[:, :, :, 0, :]
        self.assertEqualShapeAndSum(first_channel, (70, 512, 512, 25),
                                    1832276046863)
        first_frame = scan[:, :, :, :, 0]
        self.assertEqualShapeAndSum(first_frame, (70, 512, 512, 2),
                                    79887927681)
예제 #2
0
    def load(self, key):
        """Load one field/channel of a scan and apply raster and motion
        corrections.

        Arguments:
            key: dict with at least 'field' and 'channel' (1-based) plus
                whatever restriction attributes the datajoint relations below
                need to resolve a single scan.

        Returns:
            (scan, num_scanning_depths): the corrected field as a float32
            array and the reader's number of scanning depths.
        """
        from pipeline.utils import galvo_corrections

        # load; 'field'/'channel' are 1-based in the key, 0-based in the reader
        print("Loading scan", flush=True)
        reader = scanreader.read_scan(
            (experiment.Scan() & key).local_filenames_as_wildcard)
        scan = reader[key["field"] - 1, :, :,
                      key["channel"] - 1].astype(np.float32)

        # raster correction (parameters fetched from the pipeline this scan
        # was processed with, via fuse.MotionCorrection().module)
        print("Raster correction", flush=True)
        pipe = (fuse.MotionCorrection() & key).module
        raster_phase = (pipe.RasterCorrection() & key).fetch1("raster_phase")
        fill_fraction = (pipe.ScanInfo() & key).fetch1("fill_fraction")
        scan = galvo_corrections.correct_raster(scan, raster_phase,
                                                fill_fraction)

        # motion correction
        print("Motion correction", flush=True)
        x_shifts, y_shifts = (pipe.MotionCorrection() & key).fetch1(
            "x_shifts", "y_shifts")
        scan = galvo_corrections.correct_motion(scan, x_shifts, y_shifts)

        return scan, reader.num_scanning_depths
예제 #3
0
    def test_5_1(self):
        """Read a single-file ScanImage 5.1 stack and check iteration, array
        conversion, and single-index slicing along each dimension against
        precomputed shape/checksum fixtures."""
        scan = scanreader.read_scan(stack_file_5_1)

        # Test it is iterable
        for i, field in enumerate(scan):
            self.assertEqual(field.shape, (512, 512, 2, 25))
        self.assertEqual(i, 59)  # 60 fields

        # Test it can be obtained as array
        scan_as_array = np.array(scan)
        self.assertEqualShapeAndSum(scan_as_array, (60, 512, 512, 2, 25),
                                    1766199881650)

        # Test indexation
        first_field = scan[0, :, :, :, :]
        self.assertEqualShapeAndSum(first_field, (512, 512, 2, 25),
                                    27836374986)
        first_row = scan[:, 0, :, :, :]
        self.assertEqualShapeAndSum(first_row, (60, 512, 2, 25), 2838459027)
        first_column = scan[:, :, 0, :, :]
        self.assertEqualShapeAndSum(first_column, (60, 512, 2, 25), 721241569)
        first_channel = scan[:, :, :, 0, :]
        self.assertEqualShapeAndSum(first_channel, (60, 512, 512, 25),
                                    1649546136958)
        first_frame = scan[:, :, :, :, 0]
        self.assertEqualShapeAndSum(first_frame, (60, 512, 512, 2),
                                    69769537416)
예제 #4
0
    def test_2016b_multiroi_hard(self):
        """Read a 2016b multiROI scan whose fields have DIFFERENT spatial
        shapes: iteration yields per-field shapes, full-array conversion and
        whole-scan slicing in y/x/channel/frame must raise, and slicing is
        only valid over a subset of fields that share a shape."""
        scan = scanreader.read_scan(scan_file_2016b_multiroi_hard)

        # Test it is iterable; first two fields are 800x512, last two 512x512
        fields_shapes = [(800, 512, 2, 10), (800, 512, 2, 10),
                         (512, 512, 2, 10), (512, 512, 2, 10)]
        fields_sum = [2248989268, 2238433858, 1496780320, 1444886093]
        for i, field in enumerate(scan):
            self.assertEqualShapeAndSum(field, fields_shapes[i], fields_sum[i])

        # Test it can NOT be obtained as array (fields of unequal shape)
        self.assertRaises(ScanReaderException, lambda: np.array(scan))

        # Test indexation
        first_field = scan[0, :, :, :, :]
        self.assertEqualShapeAndSum(first_field, (800, 512, 2, 10), 2248989268)
        first_row = scan[:, 0, :, :, :]
        self.assertEqualShapeAndSum(first_row, (4, 512, 2, 10), 10999488)
        # Slicing past the first row/column across all fields is ambiguous
        # when field shapes differ, so these raise
        self.assertRaises(ScanReaderException, lambda: scan[:, :, 0, :, :])
        self.assertRaises(ScanReaderException, lambda: scan[:, :, :, 0, :])
        self.assertRaises(ScanReaderException, lambda: scan[:, :, :, :, 0])

        # Test indexation for last two slices (these share a 512x512 shape)
        first_column = scan[-2:, :, 0, :, :]
        self.assertEqualShapeAndSum(first_column, (2, 512, 2, 10), 3436369)
        first_channel = scan[-2:, :, :, 0, :]
        self.assertEqualShapeAndSum(first_channel, (2, 512, 512, 10),
                                    2944468254)
        first_frame = scan[-2:, :, :, :, 0]
        self.assertEqualShapeAndSum(first_frame, (2, 512, 512, 2), 290883684)
예제 #5
0
    def test_2018a_multiroi(self):
        """Read a 2018a multiROI scan (15 equal-shaped fields, 1 channel) and
        check iteration, array conversion, and single-index slicing against
        precomputed checksum fixtures."""
        scan = scanreader.read_scan(scan_file_2018a_multiroi)

        # Test it is iterable
        fields_sum = [
            958342536, 872406772, 565935414, 190064269, -313625001, 871668983,
            803397461, 518333983, 132827834, -330521191, 537668374, 510901435,
            295330281, -64869402, -381772187
        ]
        for i, field in enumerate(scan):
            self.assertEqualShapeAndSum(field, (512, 512, 1, 159),
                                        fields_sum[i])

        # Test it can be obtained as array
        scan_as_array = np.array(scan)
        self.assertEqualShapeAndSum(scan_as_array, (15, 512, 512, 1, 159),
                                    5166089561)

        # Test indexation
        first_field = scan[0, :, :, :, :]
        self.assertEqualShapeAndSum(first_field, (512, 512, 1, 159), 958342536)
        first_row = scan[:, 0, :, :, :]
        self.assertEqualShapeAndSum(first_row, (15, 512, 1, 159), -20472715)
        first_column = scan[:, :, 0, :, :]
        self.assertEqualShapeAndSum(first_column, (15, 512, 1, 159), -8517112)
        # Single channel: selecting channel 0 keeps the full-scan sum
        first_channel = scan[:, :, :, 0, :]
        self.assertEqualShapeAndSum(first_channel, (15, 512, 512, 159),
                                    5166089561)
        first_frame = scan[:, :, :, :, 0]
        self.assertEqualShapeAndSum(first_frame, (15, 512, 512, 1), 23273401)
예제 #6
0
    def test_2016b_multiroi(self):
        """Read a 2016b multiROI scan (10 equal-shaped 500x250 fields, one
        channel) and check iteration, array conversion, and single-index
        slicing against precomputed checksum fixtures."""
        scan = scanreader.read_scan(scan_file_2016b_multiroi)

        # Test it is iterable
        fields_sum = [
            10437019861, 8288826827, 8590264328, 6532028278, 7713680015,
            6058542598, 7171244110, 5541391024, 6386669378, 4886799974
        ]
        for i, field in enumerate(scan):
            self.assertEqualShapeAndSum(field, (500, 250, 1, 100),
                                        fields_sum[i])

        # Test it can be obtained as array
        scan_as_array = np.array(scan)
        self.assertEqualShapeAndSum(scan_as_array, (10, 500, 250, 1, 100),
                                    71606466393)

        # Test indexation
        first_field = scan[0, :, :, :, :]
        self.assertEqualShapeAndSum(first_field, (500, 250, 1, 100),
                                    10437019861)
        first_row = scan[:, 0, :, :, :]
        self.assertEqualShapeAndSum(first_row, (10, 250, 1, 100), 147185283)
        first_column = scan[:, :, 0, :, :]
        self.assertEqualShapeAndSum(first_column, (10, 500, 1, 100), 224378620)
        # Single channel: selecting channel 0 keeps the full-scan sum
        first_channel = scan[:, :, :, 0, :]
        self.assertEqualShapeAndSum(first_channel, (10, 500, 250, 100),
                                    71606466393)
        first_frame = scan[:, :, :, :, 0]
        self.assertEqualShapeAndSum(first_frame, (10, 500, 250, 1), 663727054)
예제 #7
0
    def test_2016b_multiroi_multifile(self):
        """Read a 2016b multiROI scan split across files (10 fields, 200
        frames total) and check iteration, array conversion, and single-index
        slicing against precomputed checksum fixtures."""
        scan = scanreader.read_scan(scan_file_2016b_multiroi_multifiles)

        # Test it is iterable
        fields_sum = [
            20522111917, 16488768331, 16895482228, 13022673521, 15193380706,
            12066890926, 14094412675, 11043585631, 12549291755, 9747543988
        ]
        for i, field in enumerate(scan):
            self.assertEqualShapeAndSum(field, (500, 250, 1, 200),
                                        fields_sum[i])

        # Test it can be obtained as array
        scan_as_array = np.array(scan)
        self.assertEqualShapeAndSum(scan_as_array, (10, 500, 250, 1, 200),
                                    141624141678)

        # Test indexation
        first_field = scan[0, :, :, :, :]
        self.assertEqualShapeAndSum(first_field, (500, 250, 1, 200),
                                    20522111917)
        first_row = scan[:, 0, :, :, :]
        self.assertEqualShapeAndSum(first_row, (10, 250, 1, 200), 291067934)
        first_column = scan[:, :, 0, :, :]
        self.assertEqualShapeAndSum(first_column, (10, 500, 1, 200), 442948597)
        # Single channel: selecting channel 0 keeps the full-scan sum
        first_channel = scan[:, :, :, 0, :]
        self.assertEqualShapeAndSum(first_channel, (10, 500, 250, 200),
                                    141624141678)
        first_frame = scan[:, :, :, :, 0]
        self.assertEqualShapeAndSum(first_frame, (10, 500, 250, 1), 663727054)
예제 #8
0
    def test_2020(self):
        """Checks on a ScanImage 2020 scan: iteration, conversion to a numpy
        array, and slicing a single index along every dimension, all compared
        against precomputed shape/checksum fixtures."""
        scan = scanreader.read_scan(scan_file_2020)

        # Iterating the scan yields one field per iteration
        expected_sums = [-24781107]
        for index, current_field in enumerate(scan):
            self.assertEqualShapeAndSum(current_field, (256, 256, 2, 50),
                                        expected_sums[index])

        # The whole scan can be materialized as a numpy array
        as_array = np.array(scan)
        self.assertEqualShapeAndSum(as_array, (1, 256, 256, 2, 50), -24781107)

        # Single-index slices along each dimension
        self.assertEqualShapeAndSum(scan[0, :, :, :, :], (256, 256, 2, 50),
                                    -24781107)
        self.assertEqualShapeAndSum(scan[:, 0, :, :, :], (1, 256, 2, 50),
                                    -107604)
        self.assertEqualShapeAndSum(scan[:, :, 0, :, :], (1, 256, 2, 50),
                                    -94825)
        self.assertEqualShapeAndSum(scan[:, :, :, 0, :], (1, 256, 256, 50),
                                    -15753120)
        self.assertEqualShapeAndSum(scan[:, :, :, :, 0], (1, 256, 256, 2),
                                    -466022)
예제 #9
0
    def test_5_1_multifile(self):
        """Read a ScanImage 5.1 scan split across multiple files (3 fields,
        1500 frames total) and check iteration, array conversion, and
        single-index slicing against precomputed checksum fixtures."""
        scan = scanreader.read_scan(scan_file_5_1_multifiles)

        # Test it is iterable
        fields_sum = [163553755531, 171473993442, 180238513125]
        for i, field in enumerate(scan):
            self.assertEqualShapeAndSum(field, (256, 256, 2, 1500),
                                        fields_sum[i])

        # Test it can be obtained as array
        scan_as_array = np.array(scan)
        self.assertEqualShapeAndSum(scan_as_array, (3, 256, 256, 2, 1500),
                                    515266262098)

        # Test indexation
        first_field = scan[0, :, :, :, :]
        self.assertEqualShapeAndSum(first_field, (256, 256, 2, 1500),
                                    163553755531)
        first_row = scan[:, 0, :, :, :]
        self.assertEqualShapeAndSum(first_row, (3, 256, 2, 1500), 1328396733)
        first_column = scan[:, :, 0, :, :]
        self.assertEqualShapeAndSum(first_column, (3, 256, 2, 1500), 734212945)
        first_channel = scan[:, :, :, 0, :]
        self.assertEqualShapeAndSum(first_channel, (3, 256, 256, 1500),
                                    487380452100)
        first_frame = scan[:, :, :, :, 0]
        self.assertEqualShapeAndSum(first_frame, (3, 256, 256, 2), 337564522)
예제 #10
0
    def test_5_3(self):
        """Checks on a ScanImage 5.3 scan: iteration, conversion to a numpy
        array, and slicing a single index along every dimension, all compared
        against precomputed shape/checksum fixtures."""
        scan = scanreader.read_scan(scan_file_5_3)

        # Each iterated field has the expected shape and checksum
        expected_sums = [1471837154]
        for index, current_field in enumerate(scan):
            self.assertEqualShapeAndSum(current_field, (256, 256, 2, 21),
                                        expected_sums[index])

        # The whole scan can be materialized as a numpy array
        as_array = np.array(scan)
        self.assertEqualShapeAndSum(as_array, (1, 256, 256, 2, 21),
                                    1471837154)

        # Single-index slices along each dimension
        self.assertEqualShapeAndSum(scan[0, :, :, :, :], (256, 256, 2, 21),
                                    1471837154)
        self.assertEqualShapeAndSum(scan[:, 0, :, :, :], (1, 256, 2, 21),
                                    5749516)
        self.assertEqualShapeAndSum(scan[:, :, 0, :, :], (1, 256, 2, 21),
                                    5749625)
        self.assertEqualShapeAndSum(scan[:, :, :, 0, :], (1, 256, 256, 21),
                                    923762774)
        self.assertEqualShapeAndSum(scan[:, :, :, :, 0], (1, 256, 256, 2),
                                    70090210)
예제 #11
0
    def test_5_2(self):
        """Read a ScanImage 5.2 scan (3 fields, 512x512, 2 channels, 366
        frames) and check iteration, array conversion, and single-index
        slicing against precomputed checksum fixtures."""
        scan = scanreader.read_scan(scan_file_5_2)

        # Test it is iterable
        fields_sum = [165077647124, 150775776929, 176081992915]
        for i, field in enumerate(scan):
            self.assertEqualShapeAndSum(field, (512, 512, 2, 366),
                                        fields_sum[i])

        # Test it can be obtained as array
        scan_as_array = np.array(scan)
        self.assertEqualShapeAndSum(scan_as_array, (3, 512, 512, 2, 366),
                                    491935416968)

        # Test indexation
        first_field = scan[0, :, :, :, :]
        self.assertEqualShapeAndSum(first_field, (512, 512, 2, 366),
                                    165077647124)
        first_row = scan[:, 0, :, :, :]
        self.assertEqualShapeAndSum(first_row, (3, 512, 2, 366), 879446899)
        first_column = scan[:, :, 0, :, :]
        self.assertEqualShapeAndSum(first_column, (3, 512, 2, 366), 236836271)
        first_channel = scan[:, :, :, 0, :]
        self.assertEqualShapeAndSum(first_channel, (3, 512, 512, 366),
                                    468225501096)
        first_frame = scan[:, :, :, :, 0]
        self.assertEqualShapeAndSum(first_frame, (3, 512, 512, 2), 1381773476)
예제 #12
0
    def test_5_1(self):
        """Read a single-file ScanImage 5.1 scan (3 fields, 256x256, 2
        channels, 1000 frames) and check iteration, array conversion, and
        single-index slicing against precomputed checksum fixtures."""
        scan = scanreader.read_scan(scan_file_5_1)

        # Test it is iterable
        fields_sum = [114187329049, 119703328706, 125845219838]
        for i, field in enumerate(scan):
            self.assertEqualShapeAndSum(field, (256, 256, 2, 1000),
                                        fields_sum[i])

        # Test it can be obtained as array
        scan_as_array = np.array(scan)
        self.assertEqualShapeAndSum(scan_as_array, (3, 256, 256, 2, 1000),
                                    359735877593)

        # Test indexation
        first_field = scan[0, :, :, :, :]
        self.assertEqualShapeAndSum(first_field, (256, 256, 2, 1000),
                                    114187329049)
        first_row = scan[:, 0, :, :, :]
        self.assertEqualShapeAndSum(first_row, (3, 256, 2, 1000), 917519804)
        first_column = scan[:, :, 0, :, :]
        self.assertEqualShapeAndSum(first_column, (3, 256, 2, 1000), 498901499)
        first_channel = scan[:, :, :, 0, :]
        self.assertEqualShapeAndSum(first_channel, (3, 256, 256, 1000),
                                    340492324453)
        first_frame = scan[:, :, :, :, 0]
        self.assertEqualShapeAndSum(first_frame, (3, 256, 256, 2), 337564522)
예제 #13
0
    def test_2016b(self):
        """Read a 2016b stack (single 256x256 field, one channel, 200 frames)
        and check iteration, array conversion, and single-index slicing
        against precomputed checksum fixtures."""
        scan = scanreader.read_scan(stack_file_2016b)

        # Test it is iterable
        fields_sum = [-7855587]
        for i, field in enumerate(scan):
            self.assertEqualShapeAndSum(field, (256, 256, 1, 200),
                                        fields_sum[i])

        # Test it can be obtained as array
        scan_as_array = np.array(scan)
        self.assertEqualShapeAndSum(scan_as_array, (1, 256, 256, 1, 200),
                                    -7855587)

        # Test indexation
        first_field = scan[0, :, :, :, :]
        self.assertEqualShapeAndSum(first_field, (256, 256, 1, 200), -7855587)
        first_row = scan[:, 0, :, :, :]
        self.assertEqualShapeAndSum(first_row, (1, 256, 1, 200), -30452)
        first_column = scan[:, :, 0, :, :]
        self.assertEqualShapeAndSum(first_column, (1, 256, 1, 200), -31680)
        # Single field and channel: channel-0 slice keeps the full-scan sum
        first_channel = scan[:, :, :, 0, :]
        self.assertEqualShapeAndSum(first_channel, (1, 256, 256, 200),
                                    -7855587)
        first_frame = scan[:, :, :, :, 0]
        self.assertEqualShapeAndSum(first_frame, (1, 256, 256, 1), -42389)
예제 #14
0
    def test_2016b_multiroi(self):
        """Read a 2016b multiROI stack (204 fields of 360x120, 2 channels, 10
        frames) and check iteration, array conversion, and single-index
        slicing against precomputed checksum fixtures."""
        scan = scanreader.read_scan(stack_file_2016b_multiroi)

        # Test it is iterable
        for i, field in enumerate(scan):
            self.assertEqual(field.shape, (360, 120, 2, 10))
        self.assertEqual(i, 203)

        # Test it can be obtained as array
        scan_as_array = np.array(scan)
        self.assertEqualShapeAndSum(scan_as_array, (204, 360, 120, 2, 10),
                                    30797502048)

        # Test indexation
        first_field = scan[0, :, :, :, :]
        self.assertEqualShapeAndSum(first_field, (360, 120, 2, 10), 148674123)
        first_row = scan[:, 0, :, :, :]
        self.assertEqualShapeAndSum(first_row, (204, 120, 2, 10), 70350224)
        first_column = scan[:, :, 0, :, :]
        self.assertEqualShapeAndSum(first_column, (204, 360, 2, 10), 160588726)
        first_channel = scan[:, :, :, 0, :]
        self.assertEqualShapeAndSum(first_channel, (204, 360, 120, 10),
                                    26825949131)
        first_frame = scan[:, :, :, :, 0]
        self.assertEqualShapeAndSum(first_frame, (204, 360, 120, 2),
                                    2952050950)
예제 #15
0
def ingest():
    """Register new imaging sessions found under the root data directory.

    Walks root / subject / session_folder / *.tif, tries to read each new
    session folder as a ScanImage recording, and inserts the corresponding
    Scanner, Session and Scan entries (duplicates skipped). Finally creates
    one suite2p ProcessingTask per scan.
    """
    # ========== Insert new "Session" and "Scan" ===========
    data_dir = get_imaging_root_data_dir()

    # Folder structure: root / subject / session / .tif (raw)
    sessions, scans, scanners = [], [], []
    seen_folders = []
    for subj_key in subject.Subject.fetch('KEY'):
        subj_dir = data_dir / subj_key['subject']
        if not subj_dir.exists():
            continue
        for tiff_filepath in subj_dir.glob('*/*.tif'):
            sess_folder = tiff_filepath.parent.name
            if sess_folder in seen_folders:
                continue
            tiff_filepaths = [
                fp.as_posix()
                for fp in (subj_dir / sess_folder).glob('*.tif')
            ]
            try:  # attempt to read .tif as a scanimage file
                scan = scanreader.read_scan(tiff_filepaths)
            except Exception as e:
                # Not a readable ScanImage folder: report and move on. The
                # folder is not marked as seen, matching the original flow.
                print(
                    f'ScanImage loading error: {tiff_filepaths}\n{str(e)}'
                )
                continue

            recording_time = get_scanimage_acq_time(scan)
            header = parse_scanimage_header(scan)
            scanner = header['SI_imagingSystem']

            scanners.append({'scanner': scanner})
            sessions.append({
                **subj_key, 'session_datetime':
                recording_time
            })
            scans.append({
                **subj_key, 'session_datetime': recording_time,
                'scan_id': 0,
                'scanner': scanner
            })
            seen_folders.append(sess_folder)

    print(f'Inserting {len(sessions)} session(s)')
    Scanner.insert(scanners, skip_duplicates=True)
    Session.insert(sessions, skip_duplicates=True)
    imaging.Scan.insert(scans, skip_duplicates=True)

    # ========== Create ProcessingTask for each scan ===========

    imaging.ProcessingTask.insert([{
        **sc, 'processing_instance': uuid.uuid4(),
        'processing_method': 'suite2p',
        'paramset_idx': 0
    } for sc in imaging.Scan.fetch('KEY')])
예제 #16
0
def _get_scan_sample(key,
                     sample_length=15,
                     sample_size=(-1, -1),
                     sample_fps=5):
    """ Load and correct the scan, get some frames from the middle, resize them
    and interpolate to the desired frame rate.

    Arguments:
        key: Dictionary with scan keys including slice and channel.
        sample_length: Length (in minutes) of the sample.
        sample_size: (height, width) Spatial dimensions for the sample. Use
            (-1, -1) (the default) to keep the scan's original dimensions.
        sample_fps: Desired frames per second of the sample.

    Returns:
        Array of corrected, optionally resized frames interpolated to
        sample_fps.
    """
    import scanreader
    from scipy.interpolate import interp1d

    # Read the scan
    scan_filename = (experiment.Scan() & key).local_filenames_as_wildcard
    scan = scanreader.read_scan(scan_filename, dtype=np.float32)

    # Load sample_length minutes of frames from the middle of the scan
    half_length = round(sample_length / 2 * 60 * scan.fps)
    if scan.num_frames < half_length * 2:
        raise ValueError('Scan {} is too short (< {} min long).'.format(
            key, sample_length))
    middle_frame = int(np.floor(scan.num_frames / 2))
    frames = slice(middle_frame - half_length, middle_frame + half_length)
    sample = scan[key['slice'] - 1, :, :, key['channel'] - 1, frames]
    num_frames = sample.shape[-1]

    # Correct the scan
    correct_raster = (reso.RasterCorrection() & key).get_correct_raster()
    correct_motion = (reso.MotionCorrection() & key).get_correct_motion()
    corrected_sample = correct_motion(correct_raster(sample), frames)

    # Resize. Bug fix: the resized frames used to be discarded by a stray
    # `resized_sample = corrected_sample` after the loop, and the default
    # sample_size of (-1, -1) crashed np.empty; (-1, -1) now means "keep
    # original dimensions".
    if tuple(sample_size) == (-1, -1):
        resized_sample = corrected_sample
    else:
        # NOTE(review): scipy.misc.imresize was removed in SciPy 1.3; this
        # path requires an older SciPy or replacing `misc.imresize`.
        resized_sample = np.empty([*sample_size, num_frames], dtype=np.float32)
        for i in range(num_frames):
            resized_sample[:, :, i] = misc.imresize(corrected_sample[:, :, i],
                                                    sample_size,
                                                    interp='lanczos',
                                                    mode='F')

    # Interpolate to desired frame rate (if memory is a constraint, run per pixel)
    num_output_frames = round(sample_length * 60 * sample_fps)
    f = interp1d(np.linspace(0, 1, num_frames),
                 resized_sample,
                 kind='cubic',
                 copy=False)
    output_sample = f(np.linspace(0, 1, num_output_frames))

    return output_sample
예제 #17
0
File: stack.py  Project: zhoupc/ease
    def _make_tuples(self, key):
        """ Compute motion shifts to align frames over time and over slices."""
        print('Computing motion correction for ROI', key)

        # Get some params
        res = (StackInfo.ROI() & key).fetch1('nframes', 'roi_px_height',
                                             'roi_px_width', 'field_ids')
        num_frames, image_height, image_width, field_ids = res
        # 'channel' is stored 1-based; reader indexing is 0-based
        correction_channel = (CorrectionChannel() & key).fetch1('channel') - 1

        # One row of shifts per field; stays all-zero for single-frame ROIs
        y_shifts = np.zeros([len(field_ids), num_frames])
        x_shifts = np.zeros([len(field_ids), num_frames])
        if num_frames > 1:
            # Read the ROI
            filename_rel = (experiment.Stack.Filename() &
                            (StackInfo.ROI() & key))
            roi_filename = filename_rel.local_filenames_as_wildcard
            roi = scanreader.read_scan(roi_filename)

            # Compute some params: ignore a 10% border on each spatial edge
            skip_rows = int(round(image_height * 0.10))
            skip_cols = int(round(image_width * 0.10))

            # Map: Compute shifts in parallel (one task per field)
            f = performance.parallel_motion_stack  # function to map
            raster_phase = (RasterCorrection() & key).fetch1('raster_phase')
            fill_fraction = (StackInfo() & key).fetch1('fill_fraction')
            # Cap shifts at 20 microns, converted to pixels
            max_y_shift, max_x_shift = 20 / (StackInfo.ROI()
                                             & key).microns_per_pixel
            results = performance.map_fields(f,
                                             roi,
                                             field_ids=field_ids,
                                             channel=correction_channel,
                                             kwargs={
                                                 'raster_phase': raster_phase,
                                                 'fill_fraction':
                                                 fill_fraction,
                                                 'skip_rows': skip_rows,
                                                 'skip_cols': skip_cols,
                                                 'max_y_shift': max_y_shift,
                                                 'max_x_shift': max_x_shift
                                             })

            # Reduce: Collect results
            for field_idx, y_shift, x_shift in results:
                y_shifts[field_idx] = y_shift
                x_shifts[field_idx] = x_shift

        # Insert
        self.insert1({**key, 'y_shifts': y_shifts, 'x_shifts': x_shifts})

        self.notify(key)
예제 #18
0
File: stack.py  Project: zhoupc/ease
    def _make_tuples(self, key):
        """ Compute raster phase discarding top and bottom 15% of slices and tapering
        edges to avoid edge artifacts."""
        print('Computing raster correction for ROI', key)

        # Get some params
        res = (StackInfo.ROI() & key).fetch1('bidirectional', 'roi_px_height',
                                             'roi_px_width', 'field_ids')
        is_bidirectional, image_height, image_width, field_ids = res
        # 'channel' is stored 1-based; reader indexing is 0-based
        correction_channel = (CorrectionChannel() & key).fetch1('channel') - 1

        # Raster correction is only needed for bidirectional scanning
        if is_bidirectional:
            # Read the ROI
            filename_rel = (experiment.Stack.Filename() &
                            (StackInfo.ROI() & key))
            roi_filename = filename_rel.local_filenames_as_wildcard
            roi = scanreader.read_scan(roi_filename)

            # Compute some parameters
            # NOTE(review): the docstring says 15% but this skips 10%, and the
            # slice below drops 2 * skip_fields at the bottom — confirm intent.
            skip_fields = max(1, int(round(len(field_ids) * 0.10)))
            taper = np.sqrt(
                np.outer(signal.tukey(image_height, 0.4),
                         signal.tukey(image_width, 0.4)))

            # Compute raster phase for each slice and take the median
            raster_phases = []
            for field_id in field_ids[skip_fields:-2 * skip_fields]:
                # Create template (average frame tapered to avoid edge artifacts)
                slice_ = roi[field_id, :, :,
                             correction_channel, :].astype(np.float32,
                                                           copy=False)
                anscombed = 2 * np.sqrt(slice_ - slice_.min(axis=(0, 1)) +
                                        3 / 8)  # anscombe transform
                template = np.mean(anscombed, axis=-1) * taper

                # Compute raster correction
                raster_phases.append(
                    galvo_corrections.compute_raster_phase(
                        template, roi.temporal_fill_fraction))
            raster_phase = np.median(raster_phases)
            raster_std = np.std(raster_phases)
        else:
            raster_phase = 0
            raster_std = 0

        # Insert
        self.insert1({
            **key, 'raster_phase': raster_phase,
            'raster_std': raster_std
        })

        self.notify(key)
예제 #19
0
    def test_exceptions(self):
        """Bad filenames, oversized/out-of-bounds indices and unsupported
        index types all raise the expected exception types."""
        # Wrong type and inexistent file
        with self.assertRaises(TypeError):
            scanreader.read_scan(None)
        with self.assertRaises(ScanReaderException):
            scanreader.read_scan('inexistent_file.tif')

        scan = scanreader.read_scan(scan_file_5_1)

        # Too many dimensions
        with self.assertRaises(IndexError):
            scan[0, 1, 2, 3, 4, 5]

        # Out of bounds, shape is (3, 256, 256, 2, 1000)
        with self.assertRaises(IndexError):
            scan[-4]
        with self.assertRaises(IndexError):
            scan[:, -257]
        with self.assertRaises(IndexError):
            scan[:, :, -257]
        with self.assertRaises(IndexError):
            scan[:, :, :, -3]
        with self.assertRaises(IndexError):
            scan[:, :, :, :, -1001]

        # Wrong index type
        with self.assertRaises(TypeError):
            scan[1, 'sup!']
        with self.assertRaises(TypeError):
            scan[[True, False, True]]
        with self.assertRaises(TypeError):
            scan[0.1]
        with self.assertRaises(TypeError):
            scan[0, ...]
예제 #20
0
    def test_advanced_indexing(self):
        """ Testing advanced indexing functionality: slices with steps and
        negative bounds, integer lists (with repeats and out-of-order
        entries), and empty selections."""
        scan = scanreader.read_scan(scan_file_5_1)

        # Testing slices (partial ranges, negative start, negative step)
        part = scan[:, :200, :, 0, -100:]
        self.assertEqualShapeAndSum(part, (3, 200, 256, 100), 22309059758)
        part = scan[::-2]
        self.assertEqualShapeAndSum(part, (2, 256, 256, 2, 1000), 240032548887)

        # Testing lists (repeated and unordered indices are allowed)
        part = scan[:, :, :, [-1, 0, 0, 1], :]
        self.assertEqualShapeAndSum(part, (3, 256, 256, 4, 1000), 719471755186)

        # Testing empty indices
        part = scan[:, :, :, 2:1, :]
        self.assertEqual(part.size, 0)
        part = scan[:, :, [], :, :]
        self.assertEqual(part.size, 0)

        # One field from a page appears twice separated by a field in another page
        scan = scanreader.read_scan(scan_file_2016b_multiroi)
        part = scan[[9, 3, 8, 3, 9, 8]]
        self.assertEqualShapeAndSum(part, (6, 500, 250, 1, 100), 35610995260)
예제 #21
0
File: stack.py  Project: zhoupc/ease
    def _make_tuples(self, key):
        """Compute per-channel quality metrics (mean intensity, contrast,
        summary frames) for every ROI of one stack and insert them."""
        print('Computing quality metrics for stack', key)

        # Insert in Quality
        self.insert1(key)

        for roi_tuple in (StackInfo.ROI() & key).fetch():
            # Load ROI
            roi_filename = (experiment.Stack.Filename()
                            & roi_tuple).local_filenames_as_wildcard
            roi = scanreader.read_scan(roi_filename)

            # 'nchannels' gives the channel count; roi_key below stores 1-based
            for channel in range((StackInfo() & key).fetch1('nchannels')):
                # Map: Compute quality metrics in each field
                f = performance.parallel_quality_stack  # function to map
                field_ids = roi_tuple['field_ids']
                results = performance.map_fields(f,
                                                 roi,
                                                 field_ids=field_ids,
                                                 channel=channel)

                # Reduce: Collect results (one row per depth/field)
                mean_intensities = np.empty(
                    (roi_tuple['roi_px_depth'], roi_tuple['nframes']))
                contrasts = np.empty(
                    (roi_tuple['roi_px_depth'], roi_tuple['nframes']))
                for field_idx, field_mis, field_contrasts, _ in results:
                    mean_intensities[field_idx] = field_mis
                    contrasts[field_idx] = field_contrasts
                # Subsample summary frames across depth
                # NOTE(review): int(len(frames) / 8) is 0 for < 8 fields,
                # which would make the step invalid — confirm inputs.
                frames = [
                    res[3] for res in sorted(results, key=lambda res: res[0])
                ]
                frames = np.stack(frames[::int(len(frames) / 8)],
                                  axis=-1)  # frames at 8 diff depths

                # Insert
                roi_key = {
                    **key, 'roi_id': roi_tuple['roi_id'],
                    'channel': channel + 1
                }
                self.MeanIntensity().insert1({
                    **roi_key, 'intensities':
                    mean_intensities
                })
                self.Contrast().insert1({**roi_key, 'contrasts': contrasts})
                self.SummaryFrames().insert1({**roi_key, 'summary': frames})

                self.notify(roi_key, frames, mean_intensities, contrasts)
예제 #22
0
File: tuning.py  Project: vathes/odor_meso
    def _make_tuples(self, key):
        """Regress each pixel of one corrected scan slice against the
        orientation design matrix and insert the coefficient maps, the R^2
        map and the per-pixel degrees-of-freedom map."""
        # Load the scan
        import scanreader
        scan_filename = (experiment.Scan() & key).local_filenames_as_wildcard
        scan = scanreader.read_scan(scan_filename)
        scan = (scan[key['slice']-1, :, :, 0, :]).astype(np.float32, copy=False)

        # Correct the scan
        correct_motion = (preprocess.Prepare.GalvoMotion() & key).get_correct_motion()
        correct_raster = (preprocess.Prepare.Galvo() & key).get_correct_raster()
        scan = correct_motion(correct_raster(scan))
        # Bug fix: fetch1 is a method — bracket indexing (`fetch1[...]`) never
        # calls it and is inconsistent with every other fetch1(...) in this file
        design, cov = (OriDesignMatrix() & key).fetch1('design_matrix', 'regressor_cov')
        height, width, nslices = (preprocess.Prepare.Galvo() & key).fetch1('px_height', 'px_width', 'nslices')
        # Keep only the design rows belonging to this slice
        design = design[key['slice'] - 1::nslices, :]
        if scan.shape[2] == 2*design.shape[0]:
            scan = (scan[:,:,::2] + scan[:,:,1::2])/2  # this is a hack for mesoscope scanner -- needs fixing

        assert design.shape[0] == scan.shape[2]
        height, width = scan.shape[0:2]    # hack for mesoscope -- needs fixing
        assert (height, width) == scan.shape[0:2]

        # remove periods where the design matrix has any nans
        ix = np.logical_not(np.isnan(design).any(axis=1))
        design = design[ix, :]
        design = design - design.mean()
        nregressors = design.shape[1]

        # normalize scan (per-pixel dF/F against the temporal mean)
        m = scan.mean(axis=-1, keepdims=True)
        scan -= m
        scan /= m
        v = (scan**2).sum(axis=-1)

        # estimate degrees of freedom per pixel from the power spectrum
        spectrum = np.abs(np.fft.fft(scan, axis=-1))
        dof = (spectrum.sum(axis=-1)**2/(spectrum**2).sum(axis=-1)).astype(np.int32)

        # solve least squares; rows = valid time points, columns = pixels
        scan = scan[:, :, ix].reshape((-1, design.shape[0])).T
        x, r2, rank, sv = linalg.lstsq(design, scan, overwrite_a=True, overwrite_b=True, check_finite=False)
        del scan, design

        assert rank == nregressors
        x = x.T.reshape((height, width, -1))
        r2 = 1-r2.reshape((height, width))/v

        self.insert1(dict(key, regr_coef_maps=x, r2_map=r2, dof_map=dof))
예제 #23
0
    def make(self, key):
        """Raster/motion-correct one field-channel of a scan and store it.

        The corrected movie is rescaled to fill the int16 range; the scale
        factor needed to recover the original intensities is stored with it.

        :param dict key: primary key of the scan field/channel to correct.
        """
        from pipeline.utils import performance

        # Load the raw scan
        print("Reading scan...")
        filename_wildcard = (experiment.Scan & key).local_filenames_as_wildcard
        scan = scanreader.read_scan(filename_wildcard)

        # Fetch correction parameters computed by the motion-correction pipeline
        pipe = (fuse.MotionCorrection() & key).module
        raster_phase = (pipe.RasterCorrection() & key).fetch1("raster_phase")
        fill_fraction = (pipe.ScanInfo() & key).fetch1("fill_fraction")
        y_shifts, x_shifts = (pipe.MotionCorrection() & key).fetch1(
            "y_shifts", "x_shifts")

        # Map: correct chunks of frames in parallel
        correction_kwargs = dict(raster_phase=raster_phase,
                                 fill_fraction=fill_fraction,
                                 y_shifts=y_shifts,
                                 x_shifts=x_shifts)
        results = performance.map_frames(performance.parallel_correct_scan,
                                         scan,
                                         field_id=key["field"] - 1,
                                         channel=key["channel"] - 1,
                                         kwargs=correction_kwargs)

        # Reduce: rescale corrected chunks into a single int16 volume
        height, width, _ = results[0][1].shape
        corrected_scan = np.zeros([height, width, scan.num_frames],
                                  dtype=np.int16)
        max_abs_intensity = max(np.abs(chunk).max() for _, chunk in results)
        scale = max_abs_intensity / (2**15 - 1)  # map extremes onto int16 range
        for frame_indices, chunk in results:
            corrected_scan[:, :, frame_indices] = (chunk / scale).astype(np.int16)

        # Insert the corrected scan plus the factor needed to undo the rescaling
        self.insert1({
            **key, "scale_factor": scale,
            "corrected_scan": corrected_scan
        })
예제 #24
0
파일: stack.py 프로젝트: zhoupc/ease
    def save_as_tiff(self, filename='roi.tif', channel=1):
        """ Correct roi and save as a tiff file.

        :param string filename: Name of the output tiff file.
        :param int channel: What channel to use. Starts at 1
        """
        from tifffile import imsave

        # Fetch ROI geometry and the ids of the fields that compose it
        field_ids, px_depth, px_height, px_width = (
            StackInfo.ROI() & self).fetch1('field_ids', 'roi_px_depth',
                                           'roi_px_height', 'roi_px_width')

        # Load ROI from disk
        roi_filename = (experiment.Stack.Filename()
                        & self).local_filenames_as_wildcard
        roi = scanreader.read_scan(roi_filename)

        # Map: apply raster and motion corrections to each field in parallel
        raster_phase = (RasterCorrection() & self).fetch1('raster_phase')
        fill_fraction = (StackInfo() & self).fetch1('fill_fraction')
        y_shifts, x_shifts = self.fetch1('y_shifts', 'x_shifts')
        correction_kwargs = {'raster_phase': raster_phase,
                             'fill_fraction': fill_fraction,
                             'y_shifts': y_shifts,
                             'x_shifts': x_shifts}
        results = performance.map_fields(performance.parallel_correct_stack,
                                         roi,
                                         field_ids=field_ids,
                                         channel=channel,
                                         kwargs=correction_kwargs)

        # Reduce: assemble the corrected fields into one volume
        corrected_roi = np.empty((px_depth, px_height, px_width),
                                 dtype=np.float32)
        for field_idx, corrected_field in results:
            corrected_roi[field_idx] = corrected_field

        print('Saving file at:', filename)
        imsave(filename, corrected_roi)
예제 #25
0
def process_scan(scan_key):
    """
    For each entry in `imaging.Scan` table, search for scan data and create a corresponding entry in `scan_element.Scan`
    :param scan_key: a `KEY` of `imaging.Scan`
    """
    for fov_key in (imaging.FieldOfView & scan_key).fetch('KEY'):

        scan_filepaths = get_scan_image_files(fov_key)

        try:  # attempt to read .tif as a scanimage file
            loaded_scan = scanreader.read_scan(scan_filepaths)
            header = parse_scanimage_header(loaded_scan)
            scanner = header['SI_imagingSystem'].strip('\'')
        except Exception as e:
            print(f'ScanImage loading error: {scan_filepaths}\n{str(e)}')
            # BUGFIX: was `return`, which aborted ALL remaining FOVs on the
            # first unreadable scan; skip only the failing FOV instead
            # (consistent with the error handling in ingest_sessions).
            continue

        # Build a per-FOV key without rebinding the caller's scan_key
        fov_scan_key = {**scan_key, 'scan_id': fov_key['fov']}
        if fov_scan_key not in scan_element.Scan():
            Equipment.insert1({'scanner': scanner}, skip_duplicates=True)
            scan_element.Scan.insert1(
                {**fov_scan_key, 'scanner': scanner, 'acq_software': acq_software})
예제 #26
0
파일: stack.py 프로젝트: zhoupc/ease
    def _make_tuples(self, key):
        """ Read and store stack information."""
        print('Reading header...')

        # Open every file that forms this stack
        filename_keys = (experiment.Stack.Filename() & key).fetch(dj.key)
        stacks = []
        for fkey in filename_keys:
            wildcard = (experiment.Stack.Filename()
                        & fkey).local_filenames_as_wildcard
            stacks.append(scanreader.read_scan(wildcard))
        num_rois_per_file = [stack.num_rois if stack.is_multiROI else 1
                             for stack in stacks]

        # Build and insert the Stack entry (header info shared across files)
        tuple_ = dict(key,
                      nrois=np.sum(num_rois_per_file),
                      nchannels=stacks[0].num_channels,
                      z_step=abs(stacks[0].scanning_depths[1] -
                                 stacks[0].scanning_depths[0]),
                      fill_fraction=stacks[0].temporal_fill_fraction)
        StackInfo().insert1(tuple_)

        # Insert one ROI entry per roi of each file
        roi_id = 1
        for fkey, num_rois, stack in zip(filename_keys, num_rois_per_file,
                                         stacks):
            for id_in_file in range(num_rois):
                roi_key = {**key, **fkey, 'roi_id': roi_id}
                StackInfo.ROI()._make_tuples(roi_key, stack, id_in_file)
                roi_id += 1

        # Single-channel stacks need no manual correction-channel choice
        if stacks[0].num_channels == 1:
            CorrectionChannel().fill_in(key)

        self.notify(key)
예제 #27
0
def get_scan_image_files(scan_key):
    """ Locate the ScanImage tiff files belonging to one scan.

    Folder structure: root / subject / session / .tif (raw). Each candidate
    session folder is probed by loading its tiffs with scanreader; the folder
    whose acquisition time lies within two minutes of the scan's session
    datetime wins. Returns the list of tiff paths, or None when nothing
    matches.
    """
    data_dir = get_imaging_root_data_dir()
    subj_dir = data_dir / scan_key['subject']
    if not subj_dir.exists():
        return None

    session_dirs = {fp.parent for fp in subj_dir.glob('*/*.tif')}
    for sess_folder in session_dirs:
        tiff_filepaths = list((subj_dir / sess_folder).glob('*.tif'))
        try:  # attempt to read .tif as a scanimage file
            scan = scanreader.read_scan(
                [fp.as_posix() for fp in tiff_filepaths])
        except Exception as e:
            print(
                f'ScanImage loading error: {tiff_filepaths[0]}\n{str(e)}')
            continue

        recording_time = get_scanimage_acq_time(scan)
        recording_time_diff = abs((scan_key['session_datetime'] -
                                   recording_time).total_seconds())
        if recording_time_diff <= 120:  # acquired within 2 min of the session
            return tiff_filepaths
예제 #28
0
def ingest_sessions(session_csv_path="./user_data/sessions.csv",
                    skip_duplicates=True,
                    verbose=True):
    """Ingest sessions listed in a CSV into Session/Scan/Equipment tables.

    Each CSV row must provide at least ``subject`` and ``session_dir``
    (relative to the imaging root). For each row, the session folder is
    searched for ScanImage (*.tif) or Scanbox (*.sbx) files, the recording
    time and scanner name are extracted, and new Session, SessionDirectory,
    Scan and Equipment entries are queued and inserted.

    :param str session_csv_path: path to the sessions CSV file.
    :param bool skip_duplicates: forwarded to ``scan.Scan.insert``.
        NOTE(review): it is NOT applied to the Session/SessionDirectory
        inserts; those rely only on the `session_key not in session.Session()`
        check below -- confirm this is intended.
    :param bool verbose: print insert summaries when True.
    :raises FileNotFoundError: when a session folder contains neither
        supported file type.
    :raises NotImplementedError: defensive; unreachable given the two-entry
        search list above.
    """
    root_data_dir = get_imaging_root_data_dir()

    # ---------- Insert new "Session" and "Scan" ---------
    with open(session_csv_path, newline="") as f:
        input_sessions = list(csv.DictReader(f, delimiter=","))

    # Folder structure: root / subject / session / .tif (raw)
    session_list, session_dir_list, scan_list, scanner_list = [], [], [], []

    for sess in input_sessions:
        sess_dir = find_full_path(root_data_dir, Path(sess["session_dir"]))

        # search for either ScanImage or Scanbox files (in that order);
        # note the tif search is non-recursive (glob) while the sbx search
        # is recursive (rglob)
        for scan_pattern, scan_type, glob_func in zip(
            ["*.tif", "*.sbx"],
            ["ScanImage", "Scanbox"],
            [sess_dir.glob, sess_dir.rglob],
        ):
            scan_filepaths = [fp.as_posix() for fp in glob_func(scan_pattern)]
            if len(scan_filepaths):
                acq_software = scan_type
                break
        else:
            # for/else: no pattern matched any file in this session folder
            raise FileNotFoundError(
                "Unable to identify scan files from the supported " +
                "acquisition softwares (ScanImage, Scanbox) at: " +
                f"{sess_dir}")

        if acq_software == "ScanImage":
            import scanreader
            from element_interface import scanimage_utils

            try:  # attempt to read .tif as a scanimage file
                loaded_scan = scanreader.read_scan(scan_filepaths)
                recording_time = scanimage_utils.get_scanimage_acq_time(
                    loaded_scan)
                header = scanimage_utils.parse_scanimage_header(loaded_scan)
                scanner = header["SI_imagingSystem"].strip("'")
            except Exception as e:
                # unreadable scan: report and skip this CSV row
                print(f"ScanImage loading error: {scan_filepaths}\n{str(e)}")
                continue
        elif acq_software == "Scanbox":
            import sbxreader

            try:  # attempt to load Scanbox
                sbx_fp = pathlib.Path(scan_filepaths[0])
                sbx_meta = sbxreader.sbx_get_metadata(sbx_fp)
                # read from file when Scanbox support this
                # (file ctime is a stand-in for the true acquisition time)
                recording_time = datetime.fromtimestamp(sbx_fp.stat().st_ctime)
                scanner = sbx_meta.get("imaging_system", "Scanbox")
            except Exception as e:
                print(f"Scanbox loading error: {scan_filepaths}\n{str(e)}")
                continue
        else:
            raise NotImplementedError(
                "Processing scan from acquisition software of " +
                f"type {acq_software} is not yet implemented")

        session_key = {
            "subject": sess["subject"],
            "session_datetime": recording_time
        }
        # Queue inserts only for sessions not already in the database
        if session_key not in session.Session():
            scanner_list.append({"scanner": scanner})
            session_list.append(session_key)
            scan_list.append({
                **session_key,
                "scan_id": 0,
                "scanner": scanner,
                "acq_software": acq_software,
            })

            session_dir_list.append({
                **session_key,
                "session_dir":
                sess_dir.relative_to(root_data_dir).as_posix(),
            })
    # Count distinct scanner names for the summary message
    new_equipment = set(val for dic in scanner_list for val in dic.values())
    if verbose:
        print(f"\n---- Insert {len(new_equipment)} entry(s) into " +
              "experiment.Equipment ----")
    Equipment.insert(scanner_list, skip_duplicates=True)

    if verbose:
        print(
            f"\n---- Insert {len(session_list)} entry(s) into session.Session ----"
        )
    session.Session.insert(session_list)
    session.SessionDirectory.insert(session_dir_list)

    if verbose:
        print(f"\n---- Insert {len(scan_list)} entry(s) into scan.Scan ----")
    scan.Scan.insert(scan_list, skip_duplicates=skip_duplicates)

    if verbose:
        print("\n---- Successfully completed ingest_sessions ----")
예제 #29
0
    def make(self, key):
        """Read one scan's metadata and populate ScanInfo, Field and ScanFile.

        Dispatches on the scan's acquisition software (ScanImage, Scanbox or
        Nikon NIS) to read the raw data's metadata, inserts a scan-level
        summary into this table, per-field geometry into ``self.Field`` and
        file paths (relative to the imaging root) into ``self.ScanFile``.

        :param dict key: primary key of the `Scan` entry to process.
        :raises NotImplementedError: for Scanbox multiROI scans or an
            unsupported acquisition software.
        """
        acq_software = (Scan & key).fetch1("acq_software")

        if acq_software == "ScanImage":
            import scanreader

            # Read the scan
            scan_filepaths = get_scan_image_files(key)
            scan = scanreader.read_scan(scan_filepaths)

            # Motor position at zero may be absent; keep None components then
            x_zero = (scan.motor_position_at_zero[0]
                      if scan.motor_position_at_zero else None)
            y_zero = (scan.motor_position_at_zero[1]
                      if scan.motor_position_at_zero else None)
            z_zero = (scan.motor_position_at_zero[2]
                      if scan.motor_position_at_zero else None)

            # Insert in ScanInfo
            self.insert1(
                dict(
                    key,
                    nfields=scan.num_fields,
                    nchannels=scan.num_channels,
                    nframes=scan.num_frames,
                    ndepths=scan.num_scanning_depths,
                    x=x_zero,
                    y=y_zero,
                    z=z_zero,
                    fps=scan.fps,
                    bidirectional=scan.is_bidirectional,
                    usecs_per_line=scan.seconds_per_line * 1e6,
                    fill_fraction=scan.temporal_fill_fraction,
                    nrois=scan.num_rois if scan.is_multiROI else 0,
                    scan_duration=scan.num_frames / scan.fps,
                ))
            # Insert Field(s). BUGFIX: use `is not None` checks so a motor
            # position of exactly 0.0 is not treated as missing (0.0 is falsy).
            if scan.is_multiROI:
                self.Field.insert([
                    dict(
                        key,
                        field_idx=field_id,
                        px_height=scan.field_heights[field_id],
                        px_width=scan.field_widths[field_id],
                        um_height=scan.field_heights_in_microns[field_id],
                        um_width=scan.field_widths_in_microns[field_id],
                        field_x=x_zero +
                        scan._degrees_to_microns(scan.fields[field_id].x)
                        if x_zero is not None else None,
                        field_y=y_zero +
                        scan._degrees_to_microns(scan.fields[field_id].y)
                        if y_zero is not None else None,
                        field_z=z_zero + scan.fields[field_id].depth
                        if z_zero is not None else None,
                        delay_image=scan.field_offsets[field_id],
                        roi=scan.field_rois[field_id][0],
                    ) for field_id in range(scan.num_fields)
                ])
            else:
                self.Field.insert([
                    dict(
                        key,
                        field_idx=plane_idx,
                        px_height=scan.image_height,
                        px_width=scan.image_width,
                        um_height=getattr(scan, "image_height_in_microns",
                                          None),
                        um_width=getattr(scan, "image_width_in_microns", None),
                        field_x=x_zero,  # already None when position missing
                        field_y=y_zero,
                        field_z=z_zero + scan.scanning_depths[plane_idx]
                        if z_zero is not None else None,
                        delay_image=scan.field_offsets[plane_idx],
                    ) for plane_idx in range(scan.num_scanning_depths)
                ])
        elif acq_software == "Scanbox":
            import sbxreader

            # Read the scan
            scan_filepaths = get_scan_box_files(key)
            sbx_meta = sbxreader.sbx_get_metadata(scan_filepaths[0])
            sbx_matinfo = sbxreader.sbx_get_info(scan_filepaths[0])
            is_multiROI = bool(sbx_matinfo.mesoscope.enabled
                               )  # currently not handling "multiROI" ingestion

            if is_multiROI:
                raise NotImplementedError(
                    "Loading routine not implemented for Scanbox multiROI scan mode"
                )

            # Insert in ScanInfo
            x_zero, y_zero, z_zero = sbx_meta["stage_pos"]
            self.insert1(
                dict(
                    key,
                    nfields=sbx_meta["num_fields"]
                    if is_multiROI else sbx_meta["num_planes"],
                    nchannels=sbx_meta["num_channels"],
                    nframes=sbx_meta["num_frames"],
                    ndepths=sbx_meta["num_planes"],
                    x=x_zero,
                    y=y_zero,
                    z=z_zero,
                    fps=sbx_meta["frame_rate"],
                    # BUGFIX: the original compared the whole metadata dict to
                    # the string "bidirectional" (always False); compare the
                    # scanning mode reported by sbxreader instead.
                    bidirectional=sbx_meta.get(
                        "scanning_mode") == "bidirectional",
                    nrois=sbx_meta["num_rois"] if is_multiROI else 0,
                    scan_duration=(sbx_meta["num_frames"] /
                                   sbx_meta["frame_rate"]),
                ))
            # Insert Field(s)
            if not is_multiROI:
                px_width, px_height = sbx_meta["frame_size"]
                self.Field.insert([
                    dict(
                        key,
                        field_idx=plane_idx,
                        px_height=px_height,
                        px_width=px_width,
                        um_height=px_height * sbx_meta["um_per_pixel_y"]
                        if sbx_meta["um_per_pixel_y"] else None,
                        um_width=px_width * sbx_meta["um_per_pixel_x"]
                        if sbx_meta["um_per_pixel_x"] else None,
                        field_x=x_zero,
                        field_y=y_zero,
                        field_z=z_zero + sbx_meta["etl_pos"][plane_idx],
                    ) for plane_idx in range(sbx_meta["num_planes"])
                ])
        elif acq_software == "NIS":
            import nd2

            # Read the scan
            scan_filepaths = get_nd2_files(key)
            nd2_file = nd2.ND2File(scan_filepaths[0])
            is_multiROI = False  # MultiROI to be implemented later

            # Frames per second: some nd2 layouts nest the period info in
            # `periods`; fall back to the flat layout otherwise.
            try:
                fps = 1000 / nd2_file.experiment[0].parameters.periods[
                    0].periodDiff.avg
            except Exception:  # was a bare except; keep KeyboardInterrupt alive
                fps = 1000 / nd2_file.experiment[0].parameters.periodDiff.avg

            # Estimate ND2 file scan duration
            def estimate_nd2_scan_duration(nd2_scan_obj):
                # Calculates scan duration for Nikon images
                ti = (nd2_scan_obj.frame_metadata(
                    0).channels[0].time.absoluteJulianDayNumber
                      )  # Initial frame's JD.
                tf = (nd2_scan_obj.frame_metadata(
                    nd2_scan_obj.shape[0] -
                    1).channels[0].time.absoluteJulianDayNumber
                      )  # Final frame's JD.

                return (tf - ti) * 86400 + 1 / fps

            scan_duration = sum([
                estimate_nd2_scan_duration(nd2.ND2File(f))
                for f in scan_filepaths
            ])

            # Insert in ScanInfo
            self.insert1(
                dict(
                    key,
                    nfields=nd2_file.sizes.get("P", 1),
                    nchannels=nd2_file.attributes.channelCount,
                    nframes=nd2_file.metadata.contents.frameCount,
                    ndepths=nd2_file.sizes.get("Z", 1),
                    x=None,
                    y=None,
                    z=None,
                    fps=fps,
                    bidirectional=bool(
                        nd2_file.custom_data["GrabberCameraSettingsV1_0"]
                        ["GrabberCameraSettings"]["PropertiesQuality"]
                        ["ScanDirection"]),
                    nrois=0,
                    scan_duration=scan_duration,
                ))

            # MultiROI to be implemented later

            # Insert in Field
            if not is_multiROI:
                self.Field.insert([
                    dict(
                        key,
                        field_idx=plane_idx,
                        px_height=nd2_file.attributes.heightPx,
                        px_width=nd2_file.attributes.widthPx,
                        um_height=nd2_file.attributes.heightPx *
                        nd2_file.voxel_size().y,
                        um_width=nd2_file.attributes.widthPx *
                        nd2_file.voxel_size().x,
                        field_x=None,
                        field_y=None,
                        field_z=None,
                    ) for plane_idx in range(nd2_file.sizes.get("Z", 1))
                ])
        else:
            raise NotImplementedError(
                f"Loading routine not implemented for {acq_software} "
                "acquisition software")

        # Insert file(s) with paths relative to the imaging root directory
        root_dir = find_root_directory(get_imaging_root_data_dir(),
                                       scan_filepaths[0])

        scan_files = [
            pathlib.Path(f).relative_to(root_dir).as_posix()
            for f in scan_filepaths
        ]
        self.ScanFile.insert([{**key, "file_path": f} for f in scan_files])
예제 #30
0
파일: stack.py 프로젝트: zhoupc/ease
    def _make_tuples(self, key):
        """Correct, stitch and store the full stack for every channel.

        For each channel of the stack: applies raster and motion corrections
        to every ROI in parallel, iteratively stitches the corrected ROIs
        into a single volume, inserts the volume's summary into this table
        and one `Slice` entry per depth.

        :param dict key: primary key of the stack to process.
        :raises PipelineException: if the ROIs cannot be stitched into a
            single volume.
        """
        print('Correcting stack', key)

        for channel in range((StackInfo() & key).fetch1('nchannels')):
            # Correct ROIs
            rois = []
            for roi_tuple in (StackInfo.ROI() * Stitching.ROICoordinates()
                              & key).fetch():
                # Load ROI
                roi_filename = (experiment.Stack.Filename()
                                & roi_tuple).local_filenames_as_wildcard
                roi = scanreader.read_scan(roi_filename)

                # Map: Apply corrections to each field in parallel
                f = performance.parallel_correct_stack  # function to map
                raster_phase = (RasterCorrection()
                                & roi_tuple).fetch1('raster_phase')
                fill_fraction = (StackInfo() & key).fetch1('fill_fraction')
                y_shifts, x_shifts = (MotionCorrection() & roi_tuple).fetch1(
                    'y_shifts', 'x_shifts')
                field_ids = roi_tuple['field_ids']
                results = performance.map_fields(f,
                                                 roi,
                                                 field_ids=field_ids,
                                                 channel=channel,
                                                 kwargs={
                                                     'raster_phase':
                                                     raster_phase,
                                                     'fill_fraction':
                                                     fill_fraction,
                                                     'y_shifts': y_shifts,
                                                     'x_shifts': x_shifts
                                                 })

                # Reduce: Collect results
                corrected_roi = np.empty(
                    (roi_tuple['roi_px_depth'], roi_tuple['roi_px_height'],
                     roi_tuple['roi_px_width']),
                    dtype=np.float32)
                for field_idx, corrected_field in results:
                    corrected_roi[field_idx] = corrected_field

                # Create ROI object
                xs, ys = list(roi_tuple['stitch_xs']), list(
                    roi_tuple['stitch_ys'])
                rois.append(
                    stitching.StitchedROI(corrected_roi,
                                          x=xs,
                                          y=ys,
                                          z=roi_tuple['stitch_z'],
                                          id_=roi_tuple['roi_id']))

            def join_rows(rois_):
                """ Iteratively join all rois that overlap in the same row."""
                sorted_rois = sorted(rois_, key=lambda roi: (roi.x, roi.y))

                prev_num_rois = float('inf')
                while len(sorted_rois) < prev_num_rois:
                    prev_num_rois = len(sorted_rois)

                    for left, right in itertools.combinations(sorted_rois, 2):
                        if left.is_aside_to(right):
                            left_xs = [s.x for s in left.slices]
                            left_ys = [s.y for s in left.slices]
                            right.join_with(left, left_xs, left_ys)
                            sorted_rois.remove(left)
                            break  # restart joining

                return sorted_rois

            # Stitch all rois together. This is convoluted because smooth blending in
            # join_with assumes rois are next to (not below or atop of) each other
            prev_num_rois = float('Inf')  # to enter the loop at least once
            while len(rois) < prev_num_rois:
                prev_num_rois = len(rois)

                # Join rows
                rois = join_rows(rois)

                # Join columns: rotate, join as rows, rotate back.
                # (Plain loops instead of side-effect-only comprehensions.)
                for roi in rois:
                    roi.rot90()
                rois = join_rows(rois)
                for roi in rois:
                    roi.rot270()

            # Check stitching went alright
            if len(rois) > 1:
                msg = 'ROIs for volume {} could not be stitched properly'.format(
                    key)
                raise PipelineException(msg)
            stitched = rois[0]

            # Insert in CorrectedStack
            roi_info = StackInfo.ROI() & key & {
                'roi_id': stitched.roi_coordinates[0].id
            }  # one roi from this volume
            tuple_ = {
                **key, 'x': stitched.x,
                'y': stitched.y,
                'z': stitched.z,
                'px_height': stitched.height,
                'px_width': stitched.width
            }
            tuple_[
                'um_height'] = stitched.height * roi_info.microns_per_pixel[0]
            tuple_['um_width'] = stitched.width * roi_info.microns_per_pixel[1]
            tuple_['px_depth'] = roi_info.fetch1(
                'roi_px_depth')  # same as original rois
            tuple_['um_depth'] = roi_info.fetch1(
                'roi_um_depth')  # same as original rois
            self.insert1(tuple_, skip_duplicates=True)

            # Insert each slice
            initial_z = stitched.z
            z_step = (StackInfo() & key).fetch1('z_step')
            for i, slice_ in enumerate(stitched.volume):
                self.Slice().insert1({
                    **key, 'channel': channel + 1,
                    'islice': i + 1,
                    'slice': slice_,
                    'slice_z': initial_z + i * z_step
                })

            self.notify({**key, 'channel': channel + 1})