Example #1
 def test_annotations_roundtrip(self):
     with tb.File(self.h5_name, mode='a') as h5_file:
         io.annotate_database(h5_file)
         annotation_keys = sorted(io.read_annotations(h5_file).keys())
         self.assertEqual(annotation_keys, self.ref_annotation_keys)
         io.remove_annotations(h5_file)
         with self.assertRaises(tb.NoSuchNodeError):
             io.read_annotations(h5_file)
Example #2
def filter_trajectories(movie_name,
                        min_good_fraction=.6,
                        min_vesicle_radius=100,
                        ignore_mixed=True):

    #read data
    with pd.HDFStore(str(movie_name), 'r') as fid:
        trajectories_data = fid['/trajectories_data']

    if len(trajectories_data) == 0:
        return

    with tables.File(str(movie_name), 'r') as fid:
        tot_frames, img_h, img_w = fid.get_node('/mask').shape

    #only keep trajectories that were tracked for at least `min_good_fraction` of the video
    tot_frames = trajectories_data['frame_number'].max() + 1
    traj_length = trajectories_data['worm_index_joined'].value_counts()

    min_frames = math.floor(min_good_fraction * tot_frames)
    valid_index_1 = traj_length.index[traj_length >= min_frames]

    #only keep vesicles that are at least `min_vesicle_radius`
    med_R = trajectories_data.groupby('worm_index_joined').agg(
        {'roi_size': 'median'})['roi_size']
    valid_index_2 = med_R.index[med_R > min_vesicle_radius]

    valid_index = set(valid_index_1) & set(valid_index_2)
    trajectories_data = trajectories_data[
        trajectories_data['worm_index_joined'].isin(valid_index)]

    #ignore rois that touch the border of the video
    rr = trajectories_data['roi_size'] / 2
    bad_x_l = (trajectories_data['coord_x'] - rr) < 0
    bad_y_l = (trajectories_data['coord_y'] - rr) < 0
    bad_x_r = (trajectories_data['coord_x'] + rr) > img_w
    bad_y_r = (trajectories_data['coord_y'] + rr) > img_h

    trajectories_data['in_image'] = ~(bad_x_l | bad_y_l | bad_x_r | bad_y_r)

    valid_index = trajectories_data.groupby('worm_index_joined').agg(
        {'in_image': 'all'})['in_image']
    valid_index = valid_index.index[valid_index]

    if ignore_mixed:
        is_all_mixed = trajectories_data.groupby('worm_index_joined').agg(
            {'worm_label': 'max'})
        is_all_mixed = is_all_mixed['worm_label'] == 1
        valid_index = set(is_all_mixed.index[~is_all_mixed]) & set(valid_index)

    filtered_trajectories = trajectories_data[
        trajectories_data['worm_index_joined'].isin(valid_index)]
    return filtered_trajectories
Example #3
def getHeadProvInt(
        intensities_file,
        trajectories_worm,
        min_block_size,
        peak_search_limits):
    '''
    Estimate the probability that a worm is correctly oriented (head vs tail)
    from its median intensity profile.
    '''
    with tables.File(intensities_file, 'r') as fid:
        int_map_id = trajectories_worm.loc[
            trajectories_worm['int_map_id'] != -1, 'int_map_id']
        if int_map_id.size < min_block_size:
            # too few intensity maps, nothing to do here; return NaNs
            return np.nan, np.nan, []

        worm_int = fid.get_node(
            '/straighten_worm_intensity_median')[int_map_id].astype(np.float64)

    worm_int -= np.median(worm_int, axis=1)[:, np.newaxis]
    # get the median intensity profile
    median_int = np.median(worm_int, axis=0)

    # search for the peaks in the intensity profile (the head typically has a
    # minimum, followed by a maximum, then a minimum and then a maximum)
    peaks_ind = searchIntPeaks(median_int,
                               peak_search_limits=peak_search_limits)

    # calculate the distance between the second minima and the second maxima
    headbot2neck = median_int[peaks_ind[3][0]] - median_int[peaks_ind[2][0]]
    headbot2neck = 0 if headbot2neck < 0 else headbot2neck

    tailbot2waist = median_int[peaks_ind[3][1]] - median_int[peaks_ind[2][1]]
    tailbot2waist = 0 if tailbot2waist < 0 else tailbot2waist

    p_int_bot = headbot2neck / (headbot2neck + tailbot2waist)

    # calculate the distance between the second minima and the first maxima
    headtop2bot = median_int[peaks_ind[1][0]] - median_int[peaks_ind[2][0]]
    headtop2bot = 0 if headtop2bot < 0 else headtop2bot

    tailtop2bot = median_int[peaks_ind[1][1]] - median_int[peaks_ind[2][1]]
    tailtop2bot = 0 if tailtop2bot < 0 else tailtop2bot
    p_int_top = headtop2bot / (headtop2bot + tailtop2bot)

    int_group = (np.min(int_map_id), np.max(int_map_id))

    return p_int_top, p_int_bot, int_group
Example #4
 def _update(self, frame_n):
     if frame_n >= self.next_frame:
         with tables.File(self.video_file, 'r') as fid:            
             self.full_img = fid.get_node('/full_data')[self.current_index]
             if self.full_interval is None:
                 self.full_interval = int(fid.get_node('/full_data')._v_attrs['save_interval'])
         
         self.current_index += 1
         self.next_frame = self.full_interval*(self.current_index + 1)
Example #5
def correctTrajectories(trajectories_file, is_single_worm, join_traj_param):
    if is_single_worm:
        correctSingleWormCase(trajectories_file)
    else:
        joinTrajectories(trajectories_file, **join_traj_param)

    with tables.File(trajectories_file, "r+") as traj_fid:
        traj_fid.get_node('/plate_worms')._v_attrs['has_finished'] = 2
        traj_fid.flush()
Example #6
    def __init__(self, filename):

        if not filename.endswith('.cxi'):
            raise IOError('Can only read .cxi files, got: %s' % filename)

        self._fhandle = tables.File(filename, 'r')
        self._ds1_data = self._fhandle.root.entry_1.instrument_1.detector_1.data

Example #7
def setTableAttribute(tfile, name, value):
    close = False
    if isinstance(tfile, str):
        tfile = tables.File(tfile, "a")
        close = True
    assert isinstance(tfile, tables.File)
    tfile.set_node_attr("/table", name, value)
    if close:
        tfile.close()
Example #8
def save(name, u_kn, N_k, s_n=None, least_significant_digit=None):
    """Create an HDF5 dump of an existing MBAR job for later use / testing.
    
    Parameters
    ----------
    name : str
        Name of dataset
    u_kn : np.ndarray, dtype='float', shape=(n_states, n_samples)
        Reduced potential energies
    N_k : np.ndarray, dtype='int', shape=(n_states)
        Number of samples taken from each state
    s_n : np.ndarray, optional, default=None, dtype=int, shape=(n_samples)
        The state of origin of each sample.  If None, guess the state origins.
    least_significant_digit : int, optional, default=None
        If not None, perform lossy compression using tables.Filters(least_significant_digit=least_significant_digit)

    Notes
    -----
    The output HDF5 files should be readable by the helper functions in pymbar_datasets.py
    """
    import tables

    (n_states, n_samples) = u_kn.shape

    u_kn = ensure_type(u_kn,
                       'float',
                       2,
                       "u_kn or Q_kn",
                       shape=(n_states, n_samples))
    N_k = ensure_type(N_k, 'int64', 1, "N_k", shape=(n_states, ))

    if s_n is None:
        s_n = get_sn(N_k)

    s_n = ensure_type(s_n, 'int64', 1, "s_n", shape=(n_samples, ))

    hdf_filename = os.path.join("./", "%s.h5" % name)
    f = tables.File(hdf_filename, 'a')
    f.create_carray("/",
                   "u_kn",
                   tables.Float64Atom(),
                   obj=u_kn,
                   filters=tables.Filters(
                       complevel=9,
                       complib="zlib",
                       least_significant_digit=least_significant_digit))
    f.create_carray("/",
                   "N_k",
                   tables.Int64Atom(),
                   obj=N_k,
                   filters=tables.Filters(complevel=9, complib="zlib"))
    f.create_carray("/",
                   "s_n",
                   tables.Int64Atom(),
                   obj=s_n,
                   filters=tables.Filters(complevel=9, complib="zlib"))
    f.close()
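For reference, the arrays written by save() can be loaded back with plain PyTables calls. A minimal sketch (the load() helper and the "mydata" file name are hypothetical):

def load(hdf_filename):
    # open the dump read-only and pull the three arrays back into memory
    with tables.open_file(hdf_filename, 'r') as f:
        u_kn = f.get_node('/u_kn').read()
        N_k = f.get_node('/N_k').read()
        s_n = f.get_node('/s_n').read()
    return u_kn, N_k, s_n

u_kn, N_k, s_n = load('./mydata.h5')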
Example #9
def process_h5_file(cur, conn, filepath):
    # open the .h5 file at the given filepath (read-only)
    h5_file = tables.File(filepath, 'r')
    ##################################### extracting and inserting song_table data #######################################
    song_table_data = []
    # song_id
    song_table_data.append(
        str(h5_file.root.metadata.songs.cols.song_id[0]).split("'")[1])
    # title
    song_table_data.append(
        str(h5_file.root.metadata.songs.cols.title[0]).split("'")[1])
    # artist_id
    song_table_data.append(
        str(h5_file.root.metadata.songs.cols.artist_id[0]).split("'")[1])
    # year
    song_table_data.append(h5_file.root.musicbrainz.songs.cols.year[0])
    # duration
    song_table_data.append(
        h5_file.root.analysis.songs.cols.duration[0])
    # converting year and duration from NumPy scalars to native Python types
    song_table_data[3] = song_table_data[3].item()
    song_table_data[4] = song_table_data[4].item()

    # replace NaNs with None (SQL NULL); guard isnan so string fields pass through
    song_table_data = [None if isinstance(val, float) and isnan(val)
                       else val for val in song_table_data]

    # inserting song_table_data into the song table
    cur.execute(song_table_insert, song_table_data)
    ##################################### extracting and inserting artist_table data #######################################
    # artist_id
    artist_table_data = []
    artist_table_data.append(
        str(h5_file.root.metadata.songs.cols.artist_id[0]).split("'")[1])
    # artist_name
    artist_table_data.append(
        str(h5_file.root.metadata.songs.cols.artist_name[0]).split("'")[1])
    # artist_location
    artist_table_data.append(
        str(h5_file.root.metadata.songs.cols.artist_location[0]).split("'")[1])
    # latitude and longitude
    artist_table_data.append(
        h5_file.root.metadata.songs.cols.artist_latitude[0])
    artist_table_data.append(
        h5_file.root.metadata.songs.cols.artist_longitude[0])
    # converting latitude and longitude from numpy.float64 to native Python floats
    artist_table_data[3] = artist_table_data[3].item()
    artist_table_data[4] = artist_table_data[4].item()

    # replace NaNs with None (SQL NULL); guard isnan so string fields pass through
    artist_table_data = [None if isinstance(val, float) and isnan(val)
                         else val for val in artist_table_data]

    # inserting artist_table_data into artist_table
    cur.execute(artist_table_insert, artist_table_data)
    h5_file.close()
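The str(...).split("'")[1] pattern above recovers text from the repr of a bytes object (str(b'abc') yields "b'abc'"). A more direct alternative, assuming the table cells really are bytes, would be .decode(); a small check of the equivalence (the raw value is a stand-in):

raw = b'SOMZWCG12A8C13C480'   # stand-in for h5_file.root.metadata.songs.cols.song_id[0]
assert str(raw).split("'")[1] == raw.decode('utf-8')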
Example #10
    def load_data_from_file(self, src_file):
        def is_in_fold(_id, fold):
            return ((_id - 1) % self.num_folds) == (fold - 1)

        if self.folds2include is None:
            _is_valid_fold = lambda x: True
        elif isinstance(self.folds2include, (int, float)):
            assert self.folds2include >= 1 and self.folds2include <= self.num_folds
            _is_valid_fold = lambda x: is_in_fold(x, self.folds2include)
        else:
            assert all(
                [x >= 1 and x <= self.num_folds for x in self.folds2include])
            _is_valid_fold = lambda x: any(
                [is_in_fold(x, fold) for fold in self.folds2include])

        # For the moment I am not sure how to deal with the type, so I am assuming a single class

        tid = 1
        data = {tid: {}}
        with tables.File(str(src_file), 'r') as fid:

            src_files = fid.get_node('/src_files')[:]

            images = fid.get_node('/images')
            masks = fid.get_node('/masks')

            for row in src_files:
                file_id = row['file_id']
                if not _is_valid_fold(file_id):
                    continue

                ii = file_id - 1
                img = images[ii]
                mask = masks[ii]

                if np.all([x <= self.max_input_size for x in img.shape[:-1]]):

                    data[tid][file_id] = [(img, mask)]
                else:
                    # tile images larger than max_input_size; collect the rois
                    data[tid][file_id] = []

                    inds = []
                    for ss in img.shape[:-1]:
                        n_split = int(np.ceil(ss / self.max_input_size)) + 1
                        ind = np.linspace(0, ss,
                                          n_split)[1:-1].round().astype(int)
                        inds.append([0, *ind.tolist(), ss])

                    inds_i, inds_j = inds
                    for i1, i2 in zip(inds_i[:-1], inds_i[1:]):
                        for j1, j2 in zip(inds_j[:-1], inds_j[1:]):
                            roi = img[i1:i2, j1:j2]
                            roi_mask = mask[i1:i2, j1:j2]

                            data[tid][file_id].append((roi, roi_mask))

        return data
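The is_in_fold() test above assigns 1-based file ids to folds round-robin. A small standalone check of that arithmetic (num_folds=3 is an arbitrary choice):

num_folds = 3

def is_in_fold(_id, fold):
    # 1-based ids and folds: id 1 -> fold 1, id 2 -> fold 2, id 3 -> fold 3,
    # then id 4 wraps back to fold 1, and so on
    return ((_id - 1) % num_folds) == (fold - 1)

assert [f for f in (1, 2, 3) if is_in_fold(4, f)] == [1]
assert [i for i in range(1, 7) if is_in_fold(i, 2)] == [2, 5]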
Example #11
    def __init__(self, skeleton_file, worm_index, rows_range=(0, 0)):
        assert rows_range[0] <= rows_range[1]

        self.file_name = skeleton_file
        with tables.File(self.file_name, 'r') as file_id:
            #data range
            if rows_range[0] == 0 and rows_range[1] == 0:
                #try to read it from the file
                tab = file_id.get_node('/trajectories_data')
                skeleton_id = tab.read_where(
                    'worm_index_joined==%i' % worm_index,
                    field='skeleton_id')

                #the indexes must be continuous
                assert np.all(np.diff(skeleton_id) == 1)

                rows_range = (np.min(skeleton_id), np.max(skeleton_id))
                del skeleton_id

            ini, end = rows_range

            self.index = worm_index
            self.rows_range = rows_range
            self.data_fields = [
                'skeleton', 'skeleton_length', 'contour_side1',
                'contour_side2', 'contour_side1_length',
                'contour_side2_length', 'contour_width', 'contour_area'
            ]

            tab = file_id.get_node('/trajectories_data')
            self.frames = tab.read(ini, end, 1, 'frame_number')
            self.coord_x = tab.read(ini, end, 1, 'coord_x')
            self.coord_y = tab.read(ini, end, 1, 'coord_y')

            self.skeleton = file_id.get_node('/skeleton')[ini:end + 1, :, :]

            self.contour_side1 = file_id.get_node(
                '/contour_side1')[ini:end + 1, :, :]
            self.contour_side2 = file_id.get_node(
                '/contour_side2')[ini:end + 1, :, :]

            self.contour_side1_length = file_id.get_node(
                '/contour_side1_length')[ini:end + 1]  # pixels
            self.contour_side2_length = file_id.get_node(
                '/contour_side2_length')[ini:end + 1]  # pixels
            self.skeleton_length = file_id.get_node(
                '/skeleton_length')[ini:end + 1]  # pixels

            self.contour_width = file_id.get_node(
                '/contour_width')[ini:end + 1, :]

            self.contour_area = file_id.get_node('/contour_area')[ini:end + 1]

        # replace invalid zero entries with np.nan
        invalid = self.skeleton_length == 0
        for field in self.data_fields:
            getattr(self, field)[invalid] = np.nan
Example #12
def image_reader(mask_file, frames2check=None):
    with tables.File(mask_file, 'r') as fid:
        masks = fid.get_node('/mask')

        if frames2check is None:
            frames2check = range(masks.shape[0])

        for frame in tqdm.tqdm(frames2check, desc='Frames read'):
            img = masks[frame]
            yield frame, img
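Since image_reader() is a generator, frames are only read from '/mask' as they are consumed. A usage sketch (the file name and the frame step are hypothetical):

# read every 10th of the first 100 frames
for frame, img in image_reader('movie.hdf5', frames2check=range(0, 100, 10)):
    print(frame, img.shape)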
Example #13
def readTableAttribute(tfile, name):
    close = False
    if isinstance(tfile, str):
        tfile = tables.File(tfile)
        close = True
    assert isinstance(tfile, tables.File)
    result = tfile.get_node_attr("/table", name)
    if close:
        tfile.close()
    return result
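Together with setTableAttribute() from Example #7, this gives a simple attribute round trip on the '/table' node. A sketch, assuming 'data.h5' (a hypothetical file) already contains a node at '/table':

setTableAttribute('data.h5', 'n_rows_expected', 1000)     # opens, writes, closes
value = readTableAttribute('data.h5', 'n_rows_expected')  # opens, reads, closes
assert value == 1000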
Example #14
    def init_buffer(self):
        #we only accept masked files
        assert self.video_file.endswith('hdf5')
        with tables.File(self.video_file, 'r') as fid:
            _bgnd = fid.get_node('/bgnd')
            self.save_interval = int(_bgnd._v_attrs['save_interval'])
            self.precalculated_bgnd = _bgnd[:]

        self.last_frame = 0
        self.bgnd = self.precalculated_bgnd[0]
Example #15
    def open_file(self, path):
        # CAUTION: This is a hack!
        # Use `open_file` when Fred updates os
        self.h5file = tables.File(path, mode="r")
        node = self.h5file.get_node('/', self.data_node)

        self.nodes = [getattr(node, source) for source in self.sources_in_file]
        if self.stop is None:
            self.stop = self.nodes[0].nrows
        self.num_examples = self.stop - self.start
Example #16
 def test_built_in_solver_all_or_none(self):
     magni.cs.phase_transition.config['delta'] = [0.2, 0.3]
     self.assertEqual(
         magni.cs.phase_transition.config['logit_solver'], 'built-in')
     magni.cs.phase_transition._analysis.run(
         self.synthetic_file, 'all_or_none_pt')
     with tb.File(self.synthetic_file, mode='r') as h5_file:
         estimated_pt = h5_file.get_node(
             '/'.join(['', 'all_or_none_pt', 'phase_transition'])).read()
     self.assertTrue(np.allclose(estimated_pt, np.array([1, 0])))
Example #17
def _switch_cnt(skeletons_file):
    with tables.File(skeletons_file, 'r+') as fid:
        # since here we are changing all the contours, let's just change
        # the name of the datasets
        side1 = fid.get_node('/contour_side1')
        side2 = fid.get_node('/contour_side2')

        side1.rename('contour_side1_bkp')
        side2.rename('contour_side1')
        side1.rename('contour_side2')
Example #18
    def _h_read_data(self):
        skel_table_id, timestamp_inds = self._h_get_table_indexes()

        if not np.array_equal(np.sort(timestamp_inds),
                              timestamp_inds):  #the time stamp must be sorted
            warnings.warn(
                '{}: The timestamp is not sorted in worm_index {}'.format(
                    self.file_name, self.worm_index))

        # use real frames to define the size of the object arrays
        first_frame = np.min(timestamp_inds)
        last_frame = np.max(timestamp_inds)
        n_frames = last_frame - first_frame + 1

        # get the appropriate index in the object array
        ind_ff = timestamp_inds - first_frame

        # get the number of segments from the normalized skeleton
        with tables.File(self.file_name, 'r') as ske_file_id:
            self.n_segments = ske_file_id.get_node('/skeleton').shape[1]

        # add the data from the skeleton_id's and timestamps used
        self.timestamp = np.arange(first_frame, last_frame + 1)

        self.skeleton_id = np.full(n_frames, -1, np.int32)
        self.skeleton_id[ind_ff] = skel_table_id

        # initialize the rest of the arrays
        self.skeleton = np.full((n_frames, self.n_segments, 2), np.nan)
        self.ventral_contour = np.full((n_frames, self.n_segments, 2), np.nan)
        self.dorsal_contour = np.full((n_frames, self.n_segments, 2), np.nan)
        self.widths = np.full((n_frames, self.n_segments), np.nan)

        # read data from the skeletons table
        with tables.File(self.file_name, 'r') as ske_file_id:
            self.skeleton[ind_ff] = ske_file_id.get_node(
                '/skeleton')[skel_table_id, :, :] * self.microns_per_pixel
            self.ventral_contour[ind_ff] = ske_file_id.get_node(
                '/contour_side1')[skel_table_id, :, :] * self.microns_per_pixel
            self.dorsal_contour[ind_ff] = ske_file_id.get_node(
                '/contour_side2')[skel_table_id, :, :] * self.microns_per_pixel
            self.widths[ind_ff] = ske_file_id.get_node(
                '/contour_width')[skel_table_id, :] * self.microns_per_pixel
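The first_frame/ind_ff arithmetic above maps sparse timestamps onto dense per-frame arrays, leaving NaNs for missing frames. The same mapping in isolation (the numbers are made up):

import numpy as np

timestamp_inds = np.array([3, 4, 7])    # frames that actually have data
first_frame = timestamp_inds.min()
n_frames = timestamp_inds.max() - first_frame + 1   # dense range 3..7 -> 5 slots
ind_ff = timestamp_inds - first_frame               # -> [0, 1, 4]

dense = np.full(n_frames, np.nan)
dense[ind_ff] = [10., 11., 12.]         # rows read from the table
# dense is now [10., 11., nan, nan, 12.]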
Example #19
def change_attrs(fname, field_name):
    print(os.path.basename(fname))
    read_unit_conversions(fname)
    with tables.File(fname, 'r+') as fid:
        group_to_save = fid.get_node(field_name)
        set_unit_conversions(group_to_save,
                             expected_fps=expected_fps,
                             microns_per_pixel=microns_per_pixel)

    read_unit_conversions(fname)
Example #20
def get_imgstr_from_masked(masked_fname):

    # read frame data
    with tables.File(masked_fname, 'r') as fid:
        frame = fid.get_node('/full_data')[0]

    physical_plate_id = preprocess_frame(frame)
    imgstr = encode_img_for_html(physical_plate_id)

    return imgstr
Example #21
def createSampleVideo(masked_image_file,
                      sample_video_name='',
                      time_factor=8,
                      size_factor=5,
                      skip_factor=2,
                      dflt_fps=30,
                      codec='MPEG',
                      shift_bgnd=False):
    # skip_factor reduces the size of the movie by using fewer frames
    # (e.g. 15 fps instead of 30 fps)

    if not sample_video_name:
        sample_video_name = getSubSampleVidName(masked_image_file)

    # initialize timers
    base_name = masked_image_file.rpartition('.')[0].rpartition(os.sep)[-1]
    progressTime = TimeCounter(
        '{} Generating subsampled video.'.format(base_name))

    with tables.File(masked_image_file, 'r') as fid:
        masks = fid.get_node('/mask')
        tot_frames, im_h, im_w = masks.shape
        im_h, im_w = im_h // size_factor, im_w // size_factor

        fps = read_fps(masked_image_file, dflt_fps)

        tt_vec = _getCorrectedTimeVec(fid, tot_frames)
        # codec values that work: 'H264', 'MPEG', 'XVID'
        vid_writer = cv2.VideoWriter(sample_video_name,
                                     cv2.VideoWriter_fourcc(*codec),
                                     fps / skip_factor, (im_w, im_h),
                                     isColor=False)
        assert vid_writer.isOpened()

        if shift_bgnd:
            # lazy bgnd calculation: take the first and last frames and use
            # the 97.5th percentile of their nonzero pixels
            mm = masks[[0, -1], :, :]
            _bgnd_val = np.percentile(mm[mm != 0], [97.5])[0]

        for frame_number in range(0, tot_frames,
                                  int(time_factor * skip_factor)):
            current_frame = int(tt_vec[frame_number])
            img = masks[current_frame]

            if shift_bgnd:
                img[img == 0] = _bgnd_val

            im_new = cv2.resize(img, (im_w, im_h))
            vid_writer.write(im_new)

            if frame_number % (500 * time_factor) == 0:
                # calculate the progress and put it in a string
                print_flush(progressTime.get_str(frame_number))

        vid_writer.release()
        print_flush(progressTime.get_str(frame_number) + ' DONE.')
Example #22
def switchBlocks(skel_group, skeletons_file, int_group, intensities_file):
    with tables.File(skeletons_file, 'r+') as fid:
        contour_side1 = fid.get_node('/contour_side1')
        contour_side2 = fid.get_node('/contour_side2')
        skeleton = fid.get_node('/skeleton')
        contour_width = fid.get_node('/contour_width')
        
        
        cnt1_length = fid.get_node('/contour_side1_length')
        cnt2_length = fid.get_node('/contour_side2_length')
        
        # w_head_t = fid.get_node('/width_head_tip')
        # w_head_b = fid.get_node('/width_head_base')
        # w_neck = fid.get_node('/width_neck')
        # w_hips = fid.get_node('/width_hips')
        # w_tail_b = fid.get_node('/width_tail_base')
        # w_tail_t = fid.get_node('/width_tail_tip')
            
        for gg in skel_group:
            dat_switch_swap(contour_side1, contour_side2, gg)
            
            dat_switch(skeleton, gg)
            dat_switch(contour_width, gg)
            
            dat_swap(cnt1_length, cnt2_length, gg)
            #dat_swap(w_head_t, w_tail_t, gg)
            #dat_swap(w_head_b, w_tail_b, gg)
            #dat_swap(w_hips, w_neck, gg)
        fid.flush()
        
    with tables.File(intensities_file, 'r+') as fid:
        worm_int_med = fid.get_node('/straighten_worm_intensity_median')
        for gg in int_group:
            dat_switch(worm_int_med, gg)
        
        if '/straighten_worm_intensity' in fid:
            worm_int = fid.get_node('/straighten_worm_intensity')
        
            for ini, fin in int_group:
                dat = worm_int[ini:fin+1, :, :]
                worm_int[ini:fin+1, :, :] = dat[:, ::-1, ::-1]
        fid.flush()
Example #23
    def _convert_raw_shot(self, descriptor):
        """
        Convert a Kitty-generated raw image h5 file to an odin.xray.Shot.
        """

        logger.info('Loading raw image in: %s' % descriptor['data_file'])

        for field in self.essential_fields:
            if field not in descriptor:
                raise ValueError('Essential data field %s not in YAML file!' %
                                 field)

        energy = float(descriptor['photon_eV'])
        path_length = float(descriptor['detector_mm']) * 1000.0  # mm -> um

        # load all the relevant data from many h5s... (dumb)
        f = tables.File(descriptor['x_raw'])
        x = f.root.data.data.read()
        f.close()

        f = tables.File(descriptor['y_raw'])
        y = f.root.data.data.read()
        f.close()

        z = np.zeros_like(x)

        f = tables.File(descriptor['data_file'])
        path = descriptor['i_raw']
        i = f.get_node(path).read()
        f.close()

        # turn that data into a basis representation
        grid_list, flat_i = self._lcls_raw_to_basis(x, y, z, i)

        # generate the detector object
        b = xray.Beam(100, energy=energy)
        d = xray.Detector.from_basis(grid_list, path_length, b.k)

        # generate the shot
        s = xray.Shot(flat_i, d)

        return s
Example #24
def get_food_contour(mask_video,
                     min_area=None,
                     n_bins=180,
                     frac_lowess=0.1,
                     is_debug=False):
    '''
    Identify the contour of a food patch. I tested this for the worm rig.
    It assumes the food has a semi-circular shape. 
    The food lawn is very thin so the challenge was to estimate the contour of a very dim area.
    '''
    progress_timer = TimeCounter('')
    base_name = get_base_name(mask_video)
    print_flush('{} Calculating food contour...'.format(base_name))

    try:
        with tables.File(mask_video, 'r') as fid:
            full_data = fid.get_node(
                '/full_data')[:5]  # only the first two frames are used below
    except tables.exceptions.NoSuchNodeError:
        return None, None

    img = np.max(full_data[:2], axis=0)
    #dark_mask = get_dark_mask(full_data)

    mask = get_patch_mask(img, min_area=min_area)
    circx, circy, best_fit = mask_to_food_contour(mask,
                                                  n_bins=n_bins,
                                                  frac_lowess=frac_lowess)
    dd = '{} Food contour calculated. Total time: {}'.format(
        base_name, progress_timer.get_time_str())
    print_flush(dd)
    if is_debug:
        from skimage.draw import circle_perimeter
        import matplotlib.pylab as plt

        cpx, cpy = circle_perimeter(*best_fit[1:])

        plt.figure(figsize=(5, 5))
        plt.gca().xaxis.set_ticklabels([])
        plt.gca().yaxis.set_ticklabels([])

        (px, py) = np.where(skeletonize(mask))
        plt.imshow(img, cmap='gray')
        plt.plot(py, px, '.')
        plt.plot(cpx, cpy, '.r')
        plt.plot(circy, circx, '.')
        plt.suptitle(base_name)
        plt.grid(False)
    return circx, circy
Example #25
    def from_schafer_file_factory(cls, data_file_path):
        bw = cls()

        with tables.File(data_file_path, 'r') as h:
            # These are all HDF5 'references'
            all_ventral_contours_refs = h.get_node('all_vulva_contours')[:]
            all_dorsal_contours_refs = h.get_node('all_non_vulva_contours')[:]
            all_skeletons_refs = h.get_node('all_skeletons')[:]

            is_stage_movement = utils._extract_time_from_disk(
                h, 'is_stage_movement')
            is_valid = utils._extract_time_from_disk(h, 'is_valid')

            all_skeletons = []
            all_ventral_contours = []
            dorsal_contour = []

            for iFrame, valid_frame in enumerate(is_valid):
                if valid_frame:
                    all_skeletons.append(
                        h[all_skeletons_refs[iFrame][0]].value)
                    all_ventral_contours.append(
                        h[all_ventral_contours_refs[iFrame][0]].value)
                    dorsal_contour.append(
                        h[all_dorsal_contours_refs[iFrame][0]].value)
                else:
                    all_skeletons.append(None)
                    all_ventral_contours.append(None)
                    dorsal_contour.append(None)

        # Video Metadata
        is_stage_movement = is_stage_movement.astype(bool)
        is_valid = is_valid.astype(bool)

        # A kludge, we drop frames in is_stage_movement that are in excess
        # of the number of frames in the video.  It's unclear why
        # is_stage_movement would be longer by 1, which it was in our
        # canonical example.
        is_stage_movement = is_stage_movement[0:len(all_skeletons)]

        # 5. Derive frame_code from the two pieces of data we have,
        #    is_valid and is_stage_movement.
        bw.video_info.frame_code = (1 * is_valid + 2 * is_stage_movement +
                                    100 * ~(is_valid | is_stage_movement))

        # We purposely ignore the saved skeleton information contained
        # in the BasicWorm, preferring to derive it ourselves.
        bw.__remove_precalculated_skeleton()
        #bw.h_skeleton = all_skeletons

        bw._h_ventral_contour = all_ventral_contours
        bw._h_dorsal_contour = dorsal_contour

        return bw
Example #26
    def setUp(self):
        self.path = os.path.dirname(os.path.abspath(__file__))
        self.file_ = 'pt_test_data.hdf5'
        shutil.copy(os.path.join(self.path, self.file_), self.file_)
        self.assertTrue(os.path.exists(self.file_))

        self.label = 'test_io_data'
        with tb.File(self.file_, mode='r') as h5_file:
            self.reference_rho = h5_file.get_node(
                '/' + self.label + '/phase_transition').read()
        self.default_delta = np.linspace(0, 1, len(self.reference_rho) + 1)[1:]
Example #27
    def test_set_backup(self):
        magni.cs.phase_transition._backup.set(
            self.path, (0, 0), 1.0, 2.0, 3.0, 4.0)
        done = magni.cs.phase_transition._backup.get(self.path)
        self.assertTrue(np.any(done))

        with tb.File(self.path, mode='r') as h5:
            self.assertEqual(h5.root.time[0, 0], 1.0)
            self.assertEqual(h5.root.dist[0, 0], 2.0)
            self.assertEqual(h5.root.mse[0, 0], 3.0)
            self.assertEqual(h5.root.norm[0, 0], 4.0)
Example #28
def _h_add_stage_position_pix(mask_file, skeletons_file):
    # if the stage was aligned correctly add the information into the mask file
    microns_per_pixel = read_microns_per_pixel(mask_file)
    with tables.File(mask_file, 'r+') as fid:
        timestamp_c = fid.get_node('/timestamp/raw')[:]
        timestamp = np.arange(np.min(timestamp_c), np.max(timestamp_c) + 1)
        stage_vec_inv, ind_ff = _h_get_stage_inv(skeletons_file, timestamp)
        stage_vec_pix = stage_vec_inv[ind_ff] / microns_per_pixel
        if '/stage_position_pix' in fid:
            fid.remove_node('/', 'stage_position_pix')
        fid.create_array('/', 'stage_position_pix', obj=stage_vec_pix)
Example #29
def add_index(save_file, val_frac=0.2, test_frac=0.02):
    with tables.File(save_file, 'r+') as fid:
        if '/index_groups' in fid:
            fid.remove_node('/index_groups')

        fid.create_group('/', 'index_groups')
        tot_samples = fid.get_node('/mask').shape[0]
        index = _get_index(tot_samples, val_frac=val_frac, test_frac=test_frac)

        for field in index:
            fid.create_carray('/index_groups', field, obj=index[field])
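The carrays written under '/index_groups' can be read back by name. A minimal sketch, assuming _get_index() produced the usual train/validation/test fields (the field names are an assumption):

with tables.File(save_file, 'r') as fid:
    for node in fid.get_node('/index_groups'):
        print(node._v_name, node[:].shape)   # one index array per field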
Example #30
 def updateVideoFile(self):
     if not os.path.exists(self.vfilename):
         QMessageBox.critical(self, 'The hdf5 video file does not exist',
                 "The hdf5 video file does not exist. Please select a valid file",
                 QMessageBox.Ok)
         return
     
     self.ui.lineEdit_video.setText(self.vfilename)
     self.videos_dir = self.vfilename.rpartition(os.sep)[0] + os.sep
     self.fid = tables.File(self.vfilename, 'r')
     
     self.updateImGroup()