def process_erroneous():
    vid = pims.ImageSequence("D:/ultrasound_2_alex/ss316/new_soundfield/tif/ultrasound_228_3*.tif")
    area = process_frames_err(vid, ((120,180),(0,0)), 20037)
    f = h5py.File('./video/ss_228_run_3.h5', 'w')
    f.create_dataset('data', data=area)
    f.flush()
    f.close()
Example #2
def imgseq(run, cam):
    """Load the image sequence given by `run` and `cam`. If not present in the
    `data` folder an image sequence is created from the original video. If that
    video is not present locally, it will be downloaded from the VHub dataset
    repository.

    :param run: Experiment id.
    :type run: str
    :param cam: Camera id, as given by `show()`.
    :type cam: str
    :return: The image sequence.
    :rtype: pims.ImageSequence
    """
    dta = vhub_links[run][cam]
    try:
        fmt = dta['format']
    except KeyError:
        dta = dta['bare']
        fmt = dta['format']
    if isinstance(dta['src'], list):
        camlabel = dta['src'][0]
        camlabel = camlabel[:camlabel.rfind('-')]
    else:
        camlabel = dta['src']
        camlabel = camlabel[:-14]
    camlabel = camlabel[camlabel.rfind('_') + 1:]
    base = "data%s%s_%s%s" % (os.sep, run, camlabel, os.sep)
    if not os.path.exists(base):
        if fmt == "video_mp4":
            if not os.path.exists(base + ".mp4"):
                download_dataset(run=run, cam=camlabel)
            convert_video_to_imgseq(_videoname(run, cam), base)
        elif fmt == "zip-archive":
            if not os.path.exists(base[:-1] + "-0.zip"):
                src = download_dataset(run=run, cam=cam)
            else:
                datadir = os.path.dirname(base[:-1])
                src = [
                    datadir + os.sep + f for f in os.listdir(datadir)
                    if f.endswith('.zip') and os.path.basename(base[:-1]) in f
                ]
            unarchive_imgseq(src=src, base=base)
        else:
            raise ValueError("Got an unknown format '%s' for run '%s', "
                             "cam '%s'" % (fmt, run, cam))
    lbl = ""
    for f in os.listdir(base):
        lbl = f[f.rfind('.') + 1:]
        if lbl in img_format_labels:
            break
    if lbl not in img_format_labels:
        raise ImageFormatError("Did not find any valid image files in %s.\n"
                               "Valid image types are: %s" %
                               (base, img_format_labels))
    if not show_warnings:
        warnings.simplefilter("ignore", UserWarning)
    ret = pims.ImageSequence(base + "*." + lbl,
                             as_grey=True,
                             dtype=float)  # numpy.float was removed in recent NumPy; use the builtin
    return ret
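A minimal usage sketch for `imgseq` above; the run and camera ids are placeholders, and `show()` is assumed (as the docstring says) to list the valid camera ids for a run.

# hypothetical ids -- look them up via show() / vhub_links in the real module
seq = imgseq(run="run_001", cam="cam1")
print(len(seq), seq.frame_shape)   # number of frames and (rows, cols) of each frame
first_frame = seq[0]               # greyscale float frame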
Example #3
def _getSphCoords(PATH, experiment, time, CHANNEL, wellDiameter,
                  marginDistance, aspectRatio):
    """
    CORE FUNCTION:

    Function to retrieve the spheroid coordinates from the BF images. Relies
    upon ID by max gradient values.

    """

    img = pims.ImageSequence(os.path.join(PATH, experiment, CHANNEL, '*.tif'),
                             as_grey=True)
    imToCrop = img[int(time)]

    xCenter, yCenter = _getCenter(img[0], wellDiameter, wellDiameter,
                                  aspectRatio)

    BFimage = _cropper(imToCrop, (xCenter, yCenter), wellDiameter,
                       marginDistance, aspectRatio)
    rRegion = _findSpheroid(BFimage, wellDiameter, aspectRatio, marginDistance)

    # Save an image of the segmentation to keep an intermediate result.
    _verifySegmentation(BFimage, rRegion, PATH, experiment, time)

    return np.nonzero(rRegion)
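A hedged usage sketch for `_getSphCoords`: the path, experiment, channel, and numeric parameters below are placeholders, and the helpers (`_getCenter`, `_cropper`, `_findSpheroid`, `_verifySegmentation`) are assumed to come from the same module as above. The return value is the `np.nonzero` tuple of (row, col) indices of the segmented spheroid, so its length gives a pixel-area estimate.

# hypothetical arguments
rows, cols = _getSphCoords(PATH='/data/plate1', experiment='exp01', time=0,
                           CHANNEL='BF', wellDiameter=450, marginDistance=20,
                           aspectRatio=1.0)
spheroid_area_px = rows.size   # number of pixels labelled as spheroid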
Example #4
def read_img_sequence(path, file_extension):
    pims_sequence = pims.ImageSequence(join(path,
                                            '*.{}'.format(file_extension)),
                                       process_func=None)
    # return np.stack([frame.copy() for frame in pims_sequence], axis=2)
    return np.stack([frame.astype(np.uint16) for frame in pims_sequence],
                    axis=2)
Example #5
 def setUpClass(cls):
     super(TestReproducibility, cls).setUpClass()
     # generate a new file
     video = pims.ImageSequence(
         os.path.join(path, 'video', 'image_sequence'))
     actual = tp.batch(invert_image(video), diameter=9, minmass=240)
     actual = tp.link_df(actual, search_range=5, memory=2)
     actual.to_csv(reproduce_fn)
Example #6
def load_tiff_stack(dirname):
    print("\t--> Loading video: " + dirname)
    imseq = pims.ImageSequence(os.path.join(dirname, '*.tif'))
    imgs = np.zeros((imseq.frame_shape[0], imseq.frame_shape[1], len(imseq)))
    for i in range(len(imseq)):
        imgs[:, :, i] = imseq.get_frame(i)

    return imgs
Example #7
def show_annotated_first_frame(data, img_dir_path):
    """
    :param data: dataframe
    :param img_dir_path: string
    :return:
    """
    frames = pims.ImageSequence('{}*.jpg'.format(img_dir_path))
    plt.figure()
    tp.annotate(data[data['frame'] == 0], frames[0])
Example #8
    def __init__(self, trial, xy, edgeMin=10, edgeMax=10, boost=1):
        paths = ensureDataPath().paths
        imgPath = paths.images[trial].format(xy)
        self.images = pims.ImageSequence(imgPath, as_grey=True)
        self.frames = []

        for i, img in enumerate(self.images):
            boundary = FindBoundary(i, img, edgeMin, edgeMax, boost)
            self.frames.append(boundary)
            # Display all predicted boundaries
            # plt.plot(*boundary.segments[0], color='yellow')
Example #9
    def __init__(self, input_folder, output_folder, t, series, count):
        self.frames = pims.ImageSequence(input_folder, process_func=None)
        self.t = t
        self.series = series
        self.cell_count = count
        self.output_folder = output_folder
        (w, h) = self.frames[0].shape
        self.frames_output = np.zeros(shape=(t, w, h), dtype=np.uint8)
        self.frames_output[0] = self.frames[0]

        self.regions_0 = []
        self.regions_1 = []
Example #10
def _open_folder(path):

    # List all the files in the folder
    file_in_folder = glob(os.path.join(path, '*.*'))

    # Check that the files in the folder can be opened
    file_in_folder = _check_extensions(file_in_folder)

    # Open all the images
    sequence = pims.ImageSequence(path)

    return np.array(sequence)
Example #11
def _selectCells(cellFrame, experiment, IMAGECHANNEL, PATH,
                      maskSize, wellSize, aspectRatio):

    img = pims.ImageSequence(os.path.join(PATH, experiment, IMAGECHANNEL, '*.tif'), as_grey=True)

    im = img[np.min(cellFrame['frame'].dropna())]
    xCenter, yCenter = SpheroidSegmentBF._getCenter(im,maskSize,wellSize,aspectRatio)

    cellFrame['xCenter'] = xCenter
    cellFrame['yCenter'] = yCenter

    areaToKeep = maskSize*aspectRatio
    cellFrame = cellFrame[(cellFrame['x']-xCenter)**2 + (cellFrame['y']-yCenter)**2 < areaToKeep**2]

    return cellFrame
Example #12
    def setUpClass(cls):
        super(TestReproducibility, cls).setUpClass()
        npz = np.load(reproduce_fn)
        cls.expected_find_raw = npz['arr_0']
        cls.expected_find_bp = npz['arr_1']
        cls.expected_refine = npz['arr_2']
        cls.expected_locate = npz['arr_3']
        cls.coords_link = npz['arr_4']
        cls.expected_link = npz['arr_5']
        cls.expected_link_memory = npz['arr_6']
        cls.expected_characterize = npz['arr_7']

        cls.v = pims.ImageSequence(
            os.path.join(path, 'video', 'image_sequence', '*.png'))
        cls.v0_inverted = invert_image(cls.v[0])
Example #13
 def create_from_image_sequence(self, folder):
     if not PIMS:
         print("Failed to create video object from image sequence. PIMS is not available.")
     else:
         images = pims.ImageSequence(folder, as_grey=True)
         self.n = len(images)
         self.rows, self.cols = images[0].shape
         self.m = self.rows * self.cols
         self.frames = np.zeros((self.rows, self.cols, self.n))
         self.X = np.zeros((self.m, self.n))
         for i in range(self.n):
             print("reading input frame", i)
             self.frames[:, :, i] = images.get_frame(i)
             self.X[:, i] = np.reshape(images.get_frame(i), (self.m, ),
                                       order='F')
Example #14
def _cropByWell(PATH, maskSize, wellSize, aspectRatio):

    if not os.path.exists(os.path.join(PATH, 'cropped')):
        os.mkdir(os.path.join(PATH, 'cropped'))

    img = pims.ImageSequence(os.path.join(PATH, '*.tif'), as_grey=True)
    i = 0
    for im in tqdm(img):

        skimage.external.tifffile.imsave(
            os.path.join(PATH, 'cropped', 'crop_%d.tif' % i),
            _crop(im, im, maskSize, wellSize, aspectRatio))
        i += 1

    return
Example #15
def _cropAll(PATH, maskSize, wellSize, aspectRatio):

    if not os.path.exists(os.path.join(PATH, 'cropped')):
        os.mkdir(os.path.join(PATH, 'cropped'))

    img = pims.ImageSequence(os.path.join(PATH, '*.tif'), as_grey=True)
    i = 0

    for im in tqdm(img):

        skimage.external.tifffile.imsave(
            os.path.join(PATH, 'cropped', 'crop_%d.tif' % i),
            _crop(im, im, maskSize, wellSize, aspectRatio))
        i += 1

    return
Example #16
def analyze_frames(number, x_crop):
    film = os.path.dirname(number)
    # Format image sequence for processing
    frames = pims.ImageSequence(number + '//*.bmp', as_grey=True)
    datastorename = os.path.join(film, 'data-t' + number.strip()[-1] + '.h5')

    with tp.PandasHDFStoreBig(datastorename) as s:
        # Normalize the pictures to black and white, and identify islands
        normalize_pictures_parallel(frames, number, s, x_crop)
        print('\t Identifying tracks and linking...')

        # Connect and track particles. Saving tracks to 's'.
        pred = tp.predict.ChannelPredict(5, minsamples=3)
        for linked in pred.link_df_iter(s, 6):
            s.put(linked)
        all_results = s.dump()
    return all_results
Example #17
 def __init__(self, label_dir, root_dir):
     """
     Args:
         label_dir (pathlib): folder containing labels
         root_dir (pathlib): root directory with all the images
     """
     self.root_dir = root_dir
     self.label_dir = label_dir
     self.labels = self.process_labels()
     self.frames = pims.ImageSequence(str(root_dir / '*.bmp'))
     self.shape = self.frames[1].shape
     ar = np.array(self.frames).reshape(-1, 4)
     self.meand = ar.mean(axis=0)
     self.stdd = ar.std(axis=0)
     ar = None
     self.n = self.shape[0]
     #self.to_tensor = transforms.Compose([transforms.ToTensor(), transforms.Normalize(tuple(self.meand[0:3]),tuple(self.stdd[0:3]))])
     self.to_tensor = transforms.ToTensor()
Example #18
    def OnBtnAnalystClickedSlot(self):
        """Handle the Load button being clicked."""

        # Guard against an empty file name
        if self.__pictrureName is None or len(self.__pictrureName) == 0:
            return

        # Load the images
        frames = pims.ImageSequence(self.__pictrureName, as_grey=True)
        plt.imshow(frames[0])

        # Locate feature points; returns a DataFrame
        features = tp.locate(frames[0], 11, invert=True)
        features.head()

        # Analyze and display
        plt.figure()  # make a new figure
        tp.annotate(features, frames[0])
Example #19
def outputStrainpool(args):
        # tuple-parameter syntax is not valid in Python 3; unpack the argument tuple explicitly
        rawDir, tmpDir, filePrefix, scorethresh = args
        frameno = filePrefix.split("Image")[1].split("-")[1].split(".dcm")[0]
        videoFile = "Image-" + frameno + ".dcm"
        print(videoFile)
        try:
            ft, hr, nrow, ncol, x_scale, y_scale = extractmetadata(rawDir, videoFile) 
            print(ft, hr, nrow, ncol, x_scale, y_scale)
            if not (ft is None or hr is None or x_scale is None or y_scale is None):
                window =  int(((60 /hr) / (ft / 1000))) #approximate number of frames per cardiac cycle
                deltax = x_scale
                deltay = y_scale
                if window > 10:
                    xvel = 8 #(cm/s)
                    yvel = 8 
                    dxmax = (ft/1000)*(xvel)/deltax
                    dymax = (ft/1000)*(yvel)/deltay
                    #print("dxmax", dxmax, "dymax", dymax)
                    for direction in ["left", "right"]:
                        videodir = tmpDir + "/" + videoFile + "/maskedimages_" + direction + "/" 
                        print(videodir)
                        frames = pims.ImageSequence(videodir + '/*.png', as_grey=True)
                        nrow = frames[0].shape[0]
                        ncol = frames[0].shape[1]
                        minmass = 130
                        partlimit = 100
                        def adjustcurrentpart(partlimit, minmass, frames):
                            currentpart = computepartno(minmass, frames, 1)
                            if currentpart < partlimit and minmass >= 80:
                                minmass = minmass - 10
                                adjustcurrentpart(partlimit, minmass, frames)
                        adjustcurrentpart(partlimit, minmass, frames)
                        end = len(frames) - 1
                        print("size of segment", len(np.where(frames[0] > 0)[0]))
                        print("comparison", 0.005*nrow*ncol)
                        if not len(np.where(frames[0] > 0)[0]) < 0.005*nrow*ncol: #heuristic filter for bad segmentation
                            framelo = 0
                            framehi = end
                            print(videoFile, direction + " side")
                            outputStrain_window(frames, framelo, framehi, minmass, 
                                            scorethresh, dxmax, dymax, tmpDir, 
                               videoFile, nrow, ncol, direction)
        except (IOError, EOFError, KeyError) as e:
                print(videoFile, e)
        return 1
Example #20
def outputstrain(dicomdir, videofile, scorethresh, ft, nrow, ncol, window,
                 x_scale, y_scale):
    '''
    Compute strain for `videofile` in `dicomdir` by tracking particles in the
    masked left/right image sequences.
    '''
    print("computing strain", dicomdir, videofile)
    center, lvlength, lvwidth, thetainit = preprocess(dicomdir, videofile)
    deltax = x_scale
    deltay = y_scale
    xvel = 8  #(cm/s) myocardial velocity
    yvel = 8
    dxmax = (ft / 1000) * (xvel) / deltax  #maximum movement anticipated
    dymax = (ft / 1000) * (yvel) / deltay  #maximum movement anticipated
    for direction in ["left", "right"]:
        videodir = dicomdir + "/maskedimages_" + \
        direction + "/" + videofile
        print("reading in data")
        frames = pims.ImageSequence(videodir + '/*.png', as_grey=True)
        print("data read")
        minmass = 130
        partlimit = 100

        def adjustcurrentpart(partlimit, minmass, frames):
            '''
            recursive method to obtain enough trackable particles
            '''
            currentpart = computepartno(minmass, frames, 1)
            if currentpart < partlimit and minmass >= 80:
                minmass = minmass - 10
                adjustcurrentpart(partlimit, minmass, frames)

        adjustcurrentpart(partlimit, minmass, frames)
        end = len(frames) - 1
        # heuristic filter for bad segmentation
        if not len(np.where(frames[0] > 0)[0]) < 0.01 * nrow * ncol:
            framelo = 0
            framehi = end
            outputstrain_window(frames, framelo, framehi, minmass, scorethresh,
                                dxmax, dymax, dicomdir, videofile, nrow, ncol,
                                direction)
    return 1
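The `adjustcurrentpart` helper in the last two examples lowers `minmass` recursively, but the rebinding is local to each call, so the adjusted value never reaches the caller. Below is a hedged sketch of an iterative variant that returns the final threshold; `computepartno` is assumed to behave as in the snippets above (count of trackable particles at a given minmass).

def adjust_minmass(partlimit, minmass, frames, floor=80, step=10):
    # keep lowering minmass until enough particles are found or the floor is reached
    while computepartno(minmass, frames, 1) < partlimit and minmass >= floor:
        minmass -= step
    return minmass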
Example #21
    def __init__(self, path, fps, lens_magnification):
        """

        :param path: Directory searched recursively for `.tif` image files.
        :param fps: Frame rate of the recording, in frames per second.
        :param lens_magnification: Magnification of the objective lens.
        """

        self.path = path
        self.files = [f for f in glob.glob(self.path + '**/*.tif', recursive=True)]
        self.image_sequence = pims.ImageSequence(path + '**/*.tif', as_grey=True)
        self.size = self.image_sequence.frame_shape
        self.frame_number = len(self.files)
        self.fps = fps
        self.lens_magnification = lens_magnification
        self.particle_trajectory_list = None
        self.algae_trajectory_list = None
        self.minpoints = None
        self.cmin = 255
        self.cmax = 0
Example #22
def _verifyCellState(cellFrame, PATH, experiment, IMAGECHANNEL, frame, wellDiameter,
                        aspectRatio, marginDistance):

    if not os.path.exists(os.path.join(PATH, experiment, 'CD8 on spheroid test')):
        os.makedirs(os.path.join(PATH, experiment, 'CD8 on spheroid test'))

    savePath = os.path.join(PATH, experiment, 'CD8 on spheroid test')

    img = pims.ImageSequence(os.path.join(PATH, experiment, IMAGECHANNEL, '*.tif'), as_grey=True)
    img = img[frame]

    cropDist = wellDiameter*aspectRatio

    loc = cellFrame[cellFrame['frame'] == frame]
    xCenter = loc['xCenter'].iloc[0]
    yCenter = loc['yCenter'].iloc[0]

    i = SpheroidSegmentBF._crop(img, img, (xCenter, yCenter), wellDiameter, wellDiameter, aspectRatio)
    r = SpheroidSegmentBF._cropper(img, (xCenter, yCenter), wellDiameter, marginDistance, aspectRatio)
    rRegion = SpheroidSegmentBF._findSpheroid(r, wellDiameter, aspectRatio, marginDistance)

    x = loc['x'] - yCenter + cropDist/2
    y = loc['y'] - xCenter + cropDist/2

    fig, ax = plt.subplots(1,1, figsize = (5,5))
    plt.imshow(i, cmap='gray', origin = 'lower')
    plt.imshow(rRegion, alpha = 0.1, origin = 'lower')
    plt.scatter(x, y, c = loc['state'], label = loc['state'])

    for particle in loc['particle'].unique():

        state = loc.loc[loc['particle'] == particle, 'state'].iloc[0]
        xplot = loc.loc[loc['particle'] == particle, 'x'].iloc[0] - yCenter + cropDist/2
        yplot = loc.loc[loc['particle'] == particle, 'y'].iloc[0] - xCenter + cropDist/2
        plt.text(xplot, yplot, state)

    plt.savefig(os.path.join(savePath, 'testFrame_' + str(frame) +'.jpeg'))
    plt.legend()
    plt.close(fig)
    return
Example #23
def save_data(frame_dir, save_to_dir):
    """
    Takes a dirpath containing video frames and extracts the data.
    :param frame_dir: the directory containing the frames.
    :param save_to_dir: the directory path where we save our data, without the last '/'.
    :return:
    """
    print("Getting frame data from " + frame_dir)
    frames = pims.ImageSequence(frame_dir + '/' + FRAME_NAME + '*.jpg',
                                as_grey=True)
    try:
        data = tp.batch(frames[:-2],
                        PARTICLE_SIZE,
                        invert=True,
                        minmass=MIN_MASS,
                        percentile=PERCENTILE)
    except OSError:
        # tp.batch failed for this directory; nothing to write
        return
    frame_dirname = os.path.basename(frame_dir)
    out_filepath = save_to_dir + '/' + frame_dirname + '.csv'
    print("Writing frame data to " + out_filepath)
    data.to_csv(out_filepath)
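A usage sketch for `save_data`, assuming the module-level constants used above (FRAME_NAME, PARTICLE_SIZE, MIN_MASS, PERCENTILE) are defined as in the original module; the paths are placeholders.

# hypothetical paths; writes /data/run01/features/frames.csv
save_data('/data/run01/frames', '/data/run01/features')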
Example #24
    def __init__(self, input_folder, output_folder, t, series):
        self.frames = pims.ImageSequence(input_folder, process_func=None)
        self.t = t
        self.series = series
        self.track = []  #[[1,2,1,1,1,0,1,.....],[]]
        self.mapping = []  #[['1_1','2_3','3_2'], ..., ...]
        self.output_folder = output_folder
        (w, h) = self.frames[0].shape
        self.frames_output = np.zeros(shape=(t, w, h), dtype=np.uint8)

        i = 0
        while (series[i] == 0):
            i = i + 1
        if (i < t):
            self.frames_output[i] = self.frames[
                i]  # The first frame is initialized

        # get the number of cells, assume it's constant
        regions = skimage.measure.regionprops(
            self.frames_output[i], intensity_image=self.frames_output[i])
        self.N = len(regions)

        self.regions_0 = []
        self.regions_1 = []
Example #25
        img = img.copy()[..., ::-1].reshape(112, 112, 3)
        img = Image.fromarray(img)
        mirror = transforms.functional.hflip(img)
        with torch.no_grad():
            fea = learner.model(conf.test_transform(img).cuda().unsqueeze(0))
            fea_mirror = learner.model(conf.test_transform(mirror).cuda().unsqueeze(0))
            fea = l2_norm(fea + fea_mirror).cpu().numpy().reshape(512)

        return fea


    def extract_fea(res):
        res3 = {}
        for path, img in res.items():
            res3[path] = extract_fea_from_img(img)
        return res3


    # res, res2 = lz.msgpack_load(lz.work_path + '/yy.yy2.pk')
    # res3 = extract_fea(res)
    # res4 = extract_fea(res2)
    # lz.msgpack_dump([res3, res4], lz.work_path + 'yy.yy2.fea.pk')

    imgs = pims.ImageSequence(lz.work_path + 'face.yy2/gallery/*.png', process_func=cvb.rgb2bgr)
    res = {}
    for img, path in zip(imgs, imgs._filepaths):
        print(path)
        fea = extract_fea_from_img(img)
        res[path] = fea
    lz.msgpack_dump(res, lz.work_path + '/face.yy2/gallery/gallery.pk')
Example #26
    # Apply greyscale
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Apply thresholds
    img = filters.threshold_local(img, 3)
    threshold = 0.5
    idx = img > img.max() * threshold
    idx2 = img < img.max() * threshold
    img[idx] = 0
    img[idx2] = 255
    # Dilate to get a continuous network
    # of liquid films

    n_dilat = 1
    for _ in range(n_dilat):
        img = ndimage.binary_dilation(img)
        print(img)
    return util.img_as_int(img)


frames = pims.ImageSequence(os.path.join(datapath, prefix + '*.png'),
                            process_func=preprocess_foam)
print(frames)

img = frames[200]
print(img)
cv2.imshow("treated image", img)
cv2.waitKey(0)

# fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(8, 4))
# ax.imshow(img)
# plt.show()
Example #27
def get_data(outdir, red, green, blue, diam=11):
    ''' Loads the output of the preprocessing steps for particle extraction
        Returns the formatted data
    '''
    frames = pims.ImageSequence("../" + outdir + "/*tif")
    print(frames)

    # particle diameter
    features = tp.batch(frames[:len(frames)], diameter=diam,
                        minmass=1, invert=True)

    # Link features in time
    search_range = diam - 2  # sigma_(max)

    lframes = int(np.floor(len(frames) / 3))  # r, g, b images are loaded
    imax = int(np.floor(15 * lframes / 100))  # default max 15% frame count
    t = tp.link_df(features, search_range, memory=imax)
    # default neighbour strategy: KDTree

    # Filter spurious trajectories
    imin = int(np.floor(10 * lframes / 100))  # default min 10% frame count
    t1 = tp.filter_stubs(t, imin)  # if seen in imin

    # Compare the number of particles in the unfiltered and filtered data
    print('Unique number of particles (Before filtering):',
          t['particle'].nunique())
    print('(After):', t1['particle'].nunique())

    # export pandas data frame with filename being current date and time
    timestr = time.strftime("%Y%m%d-%H%M%S")

    data = pd.DataFrame({ 'x': t1.x, 'y': t1.y,'z':t1.frame,\
                        'mass':t1.mass, 'size':t1.size, 'ecc':t1.ecc,\
                        'signal':t1.signal, 'ep':t1.ep, 'particle':t1.particle\
                        })

    # format the dataframe / original indexing
    data["n"] = np.arange(len(data))

    print("Sorting dataframe by time...")
    data = data.sort_values(by='z', ascending=True)

    print("Extracting pixel values of particles...")
    r, g, b = get_val(red, 2, data), get_val(green, 1,
                                             data), get_val(blue, 0, data)

    print("Normalising rgb values to relative quantities...")
    r1, g1, b1 = np.array(r), np.array(g), np.array(b)
    r = (r1 - np.min(r1)) * (65535 / np.max(r1))
    g = (g1 - np.min(g1)) * (65535 / np.max(g1))
    b = (b1 - np.min(b1)) * (65535 / np.max(b1))

    print("Adding (r,g,b) values as columns to dataframe...")
    strname, px_val = ["r", "g", "b"], [r, g, b]
    add_arrays_df(strname, px_val, data)

    # sort back to original state
    data = data.sort_values(by='n', ascending=True)

    # remove the previously created column
    data.drop('n', axis=1, inplace=True)

    # format df with rgb values to uint8
    data = format_df(data)

    print "Dataframe summary:\n", data.describe()
    file_name = "../particles_" + timestr + ".csv"
    print "Exporting %s" % (file_name)
    data.to_csv(file_name, sep='\t', encoding='utf-8')

    return data
Example #28
def run_trials(prjd,
               bright_fn_marker,
               test=False,
               force=False,
               cores=4,
               rerun_step=''):
    """
    Runs the analysis.
    
    Nomenclature:
    `project` is a folder containing many timelapses,
    timelapses are called `trials`,
    each `trial`, after analysis, would contain `cells`.
    
    :param prjd: path to the (`project`) folder containing `trials`,
        e.g. if the images are stored like this:
        
        images_190919 (`project`)
            WT-WT_001 (`trial`)
                tifs ..
            WT-WT_002 (`trial`)
                tifs ..
            WT-WT_003 (`trial`)
                tif ..
            WT-WT_004 (`trial`)
                tifs ..
        
        Here, `images_190919` will be prjd
        so the correct command will be
        python endocytosis.py run-trials /path/to/images_190919 _T1C1
        
    :param bright_fn_marker: `_t` for the in-house microscope, `_T1C1` for the chul microscope.
    :param rerun_step: flag_segmentation_done, flag_cells_done, flag_cellframes_done, flag_distances_done

    """
    if rerun_step is not None:
        if isinstance(rerun_step, list):
            rerun_step = '-'.join(rerun_step)

    # make cfg for the project
    from htsimaging.lib.io_cfg import make_project_cfg, make_cell_cfg
    cfg = make_project_cfg(prjd,
                           bright_fn_marker,
                           cores=cores,
                           test=test,
                           force=force)
    ## get segments from brightfield images
    if (not 'flag_segmentation_done'
            in cfg) or force or ('flag_segmentation_done' in rerun_step):
        from htsimaging.lib.segment import run_yeastspotter
        cfg['yeastspotter_srcd'] = f"{dirname(realpath(__file__))}/../deps/yeast_segmentation"
        logging.info(cfg.keys())
        cfg = run_yeastspotter(cfg, test=test)
        to_dict(cfg, cfg['cfgp'])
        cfg['flag_segmentation_done'] = True
        print('flag_segmentation_done')

#     if not '' in cfg:
## get and filter cells from segments images
    if (not 'flag_cells_done' in cfg) or force or ('flag_cells_done'
                                                   in rerun_step):
        from htsimaging.lib.segment import segmentation2cells
        for trial in cfg['trials']:
            if len(cfg['trials'][trial]['bright']) != 0:
                cellsps = []
                for imp, imsegp in zip(
                        cfg['trials'][trial]['bright'],
                        cfg['trials'][trial]['bright_segmented']):
                    cellsp = f'{imsegp}.npy'
                    regions = segmentation2cells(
                        imp,
                        imsegp,
                        magnification=cfg['magnification'],
                        plotp=f"{cfg['trials'][trial]['plotd']}/image_segmentation2cells.png"
                    )
                    np.save(cellsp, regions)
                    cellsps.append(cellsp)
                cfg['trials'][trial]['bright_segmented_cells'] = cellsps
        cfg['flag_cells_done'] = True
        to_dict(cfg, cfg['cfgp'])
        print('flag_cells_done')

    if (not 'flag_cellframes_done' in cfg) or force or ('flag_cellframes_done'
                                                        in rerun_step):
        from htsimaging.lib.segment import get_cellboxes
        from htsimaging.lib.utils import get_cellprops
        cellcfgps = []
        for trial in cfg['trials']:
            frames = pims.ImageSequence(np.sort(cfg['trials'][trial]['gfp']),
                                        as_grey=True)
            cellsp = np.sort(cfg['trials'][trial]['bright_segmented_cells'])[
                0]  # only bright field at the start
            cells = np.load(cellsp)
            dcellprops = get_cellprops(cells,
                                       intensity_imgtype2img={
                                           "gfp mean":
                                           np.mean(frames, axis=0),
                                           "bright":
                                           tifffile.imread(
                                               cfg['trials'][trial]['bright'])
                                       })
            to_table(dcellprops, f"{cellsp}.cellprops.tsv")
            cellboxes = get_cellboxes(
                cells,
                plotp=f"{cfg['trials'][trial]['plotd']}/image_get_cellboxes.png"
            )
            for celli, cellbox in enumerate(cellboxes):
                logging.info(f"{trial};cell{celli+1:08d}")
                # make cg for cell
                cellcfg = make_cell_cfg(
                    cfg,
                    frames,
                    cells,
                    trial,
                    celli,
                    cellbox,
                    test=test,
                    force=force if rerun_step == '' else True)
                cellcfgps.append(cellcfg['cfgp'])
        cfg['cellcfgps'] = cellcfgps
        cfg['flag_cellframes_done'] = True
        to_dict(cfg, cfg['cfgp'])
        print('flag_cellframes_done')

        # parallel processing
    if (not 'flag_distances_done' in cfg) or force or ('flag_distances_done'
                                                       in rerun_step):
        from htsimaging.lib.spt import apply_cellcfgp2distances
        cellcfgps = np.sort(cfg['cellcfgps'])
        if len(cellcfgps) != 0:
            print(f"{get_datetime()}: processing: {len(cellcfgps)} cells.")
            for cellcfgp in cellcfgps:
                cellcfg_ = read_dict(cellcfgp)
                cellcfg_['force'] = force if rerun_step == '' else True
                to_dict(cellcfg_, cellcfgp)
            if not test:
                pool = Pool(processes=cfg['cores'])
                pool.map(apply_cellcfgp2distances, cellcfgps)
                pool.close()
                pool.join()
            else:
                for cellcfgp in cellcfgps:
                    logging.info(f'processing {cellcfgp}')
                    apply_cellcfgp2distances(cellcfgp)
        cfg['flag_distances_done'] = True
        to_dict(cfg, cfg['cfgp'])
        print('flag_distances_done')
    print('finished')
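A hedged invocation sketch for `run_trials` above, mirroring the CLI command shown in its docstring; the project path is a placeholder and the `rerun_step` value is one of the flags the docstring lists.

# equivalent to: python endocytosis.py run-trials /path/to/images_190919 _T1C1
run_trials('/path/to/images_190919', bright_fn_marker='_T1C1',
           cores=4, rerun_step='flag_cellframes_done')  # redo only the cell-frame step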
Example #29
    return frameNum, X_pos, Y_pos, Speed

    #VideoFolder = '/Users/deepak/Dropbox/GravityMachine/ExperimentResults/AbioticExperiments'


VideoFolder = '/Volumes/GRAVMACH1/GravityMachine/2017_06_01_DissolvingCrystalsInFlow'

DestinationFolder = os.path.join(VideoFolder, 'TrackOverlaySimple')
if (not os.path.exists(DestinationFolder)):
    os.makedirs(DestinationFolder)

#VideoFile = 'kiss_tumble_1214_1343'
VideoFile = 'Substack_1_99'

frames = pims.ImageSequence(os.path.join(VideoFolder, VideoFile), as_grey=True)

FPS = 60

BeadFile = 'SugarTrack.csv'

ParticleFile = 'ParticleTrack.csv'

frame_depth = 100

#print('Total number of frames in the Folder : {}'.format(totalFrames))
print('Video frame rate: {} fps'.format(FPS))

frameNum_bead, X_pos_bead, Y_pos_bead, Speed_bead = loadCSV(
    VideoFolder, BeadFile)
Example #30
 def test_zipfile(self):
     pims.ImageSequence(self.tempfile)[0]