# Example #1
def get_folders(input_path, pressure=None, repetition=None):
    """Collect measurement files/folders from one or more input paths.

    Parameters
    ----------
    input_path : str, Path or list of str/Path
        Path(s) to search; entries containing ``*`` are expanded with
        :func:`glob.glob` (recursively).
    pressure : float, optional
        If given, keep only files whose config pressure (converted from Pa
        to bar) equals this value.
    repetition : int, optional
        If given, keep only the n-th match of each glob pattern.

    Returns
    -------
    list or numpy.ndarray
        The paths with a readable config; a numpy array when the
        ``pressure`` filter is applied (matching the original behavior).
    """
    if isinstance(input_path, (str, Path)):
        input_path = [input_path]

    # expand glob patterns; plain paths are taken as-is
    paths = []
    for path in input_path:
        if "*" in str(path):
            glob_data = glob.glob(path, recursive=True)
            if repetition is not None:
                # keep only the selected repetition of this pattern
                glob_data = glob_data[repetition:repetition + 1]
            paths.extend(glob_data)
        else:
            paths.append(path)

    # keep only files with a readable config; cache the parsed configs so
    # the optional pressure filter below does not re-read every file
    new_paths = []
    configs = []
    for file in paths:
        try:
            configs.append(getConfig(file))
            new_paths.append(file)
        except OSError as err:
            print(err, file=sys.stderr)
            continue
    paths = new_paths

    if pressure is not None:
        # pressure_pa is stored in Pa; convert to bar for the comparison
        pressures = np.array(
            [config['pressure_pa'] / 100_000 for config in configs])
        paths = np.array(paths)
        paths = paths[pressures == pressure]

    return paths
# Example #2
def to_filelist(paths, reevaluate=False):
    """Expand *paths* into a list of .tif/.cdb files that need evaluation.

    Parameters
    ----------
    paths : str or list of str
        Files, glob patterns or directories. Directories are searched
        recursively for ``.tif`` files.
    reevaluate : bool
        If True, include files even when an ``*_evaluated_config_new.txt``
        marker already exists next to them.

    Returns
    -------
    list of str
        Files with a readable config that still need to be evaluated.
    """
    import sys
    import glob
    from pathlib import Path
    from deformationcytometer.includes.includes import getConfig

    if not isinstance(paths, list):
        paths = [paths]
    files = []
    for path in paths:
        if path.endswith(".tif") or path.endswith(".cdb"):
            if "*" in path:
                files.extend(glob.glob(path, recursive=True))
            else:
                files.append(path)
        else:
            # treat the entry as a folder and search it recursively
            files.extend(glob.glob(path + "/**/*.tif", recursive=True))
    files2 = []
    for filename in files:
        # skip files already evaluated unless a re-evaluation is forced
        if reevaluate or not Path(
                str(filename)[:-4] + "_evaluated_config_new.txt").exists():
            # check if the config file exists
            try:
                config = getConfig(filename)
            except (OSError, ValueError) as err:
                # consistent with get_folders: report errors on stderr
                print(err, file=sys.stderr)
                continue
            files2.append(filename)
        else:
            print(filename, "already evaluated")
    return files2
def get_pressures(input_path, repetition=None):
    """Return the pressures (in bar) of all measurement files in *input_path*.

    Parameters
    ----------
    input_path : str, Path or list
        Passed through to :func:`get_folders`.
    repetition : int, optional
        Forwarded to :func:`get_folders` to select a single repetition.

    Returns
    -------
    numpy.ndarray
        One pressure value (bar) per file found.
    """
    # bug fix: forward the caller's repetition (was hard-coded to None)
    paths = get_folders(input_path, repetition=repetition)

    # pressure_pa is stored in Pa; convert to bar
    pressures = [getConfig(file)['pressure_pa'] / 100_000 for file in paths]

    return np.array(pressures)
def plotPathList(paths):
    """Evaluate a list of "*_result.txt" files and plot their stress/strain.

    For each file the raw data is evaluated (velocity, per-cell averaging,
    center correction, filtering, stress/strain) and cached next to it as
    "*_evaluated.csv"; if the cache exists it is loaded instead. All files
    are concatenated, fitted with ``fitStiffness`` and plotted as a density
    scatter with binned means.

    NOTE(review): after the loop ``config`` holds the config of the *last*
    file only; the fit value appended to ``fit_data`` therefore reflects
    that config -- confirm this is intended for multi-file inputs.

    Returns
    -------
    list
        A single-element list with the product of the two fit parameters.
    """
    global ax
    paths = list(paths)
    print(paths)
    fit_data = []

    data_list = []
    for index, file in enumerate(paths):
        # the evaluated CSV cache lives next to the result file
        output_file = Path(str(file).replace("_result.txt", "_evaluated.csv"))

        # load the data and the config
        data = getData(file)
        config = getConfig(file)
        """ evaluating data"""
        if not output_file.exists():
            #refetchTimestamps(data, config)

            getVelocity(data, config)

            # take the mean of all values of each cell
            data = data.groupby(['cell_id']).mean()

            correctCenter(data, config)

            data = filterCells(data, config)

            # reset the indices
            data.reset_index(drop=True, inplace=True)

            getStressStrain(data, config)

            #data = data[(data.stress < 50)]
            data.reset_index(drop=True, inplace=True)

            # ellipse area from the fitted long/short axes
            data["area"] = data.long_axis * data.short_axis * np.pi
            data.to_csv(output_file, index=False)

        # always read back from the cache so both branches yield the same shape
        data = pd.read_csv(output_file)

        #data = data[(data.area > 0) * (data.area < 2000) * (data.stress < 250)]
        #data.reset_index(drop=True, inplace=True)

        data_list.append(data)

    data = pd.concat(data_list)
    data.reset_index(drop=True, inplace=True)

    fitStiffness(data, config)

    plotDensityScatter(data.stress, data.strain)
    #plotStressStrainFit(data, config)
    plotBinnedData(data.stress, data.strain,
                   [0, 10, 20, 30, 40, 50, 75, 100, 125, 150, 200, 250])
    #plt.title(f'{config["fit"]["p"][0] * config["fit"]["p"][1]:.2f}')
    fit_data.append(config["fit"]["p"][0] * config["fit"]["p"][1])

    return fit_data
# Example #5
 def filter(file):
     """Decide whether *file* should be kept for evaluation.

     Rejects files whose config cannot be read and, when a pressure filter
     is active (closure variable ``pressure``), files recorded at a
     different pressure.
     """
     try:
         cfg = getConfig(file)
     except OSError as err:
         print(err, file=sys.stderr)
         return False
     # config stores the pressure in Pa; compare in bar
     if pressure is None or cfg['pressure_pa'] / 100_000 == pressure:
         return True
     print("filtered due to pressure")
     return False
# Example #6
def process_load_images(filename):
    """
    Loads a .tif file stack and yields all the images.

    Generator protocol: first yields a ``type="start"`` dict, then one
    ``type="image"`` dict per frame (image data, timestamp, config and the
    total image count), and finally a ``type="end"`` dict whose index is
    the image count.

    NOTE(review): relies on the module-level names ``log`` and
    ``write_clickpoints_file`` -- confirm they are defined in the module
    where this function runs.
    """
    import imageio
    from deformationcytometer.detection import pipey
    from deformationcytometer.detection.includes.regionprops import preprocess, getTimestamp
    from deformationcytometer.includes.includes import getConfig
    import clickpoints

    print("start load images", filename)
    log("1load_images", "prepare", 1)

    # open the image reader
    reader = imageio.get_reader(filename)
    # get the config file
    config = getConfig(filename)
    # get the total image count
    image_count = len(reader)

    # optionally mirror the frames into a ClickPoints database
    print("create cdb", filename[:-4]+".cdb")
    if write_clickpoints_file:
        cdb = clickpoints.DataFile(filename[:-4]+".cdb", "w")
        cdb.setMaskType("prediction", color="#FF00FF", index=1)

    yield dict(filename=filename, index=-1, type="start")
    log("1load_images", "prepare", 0)

    log("1load_images", "read", 1)
    # iterate over all images in the file
    for image_index, im in enumerate(reader):
        # guard against the reader yielding more frames than len() reported
        if image_index == image_count:
            break
        # ensure image has only one channel
        if len(im.shape) == 3:
            im = im[:, :, 0]
        # get the timestamp from the file
        timestamp = float(getTimestamp(reader, image_index))

        if write_clickpoints_file:
            cdb.setImage(filename, frame=image_index)#, timestamp=timestamp)
        log("1load_images", "read", 0, image_index)
        # return everything in a nicely packed dictionary
        yield dict(filename=filename, index=image_index, type="image", timestamp=timestamp, im=im, config=config,
                   image_count=image_count)
        if image_index < image_count - 1:
            log("1load_images", "read", 1, image_index + 1)

    yield dict(filename=filename, index=image_count, type="end")
# Example #7
def process_find_cells(video, mask_queue):
    """Consume predicted masks from *mask_queue* and extract cells.

    Reads batches of (images, image indices, prediction masks) from the
    queue, converts each mask into cell records via ``mask_to_cells_edge``
    and finally writes all cells to a "_result.txt" file next to *video*.

    NOTE(review): relies on the module-level names ``queue_iterator`` and
    ``r_min`` -- confirm they are defined where this function runs.
    NOTE(review): the output path uses ``video[:-3]``, which keeps the dot
    of a ".tif" extension (other blocks here use ``[:-4]``); verify the
    intended result filename.
    """
    import tqdm
    import imageio
    from pathlib import Path
    from deformationcytometer.includes.includes import getConfig
    from deformationcytometer.detection.includes.regionprops import save_cells_to_file, mask_to_cells_edge, getTimestamp

    # get image and config
    vidcap = imageio.get_reader(video)
    config = getConfig(video)

    cells = []

    # initialize the progressbar
    with tqdm.tqdm(total=len(vidcap)) as progressbar:
        # update the description of the progressbar
        progressbar.set_description(f"{len(cells)} good cells")

        for batch_images, batch_image_indices, prediction_mask_batch in queue_iterator(
                mask_queue):
            # iterate over the predicted images
            for batch_index in range(len(batch_image_indices)):
                image_index = batch_image_indices[batch_index]
                im = batch_images[batch_index]
                prediction_mask = prediction_mask_batch[batch_index]

                # get the images in the detected mask
                cells.extend(
                    mask_to_cells_edge(prediction_mask,
                                       im,
                                       config,
                                       r_min,
                                       frame_data={
                                           "frame":
                                           image_index,
                                           "timestamp":
                                           getTimestamp(vidcap, image_index)
                                       }))

            # update the count of the progressbar with the current batch
            progressbar.update(len(batch_image_indices))

    # save the results
    save_cells_to_file(Path(video[:-3] + '_result.txt'), cells)
# Example #8
    def __init__(self, *args, **kwargs):
        """Build the DeformationCytometer ClickPoints add-on UI and load data.

        Sets up the worker thread and its signals, the marker types, all Qt
        widgets (detection buttons, filter thresholds, plot buttons, plot
        canvas, progress bar), resolves the video/config/result file paths
        and loads previously evaluated data (both from the main pipeline
        and from earlier add-on runs).

        NOTE(review): ``clickpoints.Addon.__init__`` is called twice in this
        method (here and again after the path setup); this looks like a
        copy/paste artifact -- confirm whether the second call is needed.
        """
        clickpoints.Addon.__init__(self, *args, **kwargs)

        # qthread and signals for update cell detection and loading ellipse at add on launch
        self.thread = Worker(run_function=None)
        self.thread.thread_started.connect(self.start_pbar)
        self.thread.thread_finished.connect(self.finish_pbar)
        self.thread.thread_progress.connect(self.update_pbar)

        self.stop = False
        self.plot_data = np.array([[], []])
        self.unet = None
        self.layout = QtWidgets.QVBoxLayout(self)

        # Setting up marker Types
        self.marker_type_cell1 = self.db.setMarkerType("cell", "#0a2eff",
                                                       self.db.TYPE_Ellipse)
        self.marker_type_cell2 = self.db.setMarkerType("cell new", "#Fa2eff",
                                                       self.db.TYPE_Ellipse)
        self.cp.reloadTypes()

        # finding and setting path to store network probability map
        self.prob_folder = os.environ["CLICKPOINTS_TMP"]
        self.prob_path = self.db.setPath(self.prob_folder)
        self.prob_layer = self.db.setLayer("prob_map")

        # NOTE(review): second base-class __init__ call -- see docstring.
        clickpoints.Addon.__init__(self, *args, **kwargs)

        # set the title and layout
        self.setWindowTitle("DeformationCytometer - ClickPoints")
        self.layout = QtWidgets.QVBoxLayout(self)

        # weight file selection
        self.weight_selection = SetFile(store_path,
                                        filetype="weight file (*.h5)")
        self.weight_selection.fileSeleted.connect(self.initUnet)
        self.layout.addLayout(self.weight_selection)

        # update segmentation
        # in range of frames
        seg_layout = QtWidgets.QHBoxLayout()
        self.update_detection_button = QtWidgets.QPushButton(
            "update cell detection")
        self.update_detection_button.setToolTip(
            tooltip_strings["update cell detection"])
        self.update_detection_button.clicked.connect(
            partial(self.start_threaded, self.detect_all))
        seg_layout.addWidget(self.update_detection_button, stretch=5)
        # on single frame
        self.update_single_detection_button = QtWidgets.QPushButton(
            "single detection")
        self.update_single_detection_button.setToolTip(
            tooltip_strings["single detection"])
        self.update_single_detection_button.clicked.connect(self.detect_single)
        seg_layout.addWidget(self.update_single_detection_button, stretch=1)
        self.layout.addLayout(seg_layout)

        # regularity and solidity thresholds
        validator = QtGui.QDoubleValidator(0, 100, 3)
        filter_layout = QtWidgets.QHBoxLayout()
        reg_label = QtWidgets.QLabel("irregularity")
        filter_layout.addWidget(reg_label)
        self.reg_box = QtWidgets.QLineEdit("1.06")
        self.reg_box.setToolTip(tooltip_strings["irregularity"])
        self.reg_box.setValidator(validator)
        filter_layout.addWidget(self.reg_box,
                                stretch=1)  # TODO implement text edited method
        sol_label = QtWidgets.QLabel("solidity")
        filter_layout.addWidget(sol_label)
        self.sol_box = QtWidgets.QLineEdit("0.96")
        self.sol_box.setToolTip(tooltip_strings["solidity"])
        self.sol_box.setValidator(validator)
        filter_layout.addWidget(self.sol_box, stretch=1)
        rmin_label = QtWidgets.QLabel("min radius [µm]")
        filter_layout.addWidget(rmin_label)
        self.rmin_box = QtWidgets.QLineEdit("6")
        self.rmin_box.setToolTip(tooltip_strings["min radius"])
        self.rmin_box.setValidator(validator)
        filter_layout.addWidget(self.rmin_box, stretch=1)
        filter_layout.addStretch(stretch=4)
        self.layout.addLayout(filter_layout)

        # plotting buttons
        layout = QtWidgets.QHBoxLayout()
        self.button_stressstrain = QtWidgets.QPushButton("stress-strain")
        self.button_stressstrain.clicked.connect(self.plot_stress_strain)
        self.button_stressstrain.setToolTip(tooltip_strings["stress-strain"])
        layout.addWidget(self.button_stressstrain)
        self.button_kpos = QtWidgets.QPushButton("k-pos")
        self.button_kpos.clicked.connect(self.plot_k_pos)
        self.button_kpos.setToolTip(tooltip_strings["k-pos"])
        layout.addWidget(self.button_kpos)
        self.button_reg_sol = QtWidgets.QPushButton("regularity-solidity")
        self.button_reg_sol.clicked.connect(self.plot_irreg)
        self.button_reg_sol.setToolTip(tooltip_strings["regularity-solidity"])
        layout.addWidget(self.button_reg_sol)
        self.button_kHist = QtWidgets.QPushButton("k histogram")
        self.button_kHist.clicked.connect(self.plot_kHist)
        self.button_kHist.setToolTip(tooltip_strings["k histogram"])
        layout.addWidget(self.button_kHist)
        self.button_alphaHist = QtWidgets.QPushButton("alpha histogram")
        self.button_alphaHist.clicked.connect(self.plot_alphaHist)
        self.button_alphaHist.setToolTip(tooltip_strings["alpha histogram"])
        layout.addWidget(self.button_alphaHist)
        self.button_kalpha = QtWidgets.QPushButton("k-alpha")
        self.button_kalpha.clicked.connect(self.plot_k_alpha)
        self.button_kalpha.setToolTip(tooltip_strings["k-alpha"])
        layout.addWidget(self.button_kalpha)
        # button to switch between display of loaded and newly generated data
        frame = QtWidgets.QFrame()  # horizontal separating line
        frame.setFrameShape(QtWidgets.QFrame.VLine)
        frame.setLineWidth(3)
        layout.addWidget(frame)
        self.switch_data_button = QtWidgets.QPushButton(
            self.disp_text_existing)
        self.switch_data_button.clicked.connect(self.switch_display_data)
        self.switch_data_button.setToolTip(
            tooltip_strings[self.disp_text_existing])
        layout.addWidget(self.switch_data_button)
        self.layout.addLayout(layout)

        # matplotlib widgets to draw plots
        self.plot = MatplotlibWidget(self)
        # NOTE(review): plot_data was already initialized above -- redundant?
        self.plot_data = np.array([[], []])
        self.layout.addWidget(self.plot)
        self.layout.addWidget(NavigationToolbar(self.plot, self))
        self.plot.figure.canvas.mpl_connect('button_press_event',
                                            self.button_press_callback)

        # progress bar
        self.progressbar = QtWidgets.QProgressBar()
        self.layout.addWidget(self.progressbar)
        # progressbar lable
        pbar_info_layout = QtWidgets.QHBoxLayout()
        self.pbarLable = QtWidgets.QLabel("")
        pbar_info_layout.addWidget(self.pbarLable, stretch=1)
        pbar_info_layout.addStretch(stretch=2)
        # button to stop thread execution
        self.stop_button = QtWidgets.QPushButton("stop")
        self.stop_button.clicked.connect(self.quit_thread)
        self.stop_button.setToolTip(tooltip_strings["stop"])
        pbar_info_layout.addWidget(self.stop_button, stretch=1)
        self.layout.addLayout(pbar_info_layout)

        # setting paths for data, config and image
        # identifying the full path to the video. If an existing ClickPoints database is opened, the path if
        # is likely relative to the database location.
        self.filename = self.db.getImage(0).get_full_filename()
        if not os.path.isabs(self.filename):
            self.filename = str(
                Path(self.db._database_filename).parent.joinpath(
                    Path(self.filename)))

        self.config_file = self.constructFileNames("_config.txt")
        self.result_file = self.constructFileNames("_result.txt")
        self.addon_result_file = self.constructFileNames("_addon_result.txt")
        self.addon_evaluated_file = self.constructFileNames(
            "_addon_evaluated.csv")
        self.addon_config_file = self.constructFileNames("_addon_config.txt")
        self.vidcap = imageio.get_reader(self.filename)

        # reading in config an data
        self.data_all_existing = pd.DataFrame()
        self.data_mean_existing = pd.DataFrame()
        self.data_all_new = pd.DataFrame()
        self.data_mean_new = pd.DataFrame()
        if self.config_file.exists() and self.result_file.exists():
            self.config = getConfig(self.config_file)
            # ToDo: replace with a flag// also maybe some sort of "reculation" feature
            # Trying to get regularity and solidity from the config
            if "irregularity" in self.config.keys(
            ) and "solidity" in self.config.keys():
                solidity_threshold = self.config["solidity"]
                irregularity_threshold = self.config["irregularity"]
            else:
                solidity_threshold = self.sol_threshold
                irregularity_threshold = self.reg_threshold
            # reading unfiltered data (from results.txt) and data from evaluated.csv
            # unfiltered data (self.data_all_existing) is used to display regularity and solidity scatter plot
            # everything else is from evaluated.csv (self.data_mean_existing)
            self.data_all_existing, self.data_mean_existing = self.load_data(
                self.result_file, solidity_threshold, irregularity_threshold)
        else:  # get a default config if no config is found
            self.config = getConfig(default_config_path)

        ## loading data from previous addon action
        if self.addon_result_file.exists():
            self.data_all_new, self.data_mean_new = self.load_data(
                self.addon_result_file, self.sol_threshold, self.reg_threshold)
            self.start_threaded(
                partial(self.display_ellipses,
                        type=self.marker_type_cell2,
                        data=self.data_all_new))
        # create an addon config file
        # presence of this file allows easy implementation of the load_data and tank threading pipelines when
        # calculating new data
        if not self.addon_config_file.exists():
            shutil.copy(self.config_file, self.addon_config_file)

        # NOTE(review): self.data_all is not assigned anywhere in this method;
        # presumably it is a property or set by another method -- verify.
        self.plot_data_frame = self.data_all
        # initialize plot
        self.plot_stress_strain()

        # Displaying the loaded cells. This is in separate thread as it takes up to 20 seconds.
        self.db.deleteEllipses(type=self.marker_type_cell1)
        self.db.deleteEllipses(type=self.marker_type_cell2)
        self.start_threaded(
            partial(self.display_ellipses,
                    type=self.marker_type_cell1,
                    data=self.data_all_existing))

        print("loading finished")
# Example #9
def load_all_data_old(input_path,
                      solidity_threshold=0.96,
                      irregularity_threshold=1.06,
                      pressure=None,
                      repetition=None,
                      new_eval=False):
    """Load, evaluate and concatenate all measurement files under *input_path*.

    For every "*_result.txt" file found by :func:`get_folders`, the data is
    evaluated (velocity, per-cell averaging, optional tank-treading merge,
    center correction, filtering, stress/strain, velocity fit and cell
    properties) and cached as "*_evaluated.csv" plus an
    "*_evaluated_config.txt" JSON config. Cached results are reused unless
    the cache is outdated (``evaluation_version``/thresholds changed) or
    *new_eval* is True.

    Returns
    -------
    (pandas.DataFrame, dict)
        The concatenated data of all files and the (last) config, with an
        aggregated ``config["filter"]`` entry.

    Raises
    ------
    ValueError
        If no files were found under *input_path*.

    NOTE(review): ``fit_data`` is assigned but never used in this function.
    """
    global ax

    # bump this to invalidate all cached "*_evaluated.csv" files
    evaluation_version = 8

    paths = get_folders(input_path, pressure=pressure, repetition=repetition)
    fit_data = []
    data_list = []
    filters = []
    config = {}
    for index, file in enumerate(paths):
        #print(file)
        output_file = Path(str(file).replace("_result.txt", "_evaluated.csv"))
        output_config_file = Path(
            str(file).replace("_result.txt", "_evaluated_config.txt"))

        # load the data and the config
        data = getData(file)
        config = getConfig(file)
        # hard-coded channel width override (meters)
        config["channel_width_m"] = 0.00019001261833616293

        if output_config_file.exists():
            with output_config_file.open("r") as fp:
                config = json.load(fp)
                config["channel_width_m"] = 0.00019001261833616293

        # decide whether the cached evaluation is still valid
        config_changes = check_config_changes(config, evaluation_version,
                                              solidity_threshold,
                                              irregularity_threshold)
        if "filter" in config:
            filters.append(config["filter"])
        """ evaluating data"""
        if not output_file.exists() or config_changes or new_eval:

            getVelocity(data, config)
            # take the mean of all values of each cell
            data = data.groupby(['cell_id'], as_index=False).mean()

            # merge tank-treading results (omega) if they were computed
            tt_file = Path(str(file).replace("_result.txt", "_tt.csv"))
            if tt_file.exists():
                data.set_index("cell_id", inplace=True)
                data_tt = pd.read_csv(tt_file)
                data["omega"] = np.zeros(len(data)) * np.nan
                for i, d in data_tt.iterrows():
                    # only accept tank-treading fits with reasonable r^2
                    if d.tt_r2 > 0.2:
                        data.at[d.id, "omega"] = d.tt * 2 * np.pi

                data.reset_index(inplace=True)
            else:
                print("WARNING: tank treading has not been evaluated yet")

            correctCenter(data, config)

            data = filterCells(data, config, solidity_threshold,
                               irregularity_threshold)
            # reset the indices
            data.reset_index(drop=True, inplace=True)

            getStressStrain(data, config)

            #data = data[(data.stress < 50)]
            data.reset_index(drop=True, inplace=True)

            # ellipse area and pressure (Pa -> bar)
            data["area"] = data.long_axis * data.short_axis * np.pi
            data["pressure"] = config["pressure_pa"] * 1e-5

            data, p = apply_velocity_fit(data)

            omega, mu1, eta1, k_cell, alpha_cell, epsilon = get_cell_properties(
                data)

            # write the cache; ignore read-only locations
            try:
                config["evaluation_version"] = evaluation_version
                config["network_evaluation_done"] = True
                config["solidity"] = solidity_threshold
                config["irregularity"] = irregularity_threshold
                data.to_csv(output_file, index=False)
                #print("config", config, type(config))
                with output_config_file.open("w") as fp:
                    json.dump(config, fp, indent=0)

            except PermissionError:
                pass

        else:
            with output_config_file.open("r") as fp:
                config = json.load(fp)
                config["channel_width_m"] = 0.00019001261833616293

        # always read back from the cache so both branches yield the same shape
        data = pd.read_csv(output_file)

        #data = data[(data.area > 0) * (data.area < 2000) * (data.stress < 250)]
        #data.reset_index(drop=True, inplace=True)

        data_list.append(data)
    # aggregate the per-file filter statistics
    l_before = np.sum([d["l_before"] for d in filters])
    l_after = np.sum([d["l_after"] for d in filters])

    config["filter"] = {"l_before": l_before, "l_after": l_after}
    try:
        data = pd.concat(data_list)
    except ValueError:
        raise ValueError("No object found", input_path)
    data.reset_index(drop=True, inplace=True)

    #fitStiffness(data, config)
    return data, config
def load_all_data(input_path, pressure=None, repetition=None):
    """Load, evaluate and concatenate all measurement files under *input_path*.

    Simpler variant of :func:`load_all_data_old`: evaluates each
    "*_result.txt" file (velocity, per-cell averaging, center correction,
    filtering, stress/strain) and caches the result as "*_evaluated.csv"
    with a JSON "*_evaluated_config.txt". Cached results are reused while
    their stored ``evaluation_version`` is current.

    Returns
    -------
    (pandas.DataFrame, dict)
        The concatenated data of all files and the (last) config, with an
        aggregated ``config["filter"]`` entry.

    NOTE(review): ``fit_data`` is assigned but never used in this function.
    """
    global ax

    # bump this to invalidate all cached "*_evaluated.csv" files
    evaluation_version = 3

    paths = get_folders(input_path, pressure=pressure, repetition=repetition)
    fit_data = []
    data_list = []
    filters = []
    config = {}
    for index, file in enumerate(paths):
        #print(file)
        output_file = Path(str(file).replace("_result.txt", "_evaluated.csv"))
        output_config_file = Path(str(file).replace("_result.txt", "_evaluated_config.txt"))

        # load the data and the config
        data = getData(file)
        config = getConfig(file)

        # hard-coded channel width override (meters)
        config["channel_width_m"] = 0.00019001261833616293

        # version of the cached evaluation (0 = no cache/very old)
        version = 0
        if output_config_file.exists():
            with output_config_file.open("r") as fp:
                config = json.load(fp)
            if "evaluation_version" in config:
                version = config["evaluation_version"]
        if "filter" in config.keys():
            filters.append(config["filter"])

        """ evaluating data"""
        if not output_file.exists() or version < evaluation_version:
            #refetchTimestamps(data, config)

            getVelocity(data, config)
            # take the mean of all values of each cell
            data = data.groupby(['cell_id']).mean()

            correctCenter(data, config)

            data = filterCells(data, config)
            # reset the indices
            data.reset_index(drop=True, inplace=True)

            getStressStrain(data, config)

            #data = data[(data.stress < 50)]
            data.reset_index(drop=True, inplace=True)

            # ellipse area from the fitted long/short axes
            data["area"] = data.long_axis * data.short_axis * np.pi
            # write the cache; ignore read-only locations
            try:
                config["evaluation_version"] = evaluation_version
                data.to_csv(output_file, index=False)
                #print("config", config, type(config))
                with output_config_file.open("w") as fp:
                    json.dump(config, fp)

            except PermissionError:
                pass
        else:
            with output_config_file.open("r") as fp:
                config = json.load(fp)

        # always read back from the cache so both branches yield the same shape
        data = pd.read_csv(output_file)

        #data = data[(data.area > 0) * (data.area < 2000) * (data.stress < 250)]
        #data.reset_index(drop=True, inplace=True)

        data_list.append(data)
    # aggregate the per-file filter statistics
    l_before = np.sum([d["l_before"] for d in filters])
    l_after = np.sum([d["l_after"] for d in filters])

    config["filter"] = {"l_before":l_before, "l_after":l_after}
    data = pd.concat(data_list)
    data.reset_index(drop=True, inplace=True)

    #fitStiffness(data, config)
    return data, config
# Script section: scan each datafile folder of the first dataset for
# "*_result.txt" files and collect their pressures.
rows = 1
cols = 3
#row_index = 0
data_index = -1
dataset = datasets[0]
datafiles = dataset["datafiles"]
for data_index, datafile in enumerate(datafiles):
    # NOTE(review): data_index from enumerate is immediately incremented,
    # making it 1-based inside the loop -- verify this offset is intended.
    data_index += 1
    paths = []
    pressures = []
    ax = None
    datafiles = dataset["datafiles"]
    #
    # collect every result file below this folder with its pressure (bar)
    for index, file in enumerate(Path(datafile).glob("**/*_result.txt")):
        config = getConfig(file)
        paths.append(file)
        pressures.append(config['pressure_pa'] / 100_000)

    paths = np.array(paths)
    pressures = np.array(pressures)

    unique_pressures = np.unique(pressures)
    # NOTE(review): hard-coded override -- only pressure 3 bar is processed;
    # the computed unique_pressures are discarded.
    unique_pressures = [3]  #unique_pressures[unique_pressures > 0.5]
    print(unique_pressures)

    fit_data = []
    index = 1

    #for data_index, datafile in enumerate(datafiles):
    fit_data = []
r_min = 6  # minimum radius of (undeformed) cells; cells with a smaller radius will not be considered
batch_size = 100  # the number if images that are analyzed at once with the neural network. Choose the largest number allowed by your graphics card.

# reading commandline arguments if executed from terminal
file, network_weight = read_args_detect_cells()

video = getInputFile(settings_name="detect_cells.py", video=file)

# initialize variables
unet = None
cells = []

# get image and config
vidcap = imageio.get_reader(video)
config = getConfig(video)

# initialize the progressbar
with tqdm.tqdm(total=len(vidcap)) as progressbar:
    # iterate over image batches
    for batch_images, batch_image_indices in batch_iterator(
            vidcap, batch_size, preprocess):
        # update the description of the progressbar
        progressbar.set_description(f"{len(cells)} good cells")

        # initialize the unet in the first iteration
        if unet is None:
            im = batch_images[0]
            unet = UNet((im.shape[0], im.shape[1], 1),
                        1,
                        d=8,
# Example #13
    def __init__(self, *args, **kwargs):
        """Build the DeformationCytometer ClickPoints add-on UI and load data.

        Sets up marker types, plot buttons, the matplotlib canvas and a
        progress bar, then loads the result/config files belonging to the
        first image of the database and runs the evaluation pipeline
        (velocity, center correction, per-cell averaging, filtering,
        stress/strain).

        NOTE(review): ``clickpoints.Addon.__init__`` is called twice (here
        and after ``loadData``) -- this looks like a copy/paste artifact;
        confirm whether the second call is required.
        """
        clickpoints.Addon.__init__(self, *args, **kwargs)

        self.layout = QtWidgets.QVBoxLayout(self)

        # Check if the marker type is present
        self.marker_type_cell = self.db.setMarkerType("cell", "#0a2eff",
                                                      self.db.TYPE_Ellipse)
        self.marker_type_cell2 = self.db.setMarkerType("cell2", "#Fa2eff",
                                                       self.db.TYPE_Ellipse)
        self.cp.reloadTypes()

        self.loadData()

        # NOTE(review): second base-class __init__ call -- see docstring.
        clickpoints.Addon.__init__(self, *args, **kwargs)
        # set the title and layout
        self.setWindowTitle("DeformationCytometer - ClickPoints")
        self.layout = QtWidgets.QVBoxLayout(self)

        # add export buttons
        layout = QtWidgets.QHBoxLayout()
        self.button_stressstrain = QtWidgets.QPushButton("stress-strain")
        self.button_stressstrain.clicked.connect(self.plot_stress_strain)
        layout.addWidget(self.button_stressstrain)

        self.button_stressy = QtWidgets.QPushButton("y-strain")
        self.button_stressy.clicked.connect(self.plot_y_strain)
        layout.addWidget(self.button_stressy)

        self.button_y_angle = QtWidgets.QPushButton("y-angle")
        self.button_y_angle.clicked.connect(self.plot_y_angle)
        layout.addWidget(self.button_y_angle)

        self.layout.addLayout(layout)

        # add a plot widget
        self.plot = MatplotlibWidget(self)
        self.layout.addWidget(self.plot)
        self.layout.addWidget(NavigationToolbar(self.plot, self))
        self.plot.figure.canvas.mpl_connect('button_press_event',
                                            self.button_press_callback)

        # add a progress bar
        self.progressbar = QtWidgets.QProgressBar()
        self.layout.addWidget(self.progressbar)

        # connect slots
        # self.signal_update_plot.connect(self.updatePlotImageEvent)
        # self.signal_plot_finished.connect(self.plotFinishedEvent)

        # initialize the table
        # self.updateTable()
        # self.selected = None

        # resolve the config/result files next to the first image of the db
        filename = self.db.getImage(0).get_full_filename()
        print(filename.replace(".tif", "_config.txt"))
        self.config = getConfig(filename.replace(".tif", "_config.txt"))
        self.data = getData(filename.replace(".tif", "_result.txt"))

        getVelocity(self.data, self.config)

        # center correction can fail on degenerate data; keep going then
        try:
            correctCenter(self.data, self.config)
        except ValueError:
            pass

        self.data = self.data.groupby(['cell_id']).mean()

        self.data = filterCells(self.data, self.config)
        self.data.reset_index(drop=True, inplace=True)

        getStressStrain(self.data, self.config)
    def __call__(self, filename, copy_of_file=None):
        """
        Load images and annotation masks from a ClickPoints ``.cdb`` database
        and yield them to the detection pipeline in batches.

        Yields three kinds of dicts:
          * ``type="start"``  — announces the file and its total image count.
          * ``type="image"``  — one batch of frames/masks, stored in the shared
            data storage and referenced via ``data_info`` / ``mask_info``.
          * ``type="end"``    — emitted once after the last batch.

        Parameters
        ----------
        filename :
            Path to the ``.cdb`` file; also used to look up the config file.
        copy_of_file :
            Optional local working copy (a ``pathlib.Path``); it is opened
            instead of ``filename`` and unlinked once fully read.
        """
        # NOTE(review): imageio, pipey, preprocess and getTimestamp are
        # imported here but not used in this variant of the loader.
        import imageio
        import sys
        from deformationcytometer.detection import pipey
        from deformationcytometer.detection.includes.regionprops import preprocess, getTimestamp
        from deformationcytometer.includes.includes import getConfig
        import clickpoints
        import numpy as np

        log("1load_images", "prepare", 1)

        # open the image reader
        #reader = reader2(imageio.get_reader(copy_of_file or filename))
        try:
            cdb = clickpoints.DataFile(copy_of_file or filename)
        except Exception as err:
            # file could not be opened as a ClickPoints database; report and
            # end the generator without yielding anything
            print(err, file=sys.stderr)
            return

        # get the config file (always looked up next to the original filename,
        # not next to the working copy)
        config = getConfig(filename)

        # get the total image count
        image_count = cdb.getImageCount()

        # announce the file before any image batches are produced
        yield dict(filename=filename,
                   index=-1,
                   type="start",
                   image_count=image_count)
        log("1load_images", "prepare", 0)

        data_storage_numpy = None

        log("1load_images", "read", 1)
        # per-batch accumulators; flushed every self.batch_size frames
        images = []
        masks = []
        timestamps = []
        start_batch_index = 0
        timestamp_start = None
        log("1load_images", "read", 1, 0)

        # iterate over all images in the file
        for image_index, img in enumerate(cdb.getImages()):
            im = img.data
            # ensure image has only one channel
            if len(im.shape) == 3:
                im = im[:, :, 0]

            # build a ground-truth mask from the stored annotations; the double
            # erosion subtraction keeps only a thin outline of each region
            mask = np.zeros((im.shape[0], im.shape[1]), dtype=np.uint8)
            if self.type == "polygon":
                for obj in img.polygons:
                    mask[obj.getPixels()] = 1
                from skimage.morphology import binary_erosion
                mask = mask - binary_erosion(binary_erosion(mask))
            elif self.type == "mask":
                from skimage.morphology import binary_erosion
                if img.mask is not None:
                    mask = img.mask.data
                mask = mask - binary_erosion(binary_erosion(mask))
            masks.append(mask)

            # get the timestamp from the file
            # NOTE(review): timestamps are hard-coded to 0 here (the real
            # lookup is commented out), so all yielded timestamps are 0
            timestamp = 0  #float(getTimestamp(reader, image_index))
            if timestamp_start is None:
                timestamp_start = timestamp
            timestamp -= timestamp_start

            images.append(im)
            timestamps.append(timestamp)

            # flush a batch when it is full or when the last frame was reached
            if image_index == image_count - 1 or len(
                    images) == self.batch_size:
                # allocate shared-memory slots for the images and the masks
                info = self.data_storage.allocate([len(images)] +
                                                  list(images[0].shape),
                                                  dtype=np.float32)
                info_mask = self.data_storage.allocate([len(images)] +
                                                       list(images[0].shape),
                                                       dtype=np.uint8)
                data_storage_numpy = self.data_storage.get_stored(info)
                data_storage_numpy_mask = self.data_storage.get_stored(
                    info_mask)
                # copy the accumulated frames/masks into the shared buffers
                for i, im in enumerate(images):
                    data_storage_numpy[i] = im
                    data_storage_numpy_mask[i] = masks[i]

                log("1load_images", "read", 0, start_batch_index)
                yield dict(filename=filename,
                           index=start_batch_index,
                           end_index=start_batch_index + len(images),
                           type="image",
                           timestamps=timestamps,
                           data_info=info,
                           mask_info=info_mask,
                           config=config,
                           image_count=image_count)
                if image_index != image_count - 1:
                    log("1load_images", "read", 1,
                        start_batch_index + len(images))
                # reset the accumulators for the next batch
                images = []
                masks = []
                timestamps = []
                start_batch_index = image_index + 1

            if image_index == image_count - 1:
                break

        cdb.db.close()
        # remove the temporary working copy now that it has been consumed
        if copy_of_file is not None:
            copy_of_file.unlink()

        yield dict(filename=filename, index=image_count, type="end")
    def __call__(self, filename, copy_of_file=None):
        """
        Load a ``.tif`` image stack and yield its frames to the detection
        pipeline in batches.

        Yields three kinds of dicts:
          * ``type="start"``  — announces the file and its total image count.
          * ``type="image"``  — one batch of frames, stored in the shared
            data storage and referenced via ``data_info`` / ``mask_info``.
          * ``type="end"``    — emitted once after the last batch.

        Parameters
        ----------
        filename :
            Path to the ``.tif`` file; also used to look up the config file
            and (optionally) to name the ``.cdb`` output.
        copy_of_file :
            Optional local working copy (a ``pathlib.Path``); it is opened
            instead of ``filename`` and unlinked once fully read.
        """
        # Removed the dead `reader2` helper class (it was only referenced in a
        # commented-out line) and the unused `pipey`/`preprocess` imports.
        import imageio
        import sys
        from deformationcytometer.detection.includes.regionprops import getTimestamp
        from deformationcytometer.includes.includes import getConfig
        import clickpoints
        import numpy as np

        log("1load_images", "prepare", 1)

        # open the image reader; on failure report and end the generator
        try:
            reader = imageio.get_reader(copy_of_file or filename)
        except Exception as err:
            print(err, file=sys.stderr)
            return
        # get the config file (always looked up next to the original filename)
        config = getConfig(filename)
        # get the total image count
        image_count = len(reader)

        # optionally mirror the frames into a ClickPoints database
        if self.write_clickpoints_file:
            cdb = clickpoints.DataFile(filename[:-4] + ".cdb", "w")
            cdb.setMaskType("prediction", color="#FF00FF", index=1)

        # announce the file before any image batches are produced
        yield dict(filename=filename,
                   index=-1,
                   type="start",
                   image_count=image_count)
        log("1load_images", "prepare", 0)

        data_storage_numpy = None

        log("1load_images", "read", 1)
        # per-batch accumulators; flushed every self.batch_size frames
        images = []
        timestamps = []
        start_batch_index = 0
        timestamp_start = None
        log("1load_images", "read", 1, 0)

        # iterate over all images in the file
        for image_index, im in enumerate(reader):
            # ensure image has only one channel
            if len(im.shape) == 3:
                im = im[:, :, 0]
            # get the timestamp from the file, relative to the first frame
            timestamp = float(getTimestamp(reader, image_index))
            if timestamp_start is None:
                timestamp_start = timestamp
            timestamp -= timestamp_start

            if self.write_clickpoints_file:
                cdb.setImage(filename,
                             frame=image_index)  #, timestamp=timestamp)

            images.append(im)
            timestamps.append(timestamp)

            # flush a batch when it is full or when the last frame was reached
            if image_index == image_count - 1 or len(
                    images) == self.batch_size:

                # allocate shared-memory slots for the images and the masks
                info = self.data_storage.allocate([len(images)] +
                                                  list(images[0].shape),
                                                  dtype=np.float32)
                info_mask = self.data_storage.allocate([len(images)] +
                                                       list(images[0].shape),
                                                       dtype=np.uint8)
                data_storage_numpy = self.data_storage.get_stored(info)
                # copy the accumulated frames into the shared buffer
                # NOTE(review): info_mask is allocated and yielded but never
                # written here — presumably filled by a downstream stage;
                # confirm against the consumer of "mask_info".
                for i, im in enumerate(images):
                    data_storage_numpy[i] = im

                log("1load_images", "read", 0, start_batch_index)
                yield dict(filename=filename,
                           index=start_batch_index,
                           end_index=start_batch_index + len(images),
                           type="image",
                           timestamps=timestamps,
                           data_info=info,
                           mask_info=info_mask,
                           config=config,
                           image_count=image_count)
                if image_index != image_count - 1:
                    log("1load_images", "read", 1,
                        start_batch_index + len(images))
                # reset the accumulators for the next batch
                images = []
                timestamps = []
                start_batch_index = image_index + 1

            if image_index == image_count - 1:
                break

        reader.close()
        # remove the temporary working copy now that it has been consumed
        if copy_of_file is not None:
            copy_of_file.unlink()

        yield dict(filename=filename, index=image_count, type="end")
Exemple #16
0
# ask the user / command line for the input video file
video = getInputFile()
print("video", video)

# derive the companion file paths (flatfield .npy and config .txt) from the
# video filename, in the same directory
name_ex = os.path.basename(video)
filename_base, file_extension = os.path.splitext(name_ex)
output_path = os.path.dirname(video)
flatfield = output_path + r'/' + filename_base + '.npy'
configfile = output_path + r'/' + filename_base + '_config.txt'

#%% Setup model
# shallow model (faster)
# NOTE(review): the network is initialised lazily later; None marks "not loaded yet"
unet = None

#%%
# read the experiment configuration that accompanies the video
config = getConfig(configfile)

# number of frames processed per network batch
batch_size = 100
print(video)
# two readers over the same file: imageio for frame access, plus the raw video
vidcap = imageio.get_reader(video)
vidcap2 = getRawVideo(video)
progressbar = tqdm.tqdm(vidcap)

# accumulator for all detected cells
cells = []

# read the first frame only to learn the frame shape for the batch buffer
im = vidcap.get_data(0)
batch_images = np.zeros([batch_size, im.shape[0], im.shape[1]],
                        dtype=np.float32)
batch_image_indices = []
# images-per-second counter for the progress bar
ips = 0
for image_index, im in enumerate(progressbar):