Example #1
    def _dims_shape(lif: LifFile):
        """
        Get the dimensions for the opened file from the binary data (not the metadata)

        Parameters
        ----------
        lif: LifFile

        Returns
        -------
        list[dict]
            A list of dictionaries mapping each Dimension to its (start, stop)
            range. If the shape is consistent across Scenes, the list will have
            only one dictionary. If the shape is inconsistent, the list will have
            a dictionary for each Scene. A consistently shaped file with 3 scenes, 7 time-points
            and 4 Z slices containing images of (h,w) = (325, 475) would return
            [
             {'S': (0, 3), 'T': (0,7), 'X': (0, 475), 'Y': (0, 325), 'Z': (0, 4)}
            ].
            The result for a similarly shaped file but with a different number of time
            points per scene would yield
            [
             {'S': (0, 1), 'T': (0,8), 'X': (0, 475), 'Y': (0, 325), 'Z': (0, 4)},
             {'S': (1, 2), 'T': (0,6), 'X': (0, 475), 'Y': (0, 325), 'Z': (0, 4)},
             {'S': (2, 3), 'T': (0,7), 'X': (0, 475), 'Y': (0, 325), 'Z': (0, 4)}
            ]

        """
        shape_list = [
            {
                Dimensions.Time: (0, img.nt),
                Dimensions.Channel: (0, img.channels),
                Dimensions.SpatialZ: (0, img.nz),
                Dimensions.SpatialY: (0, img.dims[1]),
                Dimensions.SpatialX: (0, img.dims[0]),
            }
            for img in lif.get_iter_image()
        ]
        consistent = all(elem == shape_list[0] for elem in shape_list)
        if consistent:
            shape_list[0][Dimensions.Scene] = (0, len(shape_list))
            shape_list = [shape_list[0]]
        else:
            for idx, lst in enumerate(shape_list):
                lst[Dimensions.Scene] = (idx, idx + 1)
        return shape_list
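A minimal usage sketch for the helper above, assuming readlif's LifFile (which provides get_iter_image) and a placeholder file path; in the original the helper is defined on a reader class, so the exact call site may differ:

from readlif.reader import LifFile

lif = LifFile("example.lif")      # placeholder path
shapes = _dims_shape(lif)         # one dict if all Scenes share a shape
if len(shapes) == 1:
    print("All Scenes share one shape:", shapes[0])
else:
    print("Per-Scene shapes:", shapes)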
Example #2
def update_image():
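    """
    Read the current viewer/segmentation parameters from the query string into
    the session, regenerate the preview image from the LIF file, and return it
    as a base64-encoded JPEG (or a 400 response on failure).
    """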

    global CENTERS_NO

    try:
        session['stack'] = int(request.args.get('stack'))
        session['zframe'] = int(request.args.get('zframe'))
        session['channel'] = int(request.args.get('channel'))
        session['bg_thresh'] = int(request.args.get('bg_thresh'))
        session['adaptive_thresh'] = int(request.args.get('adaptive_thresh'))
        session['erosion'] = int(request.args.get('erosion'))
        session['dilation'] = int(request.args.get('dilation'))
        session['min_dist'] = int(request.args.get('min_dist'))
        session['gamma'] = float(request.args.get('gamma'))
        session['gain'] = float(request.args.get('gain'))

        # img = cv2.imread('./test.jpg', cv2.IMREAD_GRAYSCALE)

        lif_file = LifFile(session['file_path'])
        img_list = list(lif_file.get_iter_image())
        img, CENTERS_NO = generate_image(img_list, session['stack'], session['zframe'], session['channel'],
                                         session['bg_thresh'], session['adaptive_thresh'],
                                         session['erosion'], session['dilation'],
                                         session['min_dist'], session['gamma'], session['gain'],
                                         connectivity=CONNECTIVITY, circle_radius=CIRCLE_RADIUS)


        flag, encodedImage = cv2.imencode(".jpg", img)
        if not flag:
            raise ValueError("JPEG encoding failed")

        response = base64.b64encode(encodedImage)

        return response

    except Exception as e:
        logger.error(e)
        resp = {'message': 'Failed'}
        return make_response(jsonify(resp), 400)
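A hedged sketch of how this handler might be wired into a Flask app; the route name, secret key, and query-string values below are illustrative placeholders, not taken from the original project:

from flask import Flask

app = Flask(__name__)
app.secret_key = "change-me"          # required because the handler uses session

# Hypothetical registration; the real project may use a different route name.
app.add_url_rule('/update_image', view_func=update_image)

# Example request (all values are placeholders):
# GET /update_image?stack=0&zframe=0&channel=0&bg_thresh=30&adaptive_thresh=11
#        &erosion=1&dilation=2&min_dist=10&gamma=1.0&gain=1.0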
Example #3
def download_file():
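    """
    Export the processed images and detection data for the requested option,
    write data.csv and config.txt into a fresh export directory, zip the
    directory in memory, and return the archive as a file download.
    """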

    export_option = request.args.get('export_option')

    if os.path.exists(session['export_dir']):
        shutil.rmtree(session['export_dir'])
    os.makedirs(session['export_dir'])

    lif_file = LifFile(session['file_path'])
    img_list = list(lif_file.get_iter_image())
    data = download_image(export_option, session['export_dir'], img_list,
                          session['stack_list'], session['stack_dict_list'],
                          session['stack'], session['zframe'], session['channel'],
                          session['bg_thresh'], session['adaptive_thresh'],
                          session['erosion'], session['dilation'],
                          session['min_dist'], session['gamma'], session['gain'], CONNECTIVITY, CIRCLE_RADIUS)


    data_df = pd.DataFrame(data)
    export_file_path_data = os.path.join(session['export_dir'], "data.csv")
    data_df.to_csv(export_file_path_data, index=False, sep=";")

    config = {'background_threshold': session['bg_thresh'],
              'adaptive_threshold': session['adaptive_thresh'],
              'erosion_iteration': session['erosion'],
              'dilation_iteration': session['dilation'],
              'minimum_distance': session['min_dist']}

    export_file_path_config = os.path.join(session['export_dir'], "config.txt")
    with open(export_file_path_config, 'w') as file:
        file.write(json.dumps(config))

    def retrieve_file_paths(dirName):

        # setup file paths variable
        filePaths = []

        # Read all directory, subdirectories and file lists
        for root, directories, files in os.walk(dirName):
            for filename in files:
                # Create the full filepath by using os module.
                filePath = os.path.join(root, filename)
                filePaths.append(filePath)

        # return all paths
        return filePaths

    # Call the function to retrieve all files and folders of the assigned directory
    filePaths = retrieve_file_paths(session['export_dir'])

    # printing the list of all files to be zipped
    logger.info('The following list of files will be zipped:')
    for fileName in filePaths:
        logger.info(fileName)

    zip_file_path = session['export_dir'] + '.zip'
    with zipfile.ZipFile(zip_file_path, 'w') as zip_file:
        # write each collected file into the archive one by one
        for file in filePaths:
            zip_file.write(file)

    return_data = io.BytesIO()
    with open(zip_file_path, 'rb') as fo:
        return_data.write(fo.read())
    # (after writing, cursor will be at last byte, so move it to start)
    return_data.seek(0)

    os.remove(zip_file_path)
    shutil.rmtree(session['export_dir'])

    return send_file(return_data, mimetype='application/zip',
                     attachment_filename='download.zip')
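One design note: zip_file.write(file) stores each entry under its on-disk path. If flatter archive paths are preferred, a small variation (a sketch, not the original code) could pass arcname relative to the export directory:

    with zipfile.ZipFile(zip_file_path, 'w') as zip_file:
        for file in filePaths:
            # store entries relative to the export directory rather than the
            # full on-disk path
            zip_file.write(file, arcname=os.path.relpath(file, session['export_dir']))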
Example #4
    def _compute_offsets(lif: LifFile) -> Tuple[List[np.ndarray], np.ndarray]:
        """
        Compute the offsets for each of the YX planes so that the LifFile object
        doesn't need to be created for each YX plane read.

        Parameters
        ----------
        lif : LifFile
            The LifFile object with an open file pointer to the file.

        Returns
        -------
        List[numpy.ndarray]
            A list of arrays holding the byte offsets, indexed as [S][T, C, Z].
        numpy.ndarray
            An array holding the YX plane read length (in bytes) for each Scene.

        """
        scene_list = []
        scene_img_length_list = []

        for s_index, img in enumerate(lif.get_iter_image()):
            pixel_type = LifReader.get_pixel_type(lif.xml_root, s_index)
            (
                x_size,
                y_size,
                z_size,
                t_size,
            ) = img.dims  # X, Y, Z, T (referred to as such in the comments below)
            c_size = img.channels  # C
            img_offset, img_block_length = img.offsets
            offsets = np.zeros(shape=(t_size, c_size, z_size), dtype=np.uint64)
            t_offset = c_size * z_size
            z_offset = c_size
            seek_distance = c_size * z_size * t_size
            if img_block_length == 0:
                # In the case of a blank image, we can calculate the length from
                # the metadata in the LIF. When this is read by the parser,
                # it is set to zero initially.
                log.debug(
                    "guessing image length: LifFile assumes 1byte per pixel,"
                    " but I think this is wrong!"
                )
                image_len = seek_distance * x_size * y_size * pixel_type.itemsize
            else:  # B = bytes per pixel
                image_len = int(
                    img_block_length / seek_distance
                )  # B*X*Y*C*Z*T / C*Z*T = B*X*Y = size of an YX plane

            for t_index in range(t_size):
                t_requested = t_offset * t_index  # C*Z*t_index
                for c_index in range(c_size):
                    c_requested = c_index
                    for z_index in range(z_size):
                        z_requested = z_offset * z_index  # z_index * C
                        item_requested = (
                            t_requested + z_requested + c_requested
                        )  # the number of YX frames to jump
                        # self.offsets[0] is the offset to the beginning of the image
                        # block here we index into that block to get the offset for any
                        # YX frame in this image block
                        offsets[t_index, c_index, z_index] = np.uint64(
                            img.offsets[0] + image_len * item_requested
                        )

            scene_list.append(offsets)
            scene_img_length_list.append(image_len)

        return scene_list, np.asarray(scene_img_length_list, dtype=np.uint64)
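A minimal sketch of how the precomputed offsets could be used to pull a single YX plane straight from the file, assuming the caller already knows the plane's dtype and (rows, cols) shape from the LIF metadata; this is an illustration of how the offsets are meant to be indexed, not the reader's actual read path:

import numpy as np

def read_yx_plane(file_path, offsets, lengths, s, t, c, z, dtype, shape):
    # offsets / lengths are the two values returned by _compute_offsets;
    # offsets[s][t, c, z] is the byte offset of the plane, lengths[s] its size.
    with open(file_path, "rb") as f:
        f.seek(int(offsets[s][t, c, z]))
        buf = f.read(int(lengths[s]))
    return np.frombuffer(buf, dtype=dtype).reshape(shape)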
Example #5
def track_lif(lif_path: str, out_path: str, model: keras.models.Model) -> None:
    """
    Applies ML model (model object) to everything in the lif file.

    This will write a trackmate xml file via the method tm_xml.write_xml(),
    and save output tiff image stacks from the lif file.

    Args:
        lif_path (str): Path to the lif file
        out_path (str): Path to output directory
        model (keras.models.Model): A trained Keras model

    Returns: None
    """
    print("loading LIF")
    lif_data = LifFile(lif_path)
    print("Iterating over lif")
    for image in lif_data.get_iter_image():
        folder_path = "/".join(str(image.path).strip("/").split('/')[1:])
        path = folder_path + "/" + str(image.name)
        name = image.name

        if os.path.exists(os.path.join(out_path, path + '.tif.xml')) \
           or os.path.exists(os.path.join(out_path, path + '.tif.trackmate.xml')):
            print(str(path) + '.xml' + ' exists, skipping')
            continue

        make_dirs = os.path.join(out_path, folder_path)
        if not os.path.exists(make_dirs):
            os.makedirs(make_dirs)

        print("Processing " + str(path))
        start = time.time()
        # initialize XML creation for this file
        tm_xml = trackmateXML()
        i = 1
        image_out = image.get_frame()  # Initialize the output image
        images_to_append = []
        for frame in image.get_iter_t():
            images_to_append.append(frame)
            np_image = np.asarray(frame.convert('RGB'))
            image_array = np_image[:, :, ::-1].copy()

            tm_xml.filename = name + '.tif'
            tm_xml.imagepath = os.path.join(out_path, folder_path)
            if tm_xml.nframes < i:  # set nframes to the maximum i
                tm_xml.nframes = i
            tm_xml.frame = i
            # preprocess image for network
            image_array = preprocess_image(image_array)
            image_array, scale = resize_image(image_array)

            # process image
            boxes, scores, labels = model.predict_on_batch(
                np.expand_dims(image_array, axis=0))

            # correct for image scale
            boxes /= scale

            # filter the detection boxes
            pre_passed_boxes = []
            pre_passed_scores = []
            for box, score, label in zip(boxes[0], scores[0], labels[0]):
                if score >= 0.2:
                    pre_passed_boxes.append(box.tolist())
                    pre_passed_scores.append(score.tolist())

            passed_boxes, passed_scores = filter_boxes(
                in_boxes=pre_passed_boxes,
                in_scores=pre_passed_scores,
                _passed_boxes=[],
                _passed_scores=[])  # These are necessary

            print("found " + str(len(passed_boxes)) + " cells in " +
                  str(path) + " frame " + str(i))

            # tell the trackmate writer to add the passed_boxes to the final output xml
            tm_xml.add_frame_spots(passed_boxes, passed_scores)
            i += 1
        # write the image to trackmate, prepare for next image
        print("processing time: ", time.time() - start)
        tm_xml.write_xml()
        image_out.save(os.path.join(out_path, path + '.tif'),
                       format="tiff",
                       append_images=images_to_append[1:],
                       save_all=True,
                       compression='tiff_lzw')
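A hedged invocation sketch; the model path and the plain keras load call are placeholders, since the original project may load its detection model with a framework-specific helper (for example a keras-retinanet inference model):

import keras

model = keras.models.load_model("trained_model.h5")   # placeholder path/loader
track_lif("experiment.lif", "output/", model)          # paths are illustrative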