Example 1
import os
from datetime import datetime

import numpy as np
import tifffile
#Roi, RGB_encoder and encode_ij_roi are provided by https://github.com/dwaithe/ijpython_roi


def saveas_imagej_tiff(im_stk, stack_rois, d):
    """Save an image stack as an ImageJ TIFF with the detected ROIs as overlays.

    im_stk     -- image stack, assumed to be 5D in TZCYX order (ImageJ hyperstack).
    stack_rois -- iterable of (regions, z) pairs; each region holds four
                  bounding-box coordinates plus a value used in the ROI name.
    d          -- acquisition/configuration object carrying the microscope settings.
    """
    lets_get_meta = []
    width = im_stk.shape[4]
    height = im_stk.shape[3]

    print('Saving img stack of dims: ', im_stk.shape)
    for regions, z in stack_rois:
        for reg in regions:

            r0 = np.clip(reg[0], 0, width)
            r1 = np.clip(reg[1], 0, height)
            r2 = np.clip(reg[2], 0, width)
            r3 = np.clip(reg[3], 0, height)
            roi_b = Roi(r0, r1, r2, r3, width, height, 0)
            roi_b.name = "Region-1-p-" + str(reg[4])
            roi_b.roiType = 1
            if d.ch_to_save == 1:
                roi_b.setPosition(z)
            else:
                roi_b.setPositionH(1, z + 1, -1)  #channel, sliceZ, frame

            roi_b.strokeLineWidth = 1.0
            roi_b.strokeColor = RGB_encoder(255, 255, 0, 0)
            lets_get_meta.append(encode_ij_roi(roi_b))

    metadata = {
        'hyperstack': True,
        'mode': 'composite',
        'unit': 'um',
        'spacing': d.z_stage_move,  #z-spacing of the image stack (for unit see 'unit').
        'min': 0.0,
        'max': 0.0,
    }

    info = "-----------------------\n"
    info += "AMCA - Automated Microscope Control Algorithm.py. \n"
    info += "-----------------------\n"
    info += "Software by Dominic Waithe. \n"
    info += "Automatic detection was used to find the cells in this image.\n"
    info += "For more details: https://github.com/dwaithe/amca \n"
    info += "AMCA version used: " + str(
        d.amcarepo) + " (search github with this).\n"
    info += "Tiff export: tifffile.py (Christoph Gohlke)\n"
    info += "ROI encoded using functions from: https://github.com/dwaithe/ijpython_roi \n"
    info += "-----------------------\n"
    info += "Acquisition configuration.\n"
    info += "-----------------------\n"
    info += "Microscope type: " + d.microscope_type + ".\n"
    info += "Objective type: " + d.objective_type + ".\n"
    info += "Camera type: " + d.camera_type + ".\n"
    info += "Lamp type: " + d.lamp_type + ".\n"
    info += "Objective magnification: " + str(d.objective_mag) + "x.\n"
    info += "Camera pixel size: " + str(
        d.cam_pixel_size
    ) + " um (raw pixel size of camera before mag and binning).\n"
    info += "Camera binning factor: " + str(
        d.cam_binning) + " (hardware binning).\n"
    info += "Digital binning factor: " + str(
        d.digital_binning) + " (digital binning).\n"
    info += "XY-spacing: " + str(d.voxel_xy) + " um, Z-spacing: " + str(
        d.z_stage_move) + " um.\n"
    info += "Excitation lines: " + ",".join(
        d.excitation_lines[0:d.ch_to_image]) + ". \n"
    info += "Camera gain: " + str(d.cam_gain) + "\n"
    s = [str(i) for i in d.exposures[0:d.ch_to_image]]
    info += "Camera exposures: " + ",(ms) ".join(
        s) + "ms (acquisition exposure duration).\n"
    info += "Channels imaged: " + str(
        d.ch_to_image) + " (number of channels imaged).\n"
    info += "Channels analyzed: " + str(
        d.ch_to_analyze
    ) + " (number of channels on which detection was run).\n"
    info += "-----------------------\n"
    info += "Detection algorithm configuration. \n"
    info += "-----------------------\n"
    info += "Computer name: " + str(d.computer_name) + ".\n"
    info += "Processor type: " + str(d.processor_type) + ".\n"
    info += "Detection algorithm: " + str(d.algorithm_name) + ".\n"
    info += "Detection repo hash: " + d.dkrepo + " (search github with this).\n"
    info += "Detection model configuration: " + str(d.config_path) + ".\n"
    info += "Detection model weights: " + str(d.weight_path) + ".\n"
    info += "Detection metadata: " + str(d.meta_path) + ".\n"
    info += "Analysis method: " + str(d.analysis_method) + ".\n"
    info += "-----------------------\n"
    info += "Acquisition performed at: " + datetime.now().strftime(
        "%Y/%m/%d, %H:%M:%S") + ".\n"
    info += "-----------------------\n"
    info += "Stage X-pos: " + str(d.stage_pos_x) + " um.\n"
    info += "Stage Y-pos: " + str(d.stage_pos_y) + " um.\n"
    info += "Piezo Z-pos: "
    if d.names != []:
        for name in d.names:
            info += str(name) + ", "
    else:
        info += str(d.stage_pos_z) + " um.\n"
    info += "(um)\n"
    if d.best_focus_idx is not None:
        info += "best_focus_idx: " + str(d.best_focus_idx) + "\n"
        info += "best_focus_um: " + str(d.best_focus) + " um.\n"

    #Resolution tuple: ratio of pixels to physical unit (for unit see 'unit').
    resolution = (1. / d.voxel_xy, 1. / d.voxel_xy)
    print('numoftimepts', d.num_of_tpts)
    if d.num_of_tpts > 0:
        stp = str(d.tp).zfill(4)
        out_file_path = d.out_path + "img_stk_x_" + str(
            d.stage_pos_x) + "y_" + str(d.stage_pos_y) + "t_" + stp + ".tif"
    else:
        out_file_path = d.out_path + "img_stk_x_" + str(
            d.stage_pos_x) + "y_" + str(d.stage_pos_y) + ".tif"

    if not os.path.exists(d.out_path):
        os.makedirs(d.out_path)

    tifffile.imsave(out_file_path,
                    im_stk,
                    resolution=resolution,
                    shape=im_stk.shape,
                    ijmetadata={
                        'Overlays': lets_get_meta,
                        'info': info
                    },
                    metadata=metadata,
                    imagej=True)
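Newer tifffile releases prefer tifffile.imwrite, with the ImageJ-specific keys passed through metadata rather than ijmetadata. The following is a stripped-down sketch (not part of the original code) of the same export using that spelling; the array shape, file name and spacing values are arbitrary placeholders, and the ROI overlays and Info string are omitted:

import numpy as np
import tifffile

#Dummy 5D stack in TZCYX order, matching the axis layout assumed above.
im_stk = np.zeros((1, 5, 2, 64, 64), dtype=np.uint16)
voxel_xy, z_step = 0.1, 0.5  #assumed XY pixel size and z-spacing in um

tifffile.imwrite(
    "example_stack.tif",
    im_stk,
    imagej=True,
    resolution=(1. / voxel_xy, 1. / voxel_xy),  #pixels per physical unit
    metadata={'hyperstack': True, 'mode': 'composite', 'unit': 'um',
              'spacing': z_step, 'min': 0.0, 'max': 0.0})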
Example 2
            print(detect)
            #Rescale the detection (centre x, centre y, width, height) from the
            #network output grid to full-image pixel coordinates, clipping the
            #box to the image bounds.
            a = np.clip((detect[2][0] - detect[2][2] // 2) / output_wid *
                        import_im.shape[1], 0, import_im.shape[1])
            b = np.clip((detect[2][1] - detect[2][3] // 2) / output_hei *
                        import_im.shape[0], 0, import_im.shape[0])
            c = np.clip(detect[2][2] / output_wid * import_im.shape[1], 0,
                        import_im.shape[1])
            d = np.clip(detect[2][3] / output_hei * import_im.shape[0], 0,
                        import_im.shape[0])
            roi_b = Roi(a, b, c, d, im.shape[0], im.shape[1], 0)
            roi_b.name = "Region 1"
            roi_b.roiType = 1
            roi_b.position = 10
            roi_b.strokeLineWidth = 3.0
            roi_b.strokeColor = RGB_encoder(255, 0, 255, 255)
            data.append(encode_ij_roi(roi_b))
            svg_out += "<g><path d=\'m"
            svg_out += " " + str(int(a)) + "," + str(int(b)) + " " + str(
                int(c)) + "," + str(0) + " " + str(0) + "," + str(
                    int(d)) + " " + str(
                        int(0 - c)) + "," + str(0) + " z'/></g>" "\n"

        svg_out += "</svg>"
        f.write(svg_out)
        f.close()
        print(svg_out)
        metadata = {
            'hyperstack': True,
            'channels': 3,
            'ImageJ': '1.52g',
            'Overlays': data,
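The clipping near the top of this snippet converts detector boxes from the network's output grid back to full-resolution pixel coordinates. A small worked sketch of the same arithmetic, using hypothetical grid, image and detection values:

import numpy as np

#Hypothetical sizes: the detector ran on a 416x416 grid, the image is 1024x768.
output_wid, output_hei = 416, 416
img_w, img_h = 1024, 768

#A detection in grid coordinates: centre (cx, cy) and box size (w, h).
cx, cy, w, h = 208.0, 104.0, 52.0, 26.0

#Convert the centre to a top-left corner, rescale to image pixels and clip
#to the image bounds, mirroring the snippet above.
a = np.clip((cx - w // 2) / output_wid * img_w, 0, img_w)
b = np.clip((cy - h // 2) / output_hei * img_h, 0, img_h)
c = np.clip(w / output_wid * img_w, 0, img_w)
d = np.clip(h / output_hei * img_h, 0, img_h)
print(a, b, c, d)  #448.0 168.0 128.0 48.0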
Example 3
    def append_new_regions(self, outpath, extend_roi=True):
        """Function which will take images and append ROI as overlays to them.
        outpath       -- The output directory for images with annotation, can be same as input to save space.
        
        If extend_roi=True then any overlays present will be added to the existing ones.
        If extend_roi=False then any overlays present in the file will be replaced by the old ones.
        """
        for stg_x in self.x_unq:
            for stg_y in self.y_unq:
                pathname2 = "img_stk_x_" + str(stg_x) + "y_" + str(
                    stg_y) + "t_0021.tif"
                input_file = self.filepath + self.subfolder_for_images + pathname2

                output_file = outpath + pathname2
                #for ref in ref_loc:
                if str(stg_x) + '_' + str(stg_y) in self.ref_loc:

                    tfile = tifffile.TiffFile(input_file)

                    slices = self.ref_loc[str(stg_x) + '_' + str(stg_y)]
                    #Short lists all of the regions in an image.
                    ind = ((self.final_out[5, :] == float(stg_x)) &
                           (self.final_out[6, :] == float(stg_y)))
                    trks = self.final_out[:, ind]
                    data = []

                    #Get existing metadata
                    metadata = tfile.imagej_metadata
                    #Get existing image-data.

                    im_stk = tfile.asarray()

                    #Run through each region in the image.
                    for trk in range(0, trks.shape[1]):
                        trkv = trks[:, trk]

                        #Box corner and size relative to this stage position
                        #(columns 5 and 6 hold the stage x/y offsets).
                        x0 = (trkv[0] - trkv[5])
                        y0 = (trkv[1] - trkv[6])
                        wid = (trkv[2] - trkv[5]) - x0
                        hei = (trkv[3] - trkv[6]) - y0

                        #Initiate each region; coordinates are rescaled and the
                        #x-coordinate is mirrored about the image width.
                        roi_b = Roi(
                            self.img_width - (x0 / self.scale) - (wid / self.scale),
                            y0 / self.scale,
                            hei / self.scale,
                            wid / self.scale,
                            self.img_height,
                            self.img_width,
                            0)
                        roi_b.name = "Region-" + str(int(trkv[4]))
                        roi_b.roiType = 1

                        #Find which slice this z-location refers to: build the
                        #list of z-positions covered by the stack and look up
                        #the region's z within it (1-based slice index).
                        slices = self.ref_loc[str(trkv[5]) + '_' + str(trkv[6])]
                        ranget = list(np.round(
                            np.arange(slices[0], slices[1] + self.zspacing,
                                      self.zspacing), 2))

                        slice_idx = ranget.index(np.round(trkv[7], 2)) + 1
                        roi_b.position = slice_idx
                        roi_b.channel = 1
                        roi_b.setPositionH(1, slice_idx, 0)  #channel, sliceZ, frame

                        roi_b.strokeLineWidth = 3.0
                        #Colours each volume-region uniquely.
                        np.random.seed(int(trkv[4]))
                        roi_b.strokeColor = RGB_encoder(
                            255, np.random.randint(0, 255),
                            np.random.randint(0, 255),
                            np.random.randint(0, 255))

                        data.append(encode_ij_roi(roi_b))

                    #Either extend the existing overlays with the new regions
                    #or replace them, depending on extend_roi.
                    if extend_roi:
                        metadata['Overlays'].extend(data)
                    else:
                        metadata['Overlays'] = data

                    tifffile.imsave(output_file,
                                    im_stk,
                                    shape=im_stk.shape,
                                    imagej=True,
                                    ijmetadata=metadata)
                    tfile.close()
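As a rough sketch of the read-modify-write pattern used above: the file name below is a hypothetical placeholder, and the new ROI bytes, which would come from encode_ij_roi as in Example 1, are left empty.

import tifffile

path = "img_stk_x_0.0y_0.0t_0021.tif"  #hypothetical input written by Example 1

with tifffile.TiffFile(path) as tfile:
    metadata = tfile.imagej_metadata  #existing ImageJ metadata; as above, an 'Overlays' list is assumed
    im_stk = tfile.asarray()

new_rois = []  #bytes produced by encode_ij_roi(...), omitted in this sketch
extend_roi = True
if extend_roi:
    metadata['Overlays'].extend(new_rois)  #keep the existing overlays, add the new ones
else:
    metadata['Overlays'] = new_rois        #replace the existing overlays

tifffile.imsave(path, im_stk, shape=im_stk.shape, imagej=True, ijmetadata=metadata)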