Exemple #1
0
 def connect(self, auto_update: bool = True):
     """Open the sqlite3 connection, creating the schema on first use.

     No-op when a connection already exists. When the backing file does
     not exist yet, the main table is created and, if *auto_update* is
     True, self.update() is called to populate it. Always returns True.
     """
     if self.engine is not None:
         return True
     must_create = not self.is_exists()
     # Resolve the connection target; on-disk databases need their folder.
     if self.db_qualified_name == ":memory:":
         target = ":memory:"
     else:
         target = self.db_info.db_full_file_path
         force_directories(self.db_folder_name)
     self.engine = sqlite3.connect(
         database=target,
         detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES,
     )
     if must_create:
         self.engine.execute(
             f"""CREATE TABLE {self.main_table} (Luid TEXT NOT NULL PRIMARY KEY,
                                                     Name TEXT NOT NULL COLLATE NOCASE,
                                                     FilePath TEXT NOT NULL COLLATE NOCASE,
                                                     Experiment TEXT COLLATE NOCASE,
                                                     Plant TEXT COLLATE NOCASE,
                                                     Date DATE,
                                                     Time TIME_OBJECT,
                                                     date_time TIMESTAMP,
                                                     Camera TEXT COLLATE NOCASE,
                                                     Angle TEXT COLLATE NOCASE,
                                                     Wavelength TEXT COLLATE NOCASE)"""
         )
         if auto_update:
             self.update()
     return True
Exemple #2
0
 def load_from_database(self, sftp):
     """Fetch this image over SFTP, caching it on local disk.

     :param sftp: open SFTP client exposing .open() (paramiko-style — confirm)
     :return: decoded image array (after self.fix_image), or None on failure
     """
     # Serve straight from the on-disk cache when available.
     if os.path.isfile(self.cache_file_path):
         logger.debug(f"Retrieved from cache: {str(self)}")
         return self.load_from_harddrive(self.cache_file_path)
     src_img = None
     try:
         logger.info(f"Downloading {self.name}, please wait...")
         try:
             with sftp.open(self.blob_path) as file:
                 file_size = file.stat().st_size
                 file.prefetch(file_size)
                 file.set_pipelined()
                 # FIX: np.fromstring is deprecated/removed for binary data;
                 # np.frombuffer decodes the raw bytes without a copy.
                 src_img = cv2.imdecode(
                     np.frombuffer(file.read(), np.uint8),
                     1,
                 )
                 # Cache only when the mass-storage folder is mounted.
                 if os.path.isdir(ipso_folders.get_path("mass_storage", False)):
                     force_directories(os.path.dirname(self.cache_file_path))
                     cv2.imwrite(self.cache_file_path, src_img)
         except Exception as e:
             # Best-effort: a transfer failure still reaches fix_image below.
             logger.exception(f"FTP error: {repr(e)}")
         src_img = self.fix_image(src_image=src_img)
     except Exception as e:
         logger.exception(f"Failed to download {repr(self)} because {repr(e)}")
         return None
     else:
         logger.info(f"Download succeeded for  {self.name}")
         return src_img
Exemple #3
0
    def process_wrapper(self, **kwargs):
        """
        Augment Data:
        Copies image to target folder after modifying it
        Can have a ROI as a pre-processor
        Real time : False

        Keyword Arguments (in parentheses, argument name):
            * Target folder (target_folder):
            * Put image in subfolder with experiment as its name (add_sub_folder):
            * Image output format (output_format):
            * Test only, do not actually copy (test_only):
            * Source image (original):
            * Rotate 90 degrees (r90):
            * Rotate 180 degrees (r180):
            * Rotate 270 degrees (r270):
            * flip horizontally (flip_h):
            * flip vertically (flip_v):
            * Gamma values (same syntax as grid search) (gamma_values):
        """
        wrapper = self.init_wrapper(**kwargs)
        if wrapper is None:
            return False

        res = False
        try:
            self.data_dict = {}
            # Decode the gamma grid-search string into individual values.
            p = self.find_by_name(name="gamma_values")
            gsl = None if p is None else p.decode_string(p.value)
            src_img = wrapper.current_image
            # Only create the output folder when we actually intend to write.
            if self.get_value_of("test_only") == 0:
                try:
                    force_directories(self.output_path)
                except Exception as e:
                    logger.error(f"Unable to create folder: {repr(e)}")
            if not self.output_path:
                logger.error("Failed : Missing folder parameter")
            elif gsl:
                self.add_value(key="source_name", value=wrapper.name, force_add=True)
                # One saved variant set per requested gamma value.
                for gamma_value in gsl:
                    self.save_image(
                        image=src_img, gamma=float(gamma_value), path=self.output_path
                    )
                res = True
            else:
                self.add_value(key="source_name", value=wrapper.name, force_add=True)
                self.save_image(image=src_img, gamma=1, path=self.output_path)
                res = True
        except Exception as e:
            # FIX: original read "self. name" (stray space in attribute access).
            logger.error(
                f'Failed to process {self.name}: "{repr(e)}"',
            )
            res = False
        finally:
            # Success flag reflects whether any data was recorded.
            self.result = len(self.data_dict) > 0
        # FIX: return moved out of finally so non-Exception errors
        # (e.g. KeyboardInterrupt) are no longer silently swallowed.
        return res
    def __init__(self, prefix):
        """Open (creating if needed) the <prefix>_annotations.db store and bind a session."""
        force_directories("saved_data")
        storage_dir = ipso_folders.get_path("saved_data", force_creation=True)
        db_file = os.path.join(storage_dir, f"{prefix}_annotations.db")
        engine = create_engine(f"sqlite:///{db_file}")
        # Bind a fresh session and make sure all ORM tables exist.
        self.session = sessionmaker(bind=engine)()
        BaseOrmClass.metadata.create_all(engine)
    def process_groups(self, groups_list):
        """Run the pipeline worker over *groups_list*, single- or multi-process.

        Worker count comes from self.multi_thread: True uses every available
        core (capped at 10), an int caps the pool size, anything else runs
        sequentially. Each result is fed to self.handle_result; the loop
        stops early when self.check_abort() is set.
        """
        # Build images and data
        if not groups_list:
            return
        force_directories(self.options.partials_path)
        logger.info(f"   --- Processing {len(groups_list)} files ---")
        self.init_progress(total=len(groups_list),
                           desc="Processing images")

        max_cores = min(10, mp.cpu_count())
        # FIX: bool must be tested before int — bool is an int subclass, so
        # the original int-first test turned multi_thread=True into
        # min(True, max_cores) == 1 and silently forced single-core runs.
        if isinstance(self.multi_thread, bool):
            num_cores = max_cores if self.multi_thread else 1
        elif isinstance(self.multi_thread, int):
            num_cores = min(self.multi_thread, max_cores)
        else:
            num_cores = 1
        if (num_cores > 1) and len(groups_list) > 1:
            # FIX: pool as context manager so worker processes are reclaimed
            # even on break/exception (the original never closed the pool).
            with mp.Pool(num_cores) as pool:
                chunk_size = num_cores
                for i, res in enumerate(
                        pool.imap_unordered(
                            _pipeline_worker,
                            ((
                                fl,
                                self.options,
                                self.script,
                                None if self._target_database is None else
                                self._target_database.copy(),
                            ) for fl in groups_list),
                            chunk_size,
                        )):
                    if self.check_abort():
                        logger.info("User stopped process")
                        break
                    self.handle_result(res, i, len(groups_list))
        else:
            for i, fl in enumerate(groups_list):
                res = _pipeline_worker([
                    fl,
                    self.options,
                    self.script,
                    None if self._target_database is None else
                    self._target_database.copy(),
                ])
                if self.check_abort():
                    logger.info("User stopped process")
                    break
                self.handle_result(res, i, len(groups_list))
        self.close_progress()
        logger.info("   --- Files processed ---")
Exemple #6
0
 def _write_images_output_mosaic(self):
     """Write the debug mosaic to <dst_path>mosaics/<name>.jpg (best effort)."""
     try:
         mosaic_img = self.build_mosaic((1440, 2560, 3), self._mosaic_data)
         # Trailing "" join guarantees the directory path ends with a separator.
         out_dir = os.path.join("{}{}".format(self.dst_path, "mosaics"), "")
         force_directories(out_dir)
         cv2.imwrite("{}{}.jpg".format(out_dir, self.name), mosaic_img)
     except Exception as e:
         # Unsupported format detected
         print('Exception: "{}" - Image: "{}", unsupported mosaic'.format(
             repr(e), str(self)))
    def yield_test_process_groups(self, groups_list):
        """Dry-run generator mirroring process_groups.

        Yields {"step": i, "total": total} progress dicts instead of real
        results, exercising the same single/multi-process dispatch via
        _dummy_worker. Stops early when self.check_abort() is set.
        """
        # Build images and data
        if not groups_list:
            return
        force_directories(self.options.partials_path)
        logger.info(f"   --- Processing {len(groups_list)} files ---")
        self.init_progress(
            total=len(groups_list),
            desc="Processing images",
            yield_mode=True,
        )

        max_cores = min(10, mp.cpu_count())
        # FIX: bool must be tested before int — bool is an int subclass, so
        # the original int-first test turned multi_thread=True into a
        # single-core run.
        if isinstance(self.multi_thread, bool):
            num_cores = max_cores if self.multi_thread else 1
        elif isinstance(self.multi_thread, int):
            num_cores = min(self.multi_thread, max_cores)
        else:
            num_cores = 1
        total = len(groups_list)
        if (num_cores > 1) and total > 1:
            # FIX: pool as context manager so worker processes are reclaimed
            # even on break (the original never closed the pool).
            with mp.Pool(num_cores) as pool:
                for i, res in enumerate(
                        pool.imap_unordered(
                            _dummy_worker,
                            # (fl) is just fl — items are passed as-is.
                            (fl for fl in groups_list),
                            num_cores,
                        )):
                    if self.check_abort():
                        logger.info("User stopped process")
                        break
                    yield {"step": i, "total": total}
                    logger.info(f"Test process (multi thread): {res}")
                    sleep(0.1)
        else:
            for i, fl in enumerate(groups_list):
                if self.check_abort():
                    logger.info("User stopped process")
                    break
                yield {"step": i, "total": total}
                logger.info(f"Test process (single thread): {fl}")
                sleep(0.1)

        self.close_progress()
        logger.info("   --- Files processed ---")
Exemple #8
0
    def save_image(self, image, gamma, path):
        """Write gamma-adjusted rotated/flipped variants of *image* to *path*.

        :param image: source image array
        :param gamma: gamma correction factor; 1 means unchanged
        :param path: destination folder (an experiment subfolder may be added)
        """
        test_only = self.get_value_of("test_only") == 1
        if self.get_value_of("add_sub_folder") == 1:
            path = os.path.join(path, self.wrapper.file_handler.experiment, "")
            force_directories(path)

        # Resolve the output extension; both branches yield a value that
        # already starts with a dot (file_handler.file_ext includes it —
        # consistent with the explicit f".{file_ext}" below).
        file_ext = self.get_value_of("output_format")
        if file_ext == "source":
            file_ext = self.wrapper.file_handler.file_ext
        else:
            file_ext = f".{file_ext}"

        if gamma != 1:
            # Classic gamma LUT: out = (in/255) ** (1/gamma) * 255
            inv_gamma = 1.0 / gamma
            table = np.array(
                [((i / 255.0) ** inv_gamma) * 255 for i in np.arange(0, 256)]
            ).astype("uint8")
            img = cv2.LUT(src=image, lut=table)
            root_file_name = f"{self.wrapper.file_handler.file_name_no_ext}_{gamma:.2f}"
        else:
            img = image.copy()
            root_file_name = self.wrapper.file_handler.file_name_no_ext

        # NOTE(review): the "_flip_v" entry uses transpose+flip, which is a
        # rotation rather than a plain vertical flip — confirm intended.
        for is_write, variant, text in zip(
            [
                self.get_value_of("original") == 1,
                self.get_value_of("r90") == 1,
                self.get_value_of("r180") == 1,
                self.get_value_of("r270") == 1,
                self.get_value_of("flip_h") == 1,
                self.get_value_of("flip_v") == 1,
            ],
            [
                img.copy(),
                cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE),
                cv2.rotate(img, cv2.ROTATE_180),
                cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE),
                cv2.flip(img, +1),
                cv2.flip(cv2.transpose(img), 0),
            ],
            ["", "_r90", "_r180", "_r270", "_flip_h", "_flip_v"],
        ):
            if is_write:
                # FIX: file_ext already contains the leading dot; the
                # original f"...{text}.{file_ext}" produced names like
                # "plant_r90..jpg".
                new_name = f"{root_file_name}{text}{file_ext}"
                self.add_value(key=f"img{text}", value=new_name, force_add=True)
                if test_only:
                    self.wrapper.store_image(variant, new_name)
                else:
                    cv2.imwrite(filename=os.path.join(path, new_name), img=variant)
Exemple #9
0
 def _write_images_output_mosaic(self):
     """Prints debug mosaic"""
     try:
         # Pick the mosaic layout for the camera type, then build once.
         if self.is_vis:
             layout = np.array(
                 [
                     [
                         "source",
                         "fix_wb_0_50",
                         "mask_vab",
                         "mask_erode_3_2_times",
                     ],
                     [
                         "mask_dilate_3_2_times",
                         "mask_rois_applyed",
                         "pseudo_on",
                         "shapes",
                     ],
                 ]
             )
         elif self.is_fluo:
             layout = self._mosaic_data
         elif self.is_nir:
             layout = np.array(["source", "", "", "", "", "", "pseudo_on", "shapes"])
         else:
             raise NotImplementedError
         canvas = self.build_mosaic((1440, 2560, 3), layout)
         out_dir = os.path.join("{}{}".format(self.dst_path, "mosaics"), "")
         force_directories(out_dir)
         cv2.imwrite("{}{}.jpg".format(out_dir, self.name), canvas)
     except Exception as e:
         # Unsupported format detected
         print(
             'Exception: "{}" - Image: "{}", unsupported mosaic'.format(
                 repr(e), str(self)
             )
         )
Exemple #10
0
 )
 plant_lst = get_query_items(column="plant", experiment=experiment)
 angle_lst = get_query_items(column="angle", experiment=experiment)
 date_list = [
     item.replace("-", "/")
     if isinstance(item, str) else item.strftime(_DATE_FORMAT)
     for item in get_query_items(column="date", experiment=experiment)
 ]
 plants = st.multiselect("Plants", plant_lst,
                         plant_lst if job_choice == "Single plant" else [])
 dates = st.multiselect("Dates", date_list, date_list)
 num_cores = min([MAX_CORES, num_cores])
 if job_choice == "Single plant":
     angles = st.multiselect("View options", angle_lst, angle_lst)
     if st.button("Build videos"):
         force_directories(dst_folder)
         st.write(f"Building {len(plants)} videos using {num_cores} cores")
         current_progress = st.progress(0)
         total_ = len(plants)
         if num_cores > 1:
             pool = mp.Pool(num_cores)
             chunky_size_ = 1
             for i, _ in enumerate(
                     pool.imap_unordered(
                         build_single_plant_video,
                         ((
                             plant_,
                             dst_folder,
                             current_database.copy(),
                             dates,
                             experiment,
    def build_channel_mask(self, source_image, **kwargs):
        """Build the plant mask for *source_image* and store it in self.mask.

        The pot-cover circle is fitted with IptHoughCircles and cached as a
        pickle under stored_data/ next to this module, so later runs on the
        same plant skip detection. Dark and non-dark covers use different
        detection channels and different mask recipes; most paths merge an
        inner (within the safe-ish ROI) and an outer mask with multi_or.

        Returns False on failure or when the final mask is None, otherwise
        whether the mask contains any non-zero pixel. Errors are reported
        through self.error_holder.
        """
        try:
            # Add cover ROI
            folder_ = os.path.join(os.path.dirname(__file__), "stored_data/")
            pickled_file = f"{folder_}{self.name}_inneCircleRegion.pkl"
            force_directories(folder_)
            if os.path.isfile(pickled_file):
                # Reuse the circle cached by a previous run.
                # NOTE(review): pickle.load on a module-local cache file;
                # safe only while stored_data/ is trusted.
                with open(pickled_file, "rb") as f:
                    circle = pickle.load(f)
            else:
                # Base Hough-circle settings; channel/threshold depend on
                # the cover type below.
                kwargs_ = dict(
                    wrapper=self,
                    source_file="cropped_source",
                    operator="sobel",
                    min_radius=550,
                    max_radius=700,
                    min_distance=10,
                )
                if self.is_dark_cover():
                    kwargs_.update(
                        dict(channel="s",
                             step_radius=10,
                             max_peaks=4,
                             threshold=80))
                else:
                    kwargs_.update(
                        dict(channel="rd", threshold=133, max_peaks=2))
                with IptHoughCircles(**kwargs_) as (res, ed):
                    if not res or not ed.result:
                        self.error_holder.add_error("Unable to detect circles")
                        circle = None
                    else:
                        # Keep the smallest detected circle and cache it.
                        circles = sorted(ed.result,
                                         key=lambda circle_: circle_[2])
                        circle = circles[0]
                        with open(pickled_file, "wb") as f:
                            pickle.dump(circle, f)

            if self.is_dark_cover():
                if circle is not None:
                    # ROIs centered on the detected circle; the +502/+224
                    # offsets presumably translate from the cropped frame
                    # back to the full image — TODO confirm against the
                    # cropping code.
                    self.add_circle_roi(
                        circle[0] + 502,
                        circle[1] + 224,
                        circle[2] - 4,
                        "safe_ish_roi",
                        "safe_ish",
                    )
                    self.add_circle_roi(
                        circle[0] + 502,
                        circle[1] + 224,
                        circle[2] - 32,
                        "safe_roi",
                        "safe",
                    )

                    # Inner mask: h AND b Otsu thresholds, opened once,
                    # clipped to the safe-ish ROI.
                    mask_inner = self.build_mask(
                        source_image,
                        **dict(
                            is_store_images=True,
                            merge_action="multi_and",
                            params_list=[
                                dict(channel="h", method="otsu", invert=True),
                                dict(channel="b", method="otsu", invert=False),
                            ],
                        ),
                    )
                    mask_inner = self.open(mask_inner,
                                           kernel_size=3,
                                           proc_times=1)
                    mask_inner = self.keep_roi(src_mask=mask_inner,
                                               roi="safe_ish_roi",
                                               dbg_str="mask_inner")

                    # Outer mask: wavelength + hue-band + b channels, with
                    # the safe-ish ROI deleted so inner/outer don't overlap.
                    mask_outer = self.build_mask(
                        source_image,
                        **dict(
                            is_store_images=True,
                            merge_action="multi_and",
                            params_list=[
                                dict(channel="wl_680",
                                     method="otsu",
                                     invert=True),
                                dict(channel="wl_905",
                                     method="otsu",
                                     invert=False),
                                dict(
                                    channel="h",
                                    min_t=10,
                                    max_t=70,
                                    morph_op="open",
                                    kernel_size=3,
                                    proc_times=2,
                                ),
                                dict(channel="b", method="otsu", invert=False),
                            ],
                        ),
                    )
                    mask_outer = self.delete_roi(mask_outer, "safe_ish_roi",
                                                 "mask_outer")
                    mask_outer = self.open(mask_outer,
                                           kernel_size=5,
                                           proc_times=2,
                                           dbg_text="mask_open")

                    # Merge inner and outer, then clip to the main ROI.
                    mask = self.multi_or((mask_inner, mask_outer))
                    mask = self.keep_roi(mask, "main_roi")
                else:
                    # No circle available: single global mask clipped to
                    # the main ROI.
                    mask = self.build_mask(
                        source_image,
                        **dict(
                            is_store_images=True,
                            merge_action="multi_and",
                            params_list=[
                                dict(channel="wl_680",
                                     method="otsu",
                                     invert=True),
                                dict(channel="wl_905",
                                     method="otsu",
                                     invert=False),
                                dict(channel="h", method="otsu", invert=True),
                                dict(channel="b", method="otsu", invert=False),
                            ],
                        ),
                    )
                    mask = self.keep_roi(mask,
                                         "main_roi",
                                         dbg_str="mask_roi_raw")
                    mask = self.open(mask,
                                     kernel_size=5,
                                     proc_times=2,
                                     dbg_text="mask_open")
            else:
                # NOTE(review): unlike the dark-cover path, this branch has
                # no guard for circle is None; a failed detection raises
                # TypeError here and is reported by the except below —
                # confirm this is intended.
                self.add_circle_roi(
                    circle[0] + 502,
                    circle[1] + 224,
                    circle[2] - 4,
                    "safe_ish_roi",
                    "safe_ish",
                )
                self.add_circle_roi(circle[0] + 502, circle[1] + 224,
                                    circle[2] - 64, "safe_roi", "safe")
                # Inner mask: hue band (closed) AND b Otsu, clipped to the
                # safe-ish ROI.
                mask_inner = self.build_mask(
                    source_image,
                    **dict(
                        is_store_images=True,
                        merge_action="multi_and",
                        params_list=[
                            dict(
                                channel="h",
                                min_t=3,
                                max_t=70,
                                morph_op="close",
                                kernel_size=3,
                                proc_times=2,
                            ),
                            dict(channel="b", method="otsu", invert=False),
                        ],
                    ),
                )
                mask_inner = self.keep_roi(mask_inner,
                                           "safe_ish_roi",
                                           dbg_str="mask_inner")

                # Outer mask: four Otsu-thresholded channels AND-ed, minus
                # the safe-ish ROI.
                mask_outer = self.build_mask(
                    source_image,
                    **dict(
                        is_store_images=True,
                        merge_action="multi_and",
                        params_list=[
                            dict(channel="h", method="otsu", invert=True),
                            dict(channel="s", method="otsu", invert=False),
                            dict(channel="l", method="otsu", invert=False),
                            dict(channel="b", method="otsu", invert=False),
                        ],
                    ),
                )
                mask_outer = self.delete_roi(mask_outer, "safe_ish_roi",
                                             "mask_outer")

                mask = self.multi_or((mask_inner, mask_outer))
                mask = self.open(mask,
                                 kernel_size=5,
                                 proc_times=1,
                                 dbg_text="mask_open")
        except Exception as e:
            self.error_holder.add_error(
                f'Failed to build channel mask because "{repr(e)}"')
            return False
        else:
            # Persist the mask, store a debug image, and report success as
            # "mask exists and has at least one foreground pixel".
            self.mask = mask
            self.store_image(self.mask, "channel_mask", self.rois_list)
            if self.mask is None:
                return False
            else:
                return np.count_nonzero(self.mask) > 0
 def init_rois(self):
     """Set up the pot and helper ROIs, fitting the pot circle via Hough.

     The fitted circle is pickled beside this module so later runs on the
     same plant skip detection; a fixed fallback circle is used when the
     detection fails.
     """
     cache_dir = os.path.join(os.path.dirname(__file__), "stored_data/")
     cache_file = f"{cache_dir}{self.name}_inneCircleRegion.pkl"
     force_directories(cache_dir)
     if os.path.isfile(cache_file):
         # Reuse the circle fitted on a previous run.
         with open(cache_file, "rb") as fh:
             pot_circle = pickle.load(fh)
     else:
         with IptHoughCircles(
             wrapper=self,
             min_radius=498,
             max_radius=598,
             max_peaks=4,
             line_width=20,
             keep_only_one=1,
             target_position="MIDDLE_CENTER",
             expand_circle=-40,
             operator="sobel",
             threshold=135,
         ) as (res, ed):
             if res and ed.result:
                 # Keep the smallest detected circle and cache it.
                 pot_circle = min(ed.result, key=lambda c: c[2])
                 with open(cache_file, "wb") as fh:
                     pickle.dump(pot_circle, fh)
             else:
                 self.error_holder.add_error("Unable to detect circles")
                 pot_circle = None
     if pot_circle is None:
         # Detection failed: fall back to a fixed circle position.
         self.error_holder.add_error("Unable to detect pot")
         fallback_radius = 500
         self.add_circle_roi(
             left=730 + fallback_radius,
             top=484 + fallback_radius,
             radius=fallback_radius,
             name="pot_exterior",
             tag="none",
         )
     else:
         self.add_circle_roi(
             left=pot_circle[0],
             top=pot_circle[1],
             radius=pot_circle[2] - 20,
             name="pot_exterior",
             tag="none",
         )
         # Exposure checking and linear transformation share the same
         # smaller processing ROI.
         for tool_name in ("IptExposureChecker", "IptLinearTransformation"):
             self.add_circle_roi(
                 left=pot_circle[0],
                 top=pot_circle[1],
                 radius=pot_circle[2] - 150,
                 name=tool_name,
                 tag="process_roi",
             )
     self.add_rect_roi(
         left=872,
         width=666,
         top=820,
         height=728,
         name="plant_guessed_position",
         tag="helper",
     )
Exemple #13
0
    def process_wrapper(self, **kwargs):
        """
        Visualization helper:
        With the current image and a mask build a visualization for selected features
        Real time: True

        Keyword Arguments (in parentheses, argument name):
            * Activate tool (enabled): Toggle whether or not tool is active
            * Save generated image (save_image):
            * Name in csv (img_name):
            * Image output format (output_format):
            * Output naming convention (output_name):
            * Prefix or suffix (prefix_suffix):
            * Pseudo color channel (channel):
            * Select pseudo color map (color_map):
            * Foreground (foreground):
            * Foreground color (fore_color):
            * Background (background):
            * Background color (bcg_color):
            * Background intensity (bck_grd_luma):
            * Normalize source image (normalize):
            * Name of ROI to be used (roi_names): Operation will only be applied inside of ROI
            * ROI selection mode (roi_selection_mode):
            * Contour thickness (contour_thickness):
            * Add numeric value for contour (cnt_num):
            * Hull thickness (hull_thickness):
            * Bounding rectangle thickness (bounding_rec_thickness):
            * Straight bounding rectangle thickness (straight_bounding_rec_thickness):
            * Enclosing circle thickness (enclosing_circle_thickness):
            * Centroid width (centroid_width):
            * Centroid line width (centroid_line_width):
            * Add numeric value for centroid x value (cx_num):
            * Add numeric value for centroid y value (cy_num):
            * Height thickness (height_thickness):
            * Width thickness (width_thickness):
        """

        wrapper = self.init_wrapper(**kwargs)
        if wrapper is None:
            return False

        res = False
        try:
            if self.get_value_of("enabled") == 1:
                # Optionally normalize the source before drawing.
                if self.get_value_of("normalize") == 1:
                    img = call_ipt(ipt_id="IptNormalize", source=wrapper)
                else:
                    img = wrapper.current_image

                self.result = wrapper.draw_image(
                    src_image=img,
                    src_mask=self.get_mask(),
                    **self.params_to_dict(
                        include_input=True,
                        include_output=False,
                        include_neutral=False,
                    ),
                )

                if self.get_value_of("save_image") != 0:
                    dst_path = self.build_path()
                    self.add_value(
                        key=self.get_value_of("img_name"),
                        value=dst_path,
                        force_add=True,
                    )
                    force_directories(self.output_path)
                    cv2.imwrite(filename=dst_path, img=self.result)
                wrapper.store_image(self.result, "visualization")
                self.demo_image = self.result
                res = True
            else:
                # Tool disabled: pass the current image through untouched.
                wrapper.store_image(wrapper.current_image, "current_image")
                res = True
        except Exception as e:
            res = False
            logger.error(f"Data viz FAILED, exception: {repr(e)}")
        # FIX: return moved out of finally so non-Exception errors
        # (e.g. KeyboardInterrupt) are no longer silently swallowed.
        return res
Exemple #14
0
def launch(**kwargs):
    """Run a pipeline script against a set of images and write CSV results.

    Exactly one image source must be supplied: ``image`` (single file),
    ``image_list`` (text file of paths), ``image_folder``, or an
    ``experiment``/``database`` pair.  The ``script`` may be a JSON file
    path or an already-parsed dict.  Results are written under
    ``output_folder`` (optionally in a sub folder) as ``csv_file_name``.

    Returns:
        int: 0 on success, 1 on any configuration or load error.
    """

    def exit_error_message(msg: str) -> None:
        # Fatal configuration errors are echoed to the console and the log.
        print(msg)
        logger.error(msg)

    start = timer()

    # Optional database credentials.
    if "user" in kwargs and "password" in kwargs:
        dbp.master_password = kwargs.pop("user"), kwargs.pop("password")

    # Script: a path on disk is loaded as JSON, anything else passes through.
    script = kwargs.get("script", None)
    if script is not None and os.path.isfile(script):
        with open(script, "r") as f:
            script = json.load(f)
    kwargs["script"] = script

    # Image source selection: exactly one source is allowed.
    image = kwargs.get("image", None)
    image_list = kwargs.get("image_list", None)
    image_folder = kwargs.get("image_folder", None)
    src_count = (1 if "experiment" in kwargs and "database" in kwargs else len(
        [src for src in [image, image_list, image_folder] if src is not None]))
    if src_count == 0:
        exit_error_message("Missing source images")
        return 1
    elif src_count > 1:
        exit_error_message("Too many sources")
        return 1

    if image is not None:
        kwargs["images"] = [image]
    elif image_list is not None:
        with open(image_list, "r") as f:
            # Strip the trailing newline from each listed path.
            # Fix: was ``img.os.replace("\n", "")`` which raised
            # AttributeError (str has no ``os`` attribute).
            kwargs["images"] = [img.replace("\n", "") for img in f.readlines()]
    elif image_folder is not None:
        img_lst = ImageList((".jpg", ".tiff", ".png", ".bmp"))
        img_lst.add_folder(image_folder)
        kwargs["images"] = img_lst.filter(masks=None)

    # Restore a previously stored state, overridden by current kwargs.
    stored_state = kwargs.pop("stored_state", None)
    res = restore_state(blob=stored_state, overrides=kwargs)

    # Retrieve images
    image_list_ = res.get("images", None)

    # Build database accessor.
    db_data = res.get("database_data", None)
    database = res.get("database", None)
    experiment = res.get("experiment", None)
    if db_data is None:
        if database is None:
            db = None
        else:
            db = dbf.db_info_to_database(
                DbInfo(
                    display_name=experiment,
                    target=database,
                    dbms="pandas",
                ))
    else:
        db = dbf.db_info_to_database(dbb.DbInfo(**db_data))

    if experiment is not None:
        # Default the sub folder and CSV file name from the experiment name.
        if "sub_folder_name" not in res or not res["sub_folder_name"]:
            res["sub_folder_name"] = experiment
        if "csv_file_name" not in res or not res["csv_file_name"]:
            res["csv_file_name"] = f"{experiment.lower()}_raw_data"
    else:
        # NOTE(review): if both ``experiment`` and ``db_data`` are None this
        # raises TypeError (``DbInfo(**None)``) — confirm intended inputs.
        db = dbf.db_info_to_database(dbb.DbInfo(**db_data))

    # Retrieve output folder; trailing separator is kept deliberately.
    output_folder_ = res.get("output_folder", None)
    if not output_folder_:
        exit_error_message("Missing output folder")
        return 1
    elif res.get("sub_folder_name", ""):
        output_folder_ = os.path.join(output_folder_, res["sub_folder_name"],
                                      "")
    else:
        output_folder_ = os.path.join(output_folder_, "")
    force_directories(output_folder_)
    csv_file_name = res.get("csv_file_name", None)
    if not csv_file_name:
        exit_error_message("Missing output file name")
        return 1

    # Concurrency: ``mpc`` is either a thread count or False (single thread).
    if IS_USE_MULTI_THREAD and "thread_count" in res:
        try:
            mpc = int(res["thread_count"])
        except (TypeError, ValueError):
            mpc = False
    else:
        mpc = False

    # Load the pipeline script (path or dict).
    try:
        if isinstance(res["script"], str) and os.path.isfile(res["script"]):
            script = LoosePipeline.load(res["script"])
        elif isinstance(res["script"], dict):
            script = LoosePipeline.from_json(json_data=res["script"])
        else:
            exit_error_message("Failed to load script: Unknown error")
            return 1
    except Exception as e:
        exit_error_message(f"Failed to load script: {repr(e)}")
        return 1

    # Build pipeline processor
    pp = PipelineProcessor(
        database=None if db is None else db.copy(),
        dst_path=output_folder_,
        overwrite=res["overwrite"],
        seed_output=res["append_time_stamp"],
        group_by_series=res["generate_series_id"],
        store_images=False,
    )
    if not image_list_:
        pp.grab_files_from_data_base(
            experiment=db.db_info.display_name.lower(),
            order_by="plant",
            **db.main_selector,
        )
    else:
        pp.accepted_files = image_list_

    if res.get("randomize", False) is True:
        random.shuffle(pp.accepted_files)

    logger.info("Process summary")
    logger.info("_______________")
    logger.info(f'database: {res.get("database_data", None)}')
    logger.info(f'Output folder: {res["output_folder"]}')
    logger.info(f'CSV file name: {res["csv_file_name"]}')
    logger.info(f'Overwrite data: {res["overwrite"]}')
    logger.info(f'Subfolder name: {res["sub_folder_name"]}')
    logger.info(f'Append timestamp to root folder: {res["append_time_stamp"]}')
    logger.info(f'Generate series ID: {res["generate_series_id"]}')
    logger.info(f'Series ID time delta allowed: {res["series_id_time_delta"]}')
    logger.info(f'Build annotation ready CSV: {res["build_annotation_csv"]}')
    logger.info(f"Images: {len(pp.accepted_files)}")
    logger.info(f"Concurrent processes count: {mpc}")
    logger.info(f"Script summary: {str(script)}")

    if pp.accepted_files is None or len(pp.accepted_files) < 1:
        exit_error_message("No images to process")
        return 1

    pp.progress_callback = kwargs.get("progress_callback", None)
    pp.error_callback = kwargs.get("error_callback", None)
    pp.ensure_root_output_folder()

    pp.script = script
    if not pp.accepted_files:
        logger.error("Nothing to process")
        return 1

    # Group images into series and optionally emit an annotation-ready CSV.
    groups_to_process = pp.prepare_groups(res["series_id_time_delta"])
    if res["build_annotation_csv"]:
        try:
            if pp.options.group_by_series:
                # One wrapper per unique series id (luid).
                files, luids = map(list, zip(*groups_to_process))
                wrappers = [
                    file_handler_factory(files[i], db)
                    for i in [luids.index(x) for x in set(luids)]
                ]
            else:
                wrappers = [
                    file_handler_factory(f, db)
                    for f in tqdm.tqdm(groups_to_process,
                                       desc="Building annotation CSV")
                ]
            pd.DataFrame.from_dict({
                "plant": [i.plant for i in wrappers],
                "date_time": [i.date_time for i in wrappers],
                "disease_index": "",
            }).sort_values(by=["plant", "date_time"],
                           axis=0,
                           na_position="first",
                           ascending=True).to_csv(
                               os.path.join(
                                   pp.options.dst_path,
                                   f"{csv_file_name}_diseaseindex.csv"),
                               index=False,
                           )
        except Exception as e:
            preffix = "FAIL"
            logger.exception("Unable to build disease index file")
        else:
            preffix = "SUCCESS"
            logger.info("Built disease index file")
        print(f"{preffix} - Disease index file")

    # Process data
    groups_to_process_count = len(groups_to_process)
    if groups_to_process_count > 0:
        pp.multi_thread = mpc
        pp.process_groups(groups_list=groups_to_process)

    # Merge per-group result files into the final CSV.
    pp.merge_result_files(csv_file_name=csv_file_name + ".csv")
    logger.info(
        f"Processed {groups_to_process_count} groups/images in {format_time(timer() - start)}"
    )

    # Build videos

    print("Done, see logs for more details")

    return 0
Exemple #15
0
    def process_wrapper(self, **kwargs):
        """
        Copy or rename image:
        Copies an image, renaming it if needed
        Real time: False

        Keyword Arguments (in parentheses, argument name):
            * Activate tool (enabled): Toggle whether or not tool is active
            * Target folder (path): Can be overridden at process call
            * Image to copy (source_image):
            * Custom image name (named_source):
            * Image output format (output_format):
            * Output naming convention (output_name):
            * Prefix or suffix (prefix_suffix):
            * Name of ROI to be used (roi_names): Operation will only be applied inside of ROI
            * ROI selection mode (roi_selection_mode):
            * Resize images if with is larger than (max_width):
            * Resize images if height is larger than (max_height):
            * Keep aspect ratio (kar):

        Returns:
            bool: True on success, False on failure (also stored in ``self.result``).
        """

        wrapper = self.init_wrapper(**kwargs)
        if wrapper is None:
            return False

        res = False
        try:
            self.data_dict = {}
            if self.get_value_of("enabled") == 1:
                # Resolve the source image: either the wrapper's source image
                # or a previously stored image referenced by name.
                source = self.get_value_of("source_image")
                var_name = source
                if source == "source":
                    img = wrapper.source_image
                elif source == "custom":
                    var_name = self.get_value_of("named_source")
                    img = wrapper.retrieve_stored_image(var_name)
                else:
                    img = None
                    logger.error(f"Copy or rename image FAILED, unknown source: {source}")
                    # The ``finally`` clause below turns this into ``return False``.
                    return
                if img is None:
                    logger.error(f"Copy or rename image FAILED, missing source: {source}")
                    return

                # Restrict the operation to the selected ROIs, if any.
                rois = self.get_ipt_roi(
                    wrapper=wrapper,
                    roi_names=self.get_value_of("roi_names").replace(" ", "").split(","),
                    selection_mode=self.get_value_of("roi_selection_mode"),
                )
                img = wrapper.apply_roi_list(img=img, rois=rois)

                dst_path = self.build_path()

                # Add image to list
                wrapper.store_image(image=img, text="copied_or_renamed_image")

                # Record the source name and destination path as output data.
                self.add_value(key="source_name", value=wrapper.name, force_add=True)
                if "image" not in var_name:
                    var_name += "_image"
                self.add_value(key=var_name, value=dst_path, force_add=True)

                # Resize if needed: a dimension of 0 means "keep current size".
                max_width = self.get_value_of("max_width")
                max_height = self.get_value_of("max_height")
                kar = self.get_value_of("kar") == 1

                if max_width != 0 and max_height != 0:
                    r = regions.RectangleRegion(width=max_width, height=max_height)
                elif max_width == 0 and max_height != 0:
                    r = regions.RectangleRegion(width=wrapper.width, height=max_height)
                elif max_width != 0 and max_height == 0:
                    r = regions.RectangleRegion(width=max_width, height=wrapper.height)
                else:
                    r = None
                if r is not None:
                    img = ipc.resize_image(
                        src_img=img, target_rect=r, keep_aspect_ratio=kar
                    )

                # Copy image
                force_directories(self.output_path)
                cv2.imwrite(filename=dst_path, img=img)
                res = True
            else:
                # Tool disabled: pass the current image through unchanged.
                wrapper.store_image(wrapper.current_image, "current_image")
                res = True
        except Exception as e:
            res = False
            # Fixed odd ``self. name`` spacing; removed a dead ``else: pass``.
            logger.exception(f'Failed to process {self.name}: "{repr(e)}"')
        finally:
            # NOTE: returning from ``finally`` masks any in-flight exception
            # (including the bare ``return`` paths above); kept for
            # consistency with the other tools in this file.
            self.result = res
            return res
    def run(self):
        """Mass-process all accepted files in groups, emitting progress signals.

        Prepares series groups from the pipeline, then processes each group
        either on the shared thread pool (multithreaded) or inline.  The
        final launch state ("ok", "abort" or "exception") is emitted via
        ``on_started`` from the ``finally`` block.
        """
        self.signals_holder.on_starting.emit()
        launch_state = "ok"
        try:
            if self.pipeline.accepted_files:
                # build series
                if not self.is_continue():  # cooperative cancellation check
                    launch_state = "abort"
                    return
                groups_to_process = self.pipeline.prepare_groups(
                    self.group_time_delta)

                # Process all groups
                if not self.is_continue():
                    launch_state = "abort"
                    return
                groups_to_process_count = len(groups_to_process)
                self.signals_holder.on_launching.emit(groups_to_process_count)
                if groups_to_process_count > 0:
                    force_directories(self.pipeline.options.partials_path)
                    for i, item in enumerate(groups_to_process):
                        # One worker per group; script is copied so workers
                        # do not share mutable pipeline state.
                        item_worker = IpsoGroupProcessor(
                            on_ended=self.on_item_ended,
                            on_log_event=self.on_log_item_event,
                            on_image_ready=self.on_item_image_ready,
                            item=item,
                            database=self._target_database,
                            options=self.pipeline.options,
                            script=None if self.pipeline.script is None else
                            self.pipeline.script.copy(),
                            index=i,
                            total=groups_to_process_count,
                        )
                        # Re-check cancellation before dispatching each worker.
                        if not self.is_continue():
                            launch_state = "abort"
                            return
                        self.signals_holder.on_progress.emit(
                            i, len(groups_to_process))
                        if self._multithread:
                            self.item_thread_pool.start(item_worker)
                        else:
                            item_worker.run()  # synchronous fallback
            else:
                # NOTE(review): this emit passes ``log_level`` as a keyword
                # while the one below passes ``False`` positionally as the
                # third argument — confirm the signal's signature accepts both.
                self.signals_holder.on_feedback_log_str.emit(
                    "No images to process",
                    "No images to process",
                    log_level=logging.WARNING,
                )
        except Exception as e:
            logger.exception(f'Exception "{repr(e)}" while mass processing')
            launch_state = "exception"
        finally:
            # On abort/exception, drain queued jobs and wait for running ones
            # before reporting the final state.
            if launch_state != "ok":
                self.signals_holder.on_feedback_log_str.emit(
                    "Failed to launch mass process, cf. log",
                    "Failed to launch mass process, cancelling queued jobs",
                    False,
                )
                self.item_thread_pool.clear()
                self.item_thread_pool.waitForDone(-1)
            self.signals_holder.on_started.emit(launch_state)