Example #1
    def __init__(self, params, work, async_proc_class, logging_level="info"):

        self.params = params
        self.async_process_class = async_proc_class
        self.images_path = params.series.path
        images_dir_name = os.path.basename(params.series.path)
        self.saving_path = os.path.join(
            os.path.dirname(params.series.path),
            images_dir_name + "." + params.saving.postfix,
        )
        self.series = []
        self.processes = []
        self.async_process = []
        self.work = work

        # Logger
        log = os.path.join(
            self.saving_path,
            "log_" + time_as_str() + "_" + str(os.getpid()) + ".txt",
        )
        if not os.path.exists(self.saving_path):
            os.makedirs(self.saving_path)

        log_file = open(log, "w")
        sys.stdout = MultiFile([sys.stdout, log_file])
        config_logging("info", file=sys.stdout)
        # Managing dir paths (the saving directory was already created above)
        assert os.listdir(self.images_path), "the images directory is empty"
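
The MultiFile object used above tees every write to several file-like objects, which is how messages reach both the console and the log file at once. A minimal stand-in, assuming only the write/flush usage visible in these examples (not fluidimage's actual implementation):

class MultiFileSketch:
    """Duplicate writes to several file-like objects (illustrative only)."""

    def __init__(self, files):
        self.files = files

    def write(self, text):
        for file in self.files:
            file.write(text)

    def flush(self):
        for file in self.files:
            file.flush()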
Example #2
def main():
    # Define path
    sub_path_image = "Images2"
    path_save = "../../image_samples/Karman/{}.results.async/".format(
        sub_path_image)
    # Logger
    log = os.path.join(
        path_save, "log_" + time_as_str() + "_" + str(os.getpid()) + ".txt")
    log_file = open(log, "w")
    sys.stdout = MultiFile([sys.stdout, log_file])
    config_logging("info", file=sys.stdout)
    # Managing dir paths
    path = "../../image_samples/Karman/{}/".format(sub_path_image)
    assert os.listdir(path)
    if not os.path.exists(path_save):
        os.makedirs(path_save)

    def partition(lst, n):
        """
        Partition evently lst into n sublists and
        add the last images of each sublist to the head
        of the next sublist ( in order to compute all piv )
        :param lst: a list
        :param n: number of sublist wanted
        :return: A sliced list
        """
        length = len(lst)
        assert 0 < n <= length
        s, r = divmod(length, n)
        t = s + 1
        lst = [lst[p:p + t] for p in range(0, r * t, t)] + [
            lst[p:p + s] for p in range(r * t, length, s)
        ]
        #  in order to compute all piv
        #  add the last images of each sublist to the head of the next sublist
        for i in range(1, n):
            lst[i].insert(0, lst[i - 1][-1])
        return lst

    nb_process = multiprocessing.cpu_count()
    # splitting the list of images
    listdir = os.listdir(path)

    if len(listdir) <= nb_process:  # fewer PIV computations than CPUs
        nb_process = len(listdir) - 1  # adapt the number of processes
    print("nb process: {}".format(nb_process))
    listdir.sort()
    listdir = partition(listdir, nb_process)
    # making and starting processes
    processes = []
    for i in range(nb_process):
        async_piv = AsyncPiv(path, path_save)
        p = multiprocessing.Process(
            target=async_piv.a_process, args=(listdir[i],)
        )
        processes.append(p)  # keep a reference so the join loop below works
        p.start()
    for p in processes:
        p.join()
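
To see what partition guarantees, here is a worked run, independent of fluidimage: seven image names over three processes keep all six consecutive pairs computable.

images = ["im0", "im1", "im2", "im3", "im4", "im5", "im6"]
# partition(images, 3) first slices evenly:
#   [["im0", "im1", "im2"], ["im3", "im4"], ["im5", "im6"]]
# then prepends each sublist's last image to the next one:
#   [["im0", "im1", "im2"], ["im2", "im3", "im4"], ["im4", "im5", "im6"]]
# so the boundary pairs (im2, im3) and (im4, im5) are not lost.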
Example #3
def main(args=None, for_testing=False):
    """Parse arguments and execute `fluidimviewer-pg`."""
    if args is None:
        args = parse_args()

    if args.verbose:
        config_logging("debug")
    else:
        config_logging()

    if args.slideshow:
        return slideshow(args.images, for_testing)
    else:
        return dock(args.images, for_testing)
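
main() only consumes args.verbose, args.slideshow and args.images, so a compatible parse_args can be sketched as below; the exact flag spellings are assumptions, not fluidimage's documented CLI.

import argparse

def parse_args():
    parser = argparse.ArgumentParser(prog="fluidimviewer-pg")
    parser.add_argument("images", nargs="*", help="images to display")
    parser.add_argument(
        "-v", "--verbose", action="store_true", help="enable debug logging"
    )
    parser.add_argument(
        "--slideshow", action="store_true", help="run a slideshow, not a dock"
    )
    return parser.parse_args()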
Example #4
        # self.wq_load = WaitingQueueLoadImage(
        #     destination=self.wq_cpu,
        #     path_dir=path_dir, topology=self)

        super().__init__([self.wq_load, self.wq_cpu, self.wq_save])

        flist = glob(
            "/home/users/vishnu1as/useful/project/16MILESTONE/Data/"
            "Exp35_2016-06-30_N0.56_L6.0_V0.04_piv3d/PCO_side/level01.*/im*"
        )
        self.wq_load.update({os.path.basename(f): f for f in flist[:n]})

    def compute(self):
        super().compute()
        gc.collect()


if __name__ == "__main__":

    from fluidimage import config_logging

    config_logging("info")
    topology = TopologyDebug(20)
    topology.compute()
    print_memory_usage()

    plot()
    print(mem_log)
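
print_memory_usage and mem_log come from fluidimage's profiling helpers; calling gc.collect() right after compute() makes the subsequent reading reflect live objects only. A rough stand-in for such a probe, assuming Linux where ru_maxrss is reported in kibibytes:

import resource

def print_peak_memory():
    # peak resident set size: KiB on Linux, bytes on macOS
    peak_kib = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    print("peak memory usage: {:.1f} MiB".format(peak_kib / 1024))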
Example #5
    def __init__(
        self,
        path_dir=None,
        path_output=None,
        logging_level="info",
        nb_max_workers=None,
    ):

        super().__init__(logging_level=logging_level,
                         nb_max_workers=nb_max_workers)

        if path_dir is None:
            self.path_dir = "../../../image_samples/Karman/Images2"
        else:
            self.path_dir = path_dir

        if path_output is not None:
            if not os.path.exists(path_output):
                os.makedirs(path_output)
            self.path_output = path_output
            # build the log path only when path_output is given;
            # os.path.join(None, ...) would raise a TypeError
            log = os.path.join(
                path_output,
                "log_" + time_as_str() + "_" + str(os.getpid()) + ".txt",
            )

            stdout = sys.stdout
            if isinstance(stdout, MultiFile):
                stdout = _stdout_at_import

            stderr = sys.stderr
            if isinstance(stderr, MultiFile):
                stderr = _stderr_at_import

            self._log_file = open(log, "w")
            sys.stdout = MultiFile([stdout, self._log_file])
            sys.stderr = MultiFile([stderr, self._log_file])

        if logging_level is not None:
            for handler in logger.handlers:
                logger.removeHandler(handler)

        config_logging(logging_level, file=sys.stdout)

        if hasattr(self, "path_output"):
            logger.info("path results:\n" + self.path_output)

        self.img_counter = 0

        queue_names_img1 = self.add_queue("names img 1")
        queue_names_img2 = self.add_queue("names img 2")
        queue_array_couple = self.add_queue("array couples")
        queue_cpu1 = self.add_queue("queue_cpu1")
        queue_cpu2 = self.add_queue("queue_cpu2")

        self.add_work(
            "fill names",
            func_or_cls=self.fill_names,
            output_queue=(queue_names_img1, queue_names_img2),
            kind=("global", "one shot"),
        )
        self.add_work(
            "make couple",
            func_or_cls=self.make_couple,
            input_queue=(queue_names_img1, queue_names_img2),
            output_queue=queue_array_couple,
            kind=("global", "io"),
        )
        self.add_work(
            "cpu1",
            func_or_cls=self.cpu1,
            input_queue=queue_array_couple,
            output_queue=queue_cpu1,
            kind="server",
        )

        self.add_work(
            "cpu2",
            func_or_cls=self.cpu2,
            params_cls=None,
            input_queue=queue_cpu1,
            output_queue=queue_cpu2,
            kind="server",
        )

        self.add_work("save",
                      func_or_cls=self.save,
                      params_cls=None,
                      input_queue=queue_cpu2)
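
This constructor only declares the pipeline: two "one shot" name queues feed an I/O work that builds image couples, which feeds two "server" CPU works and a final save. Running it is left to the inherited compute() method; a hypothetical usage (the class name TopologyCPU is an assumption, since the snippet shows only __init__):

topology = TopologyCPU(
    path_dir="../../../image_samples/Karman/Images2",
    path_output="../../../image_samples/Karman/Images2.results",
    logging_level="info",
    nb_max_workers=4,
)
topology.compute()  # inherited entry point, as in examples #4 and #6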
Example #6
    def __init__(
        self,
        topology,
        path_dir_result,
        nb_max_workers=None,
        nb_items_queue_max=4,
        logging_level="info",
        sleep_time=None,
        stop_if_error=False,
    ):
        del sleep_time
        self.topology = topology
        self.logging_level = logging_level
        self.stop_if_error = stop_if_error

        path_dir_result = Path(path_dir_result)
        path_dir_result.mkdir(exist_ok=True)
        self.path_dir_result = path_dir_result
        self._init_log_path()
        self._log_file = open(self._log_path, "w")

        stdout = sys.stdout
        if isinstance(stdout, MultiFile):
            stdout = sys.__stdout__

        stderr = sys.stderr
        if isinstance(stderr, MultiFile):
            stderr = sys.__stderr__

        sys.stdout = MultiFile([stdout, self._log_file])
        sys.stderr = MultiFile([stderr, self._log_file])

        if logging_level:
            for handler in logger.handlers:
                logger.removeHandler(handler)

            config_logging(logging_level, file=sys.stdout)

        if nb_max_workers is None:
            if config is not None:
                try:
                    nb_max_workers = eval(config["topology"]["nb_max_workers"])
                except KeyError:
                    pass

        # default nb_max_workers
        # Difficult: trade off between overloading and limitation due to input
        # output.  The user can do much better for a specific case.
        if nb_max_workers is None:
            if nb_cores < 16:
                nb_max_workers = nb_cores + 2
            else:
                nb_max_workers = nb_cores

        self.nb_max_workers = nb_max_workers

        if nb_items_queue_max is None:
            nb_items_queue_max = max(2 * nb_max_workers, 2)
        self.nb_items_queue_max = nb_items_queue_max

        self._has_to_stop = False
        if sys.platform != "win32":

            def handler_signals(signal_number, stack):
                del stack
                print(
                    f"signal {signal_number} received: set _has_to_stop to True."
                )
                self._has_to_stop = True

            signal.signal(signal.SIGUSR2, handler_signals)  # signal 12 on x86 Linux

        # Picks up async works
        self.works = [
            work
            for work in self.topology.works
            if work.kind is None or "one shot" not in work.kind
        ]

        # to avoid a pylint warning
        self.t_start = None
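
Registering SIGUSR2 gives a way to ask a running executor to stop cleanly from outside. A sketch of the sending side (the PID value is hypothetical; the real one is embedded in the log file name by the other examples):

import os
import signal

pid = 12345  # hypothetical PID of the running executor
os.kill(pid, signal.SIGUSR2)  # the handler above sets _has_to_stop to True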
Example #7
    def __init__(
        self,
        path_output=None,
        logging_level="info",
        nb_max_workers=None,
    ):

        if path_output is not None:
            if not os.path.exists(path_output):
                os.makedirs(path_output)
            self.path_output = path_output
            log = os.path.join(
                path_output,
                "log_" + time_as_str() + "_" + str(os.getpid()) + ".txt",
            )
            self._log_file = open(log, "w")

            stdout = sys.stdout
            if isinstance(stdout, MultiFile):
                stdout = _stdout_at_import

            stderr = sys.stderr
            if isinstance(stderr, MultiFile):
                stderr = _stderr_at_import

            sys.stdout = MultiFile([stdout, self._log_file])
            sys.stderr = MultiFile([stderr, self._log_file])

        if logging_level is not None:
            for handler in logger.handlers:
                logger.removeHandler(handler)

            config_logging(logging_level, file=sys.stdout)

        self.queues = []
        self.works = []

        if nb_max_workers is None:
            nb_max_workers = _nb_max_workers

        # validate before deriving the other limits from it
        if nb_max_workers < 1:
            raise ValueError("nb_max_workers < 1")

        self.nb_max_workers_io = max(int(nb_max_workers * 0.8), 2)
        self.nb_max_launch = max(self.nb_max_workers_io, 1)

        print("nb_cpus_allowed = {}".format(nb_cores))
        print("nb_max_workers = ", nb_max_workers)
        print("nb_max_workers_io = ", self.nb_max_workers_io)

        self.nb_max_workers = nb_max_workers
        self.nb_cores = nb_cores
        self.nb_items_lim = max(2 * nb_max_workers, 2)

        self._has_to_stop = False

        if sys.platform != "win32":

            def handler_signals(signal_number, stack):
                print("signal {} received: set _has_to_stop to True".format(
                    signal_number))
                self._has_to_stop = True

            signal.signal(signal.SIGUSR2, handler_signals)  # signal 12 on x86 Linux
        self.t_start = time()
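
The names _stdout_at_import and _stderr_at_import in examples #5 and #7 suggest module-level globals captured at import time, so that creating several topologies in one session never nests MultiFile objects. A sketch of that capture, assumed from the names alone:

import sys

# captured once, when the module is first imported
_stdout_at_import = sys.stdout
_stderr_at_import = sys.stderr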