Example #1
File: model.py Project: AAIR-lab/GHN
    def execute(self):

        # https://stackoverflow.com/questions/42504669/keras-tensorflow-and-multiprocessing-in-python
        # It seems that keras does not play well with multiprocessing fork().
        #
        # Thus, we will do all keras operations from a subprocess of main().
        executor.multicore_execute(
            self._execute,
            (),
            lambda chunk_size: (tuple(), ),
            max_workers=1,
            chunk_size=float("nan"),
            use_mpi=False)

        return []
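
Example #1 passes a trivial chunk-argument generator, lambda chunk_size: (tuple(), ), together with max_workers=1, so the whole workload runs as a single empty-argument chunk in one subprocess. The sketch below shows what a non-trivial generator of the same shape might look like; it is an assumption about the expected callback based on Examples #1 and #2, and square_chunk / generate_square_args are hypothetical names, not part of the GHN codebase.

def square_chunk(values):

    # Hypothetical worker: square one chunk of values.
    return [v * v for v in values]


def generate_square_args(chunk_size):

    # Hypothetical chunk generator: split the input into chunks of
    # chunk_size and return one argument tuple per chunk, mirroring the
    # (tuple(), ) returned by the lambda in Example #1.
    data = list(range(100))
    return tuple((data[i:i + chunk_size], )
                 for i in range(0, len(data), chunk_size))

# A call analogous to the ones in these examples would then be:
# results = executor.multicore_execute(square_chunk, (), generate_square_args,
#                                      max_workers=4, chunk_size=10,
#                                      use_mpi=False)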
Example #2
    def execute(self):

        max_workers = self.get_value("max_workers")
        chunk_size = self.get_value("chunk_size")
        use_mpi = self.get_value("use_mpi")

        force_single_core = self.get_value("force_single_core")

        self.initialize_directories()

        # Generate the domain as well.
        self.generate_domain()

        # The generator always executes at least one iteration.
        max_index = 1

        # Detect how many iterations and which keys need to be replaced.
        iter_set = set()
        for key in self._phase_dict.keys():

            if isinstance(self._phase_dict[key], list):

                max_index = max(max_index, len(self._phase_dict[key]))
                iter_set.add(key)

        results = []

        # Get a copy of the phase dict with the lists.
        # These will be replaced on the fly.
        phase_dict_copy = copy.deepcopy(self._phase_dict)
        problem_start_no = 0
        for i in range(max_index):

            for key in iter_set:

                try:

                    self._phase_dict[key] = phase_dict_copy[key][i]
                except IndexError:

                    self._phase_dict[key] = phase_dict_copy[key][-1]

            if force_single_core:

                results += executor.singlecore_execute(
                    self.generate_problem, self.generate_args(float("inf"),
                                                              problem_start_no))
            else:

                results += executor.multicore_execute(self.generate_problem,
                                                      (problem_start_no, ),
                                                      self.generate_args,
                                                      max_workers, chunk_size,
                                                      use_mpi)

            # Increment the start problem no. for the next set.
            problem_start_no = problem_start_no \
                + phase_dict_copy["total_problems"]

        return results
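
The loop above runs max_index iterations and, for each list-valued key of the phase dict, substitutes the i-th entry, falling back to the last entry once a list runs out. A small standalone sketch of that substitution rule, with made-up phase-dict contents:

import copy

# Standalone sketch of the per-iteration substitution rule used above;
# the phase-dict contents are made up for illustration.
phase_dict = {"total_problems": 5,
              "min_objects": [2, 3, 4],
              "max_objects": [6, 8]}

iter_set = {k for k, v in phase_dict.items() if isinstance(v, list)}

max_index = 1
for key in iter_set:
    max_index = max(max_index, len(phase_dict[key]))

phase_dict_copy = copy.deepcopy(phase_dict)
for i in range(max_index):
    for key in iter_set:
        try:
            phase_dict[key] = phase_dict_copy[key][i]
        except IndexError:
            # Shorter lists stick at their last value.
            phase_dict[key] = phase_dict_copy[key][-1]

    print(i, phase_dict["min_objects"], phase_dict["max_objects"])

# Prints: 0 2 6 / 1 3 8 / 2 4 8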
Example #3
File: solver.py Project: AAIR-lab/GHN
    def execute(self):

        max_workers = self.get_value("max_workers")
        chunk_size = self.get_value("chunk_size")
        use_mpi = self.get_value("use_mpi")

        force_single_core = self.get_value("force_single_core")

        problem_dir = file.get_relative_path(self.get_value("input_dir"),
                                             self._parent_dir)

        problem_list = file.get_file_list(problem_dir,
                                          constants.PROBLEM_FILE_REGEX)

        domain_list = file.get_file_list(problem_dir,
                                         constants.DOMAIN_FILE_REGEX)

        assert len(domain_list) == 1
        domain_file = domain_list[0]

        if force_single_core:

            results = executor.singlecore_execute(self.solve,
                                                  (domain_file, problem_list))
        else:

            results = executor.multicore_execute(self.solve,
                                                 (domain_file, problem_list),
                                                 self.generate_args,
                                                 max_workers, chunk_size,
                                                 use_mpi)

        return results
Example #4
File: model.py Project: AAIR-lab/GHN
    def get_training_data(self, problem_dir, max_workers, chunk_size, use_mpi,
                          force_single_core, abstract_domain=None):

        solver_name = self.get_value("solver_name")

        problem_list = file.get_file_list(problem_dir,
                                          constants.PROBLEM_FILE_REGEX)

        domain_list = file.get_file_list(problem_dir,
                                         constants.DOMAIN_FILE_REGEX)

        assert len(domain_list) == 1
        domain_filepath = domain_list[0]

        if force_single_core:

            training_data = executor.singlecore_execute(
                self._get_training_data,
                (domain_filepath, problem_list, solver_name, abstract_domain))
        else:

            training_data = executor.multicore_execute(
                self._get_training_data,
                (domain_filepath, problem_list, solver_name, abstract_domain),
                self._gen_get_training_data_args,
                max_workers, chunk_size,
                use_mpi)

        return training_data
Example #5
    def run(self,
            max_workers=multiprocessing.cpu_count(),
            chunk_size=20,
            use_mpi=False):

        results = executor.multicore_execute(self.generate, (),
                                             self.generate_args, max_workers,
                                             chunk_size, use_mpi)

        print(results)
Example #6
def example_multicore():

    a1 = range(100)
    a2 = range(100)
    a3 = range(100)

    results = executor.multicore_execute(example_add_multicore_arrays,
                                         (a1, a2, a3),
                                         example_generate_multicore_array_args,
                                         chunk_size=10)

    print(results)

    # Try an object-oriented example.
    test_example = TestExample(a1, a2, a3)
    test_example.run()
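
The two helpers referenced in example_multicore are not shown in this listing. The sketch below gives one plausible shape for them, assuming (based on Examples #1 and #2) that the chunk generator receives the chunk size followed by the base arguments and yields one argument tuple per chunk; treat it as illustrative rather than the actual GHN code.

def example_add_multicore_arrays(a1, a2, a3):

    # Assumed worker: element-wise sum of three equally sized chunks.
    return [x + y + z for x, y, z in zip(a1, a2, a3)]


def example_generate_multicore_array_args(chunk_size, a1, a2, a3):

    # Assumed chunk generator: slice the three arrays into aligned
    # chunks of chunk_size and yield one argument tuple per chunk.
    for i in range(0, len(a1), chunk_size):
        yield (a1[i:i + chunk_size],
               a2[i:i + chunk_size],
               a3[i:i + chunk_size])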
Example #7
File: model.py Project: AAIR-lab/GHN
    def remap_training_data(self, remapped_abstract_domain, training_data_list,
                            max_workers, chunk_size, use_mpi,
                            force_single_core):

        if force_single_core:

            remapped_training_data = executor.singlecore_execute(
                self._remap_training_data,
                (remapped_abstract_domain, training_data_list))
        else:

            remapped_training_data = executor.multicore_execute(
                self._remap_training_data,
                (remapped_abstract_domain, training_data_list),
                self._gen_remap_training_data_args,
                max_workers, chunk_size,
                use_mpi)

        return remapped_training_data
Example #8
    def get_documents(self, input_dir):

        # Get the solution file list.
        solutions = file.get_file_list(input_dir,
                                       constants.SOLUTION_FILE_REGEX)

        # Get the list of solution properties.
        if self.get_value("force_single_core"):

            properties = executor.singlecore_execute(self._extract_properties,
                                                     (solutions, ))
        else:

            properties = executor.multicore_execute(
                self._extract_properties, (solutions, ), self._generate_args,
                self.get_value("max_workers"), self.get_value("chunk_size"),
                self.get_value("use_mpi"))

        # Create the database.
        db = TinyDB(storage=MemoryStorage)
        db.insert_multiple(properties)

        # Try applying any specified filter.
        try:

            query_str = self.get_value("filter")

            # "query" is referenced by the eval'd filter string below.
            query = Query()
            documents = db.search(eval(query_str))

        except KeyError:

            # If no filter, then all documents are a part of the data.
            documents = db.all()

        return documents
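
The filter option, when present, is eval'd against a TinyDB Query object, so a filter value is simply a Python expression over query. Below is a self-contained illustration with made-up property names (cost, solved); TinyDB's Query supports this kind of attribute access and comparison.

from tinydb import TinyDB, Query
from tinydb.storages import MemoryStorage

# Illustration only: the property names below are made up and depend on
# what _extract_properties actually emits.
db = TinyDB(storage=MemoryStorage)
db.insert_multiple([{"cost": 5, "solved": True},
                    {"cost": 42, "solved": False}])

query = Query()
query_str = "(query.solved == True) & (query.cost < 10)"
print(db.search(eval(query_str)))  # [{'cost': 5, 'solved': True}]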