def do_refactoring(self):
    logger.info(f"Running {self.name}")
    logger.info(f"Parameters {self.params}")
    try:
        self.main(**self.params)
    except Exception as e:
        logger.error(f"Error in executing refactoring:\n {e}")
    def _evaluate(self,
                  x,
                  out,
                  *args,
                  **kwargs):
        """
        This method iterates over an Individual, executes the refactoring operations sequentially,
        and computes quality attributes for the refactored version of the program as objectives of the search.

        Args:
            x[0] (Individual): x[0] is an instance of Individual (i.e., a list of refactoring operations)

        """
        # Stage 0: Git restore
        logger.debug("Executing git restore.")
        git_restore(config.PROJECT_PATH)
        update_understand_database(config.UDB_PATH)
        # Stage 1: Execute all refactoring operations in the sequence x
        logger.debug(f"Reached Individual with Size {len(x[0])}")
        for refactoring_operation in x[0]:
            refactoring_operation.do_refactoring()
            # Update Understand DB
            update_understand_database(config.UDB_PATH)
        # Stage 2: Computing quality attributes
        score = testability_main(config.UDB_PATH)
        logger.info(f"Testability Score: {score}")
        # Stage 3: Marshal objectives into vector
        out["F"] = np.array([-1 * score], dtype=float)
Example #3
    def _evaluate(self,
                  x,
                  out,
                  *args,
                  **kwargs):
        """

        This method iterate over a population, execute the refactoring operations in each individual sequentially,
        and compute quality attributes for the refactored version of the program, as objectives of the search

        Args:

            x (Population): x is a matrix where each row is an individual, and each column a variable. \
            We have one variable of type list (Individual) ==> x.shape = (len(Population), 1)


        """
        objective_values = []
        for k, individual_ in enumerate(x):
            # Stage 0: Git restore
            logger.debug("Executing git restore.")
            git_restore(config.PROJECT_PATH)
            logger.debug("Updating understand database after git restore.")
            update_understand_database(config.UDB_PATH)

            # Stage 1: Execute all refactoring operations in the sequence x
            logger.debug(f"Reached Individual with Size {len(individual_[0])}")
            for refactoring_operation in individual_[0]:
                refactoring_operation.do_refactoring()
                # Update Understand DB
                logger.debug(f"Updating understand database after {refactoring_operation.name}.")
                update_understand_database(config.UDB_PATH)

            # Stage 2: Computing quality attributes (in parallel or sequentially)
            arr = Array('d', range(self.n_obj_virtual))
            if self.evaluate_in_parallel:
                # Stage 2 (parallel mode): Computing quality attributes
                p1 = Process(target=calc_qmood_objectives, args=(arr,))
                p2 = Process(target=calc_testability_objective, args=(config.UDB_PATH, arr,))
                p3 = Process(target=calc_modularity_objective, args=(config.UDB_PATH, arr,))
                for p in (p1, p2, p3):
                    p.start()
                for p in (p1, p2, p3):
                    p.join()
                o1 = sum(arr[:6]) / 6.0
                o2 = arr[6]
                o3 = arr[7]
            else:
                # Stage 2 (sequential mode): Computing quality attributes
                qmood_quality_attributes = DesignQualityAttributes(udb_path=config.UDB_PATH)
                o1 = qmood_quality_attributes.average_sum
                o2 = testability_main(config.UDB_PATH, initial_value=config.CURRENT_METRICS.get("TEST", 1.0))
                o3 = modularity_main(config.UDB_PATH, initial_value=config.CURRENT_METRICS.get("MODULE", 1.0))
                del qmood_quality_attributes

            # Stage 3: Marshal objectives into vector
            objective_values.append([-1 * o1, -1 * o2, -1 * o3])
            logger.info(f"Objective values for individual {k}: {[-1 * o1, -1 * o2, -1 * o3]}")

        # Stage 4: Marshal all objectives into out dictionary
        out['F'] = np.array(objective_values, dtype=float)
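
The parallel branch above relies on a typed shared-memory Array so that child processes can report objectives back to the parent after join(). A stripped-down sketch of that pattern (the worker functions and the slot layout -- 0..5 QMOOD, 6 testability, 7 modularity -- are illustrative, not from the project):

from multiprocessing import Array, Process


def fake_qmood(arr):
    for i in range(6):  # slots 0..5: the six QMOOD attributes
        arr[i] = 0.5


def fake_testability(arr):
    arr[6] = 0.7  # slot 6: testability


if __name__ == '__main__':
    arr = Array('d', 8)  # 'd' = C double; shared across processes
    p1 = Process(target=fake_qmood, args=(arr,))
    p2 = Process(target=fake_testability, args=(arr,))
    p1.start()
    p2.start()
    p1.join()
    p2.join()
    o1 = sum(arr[:6]) / 6.0  # mean of the six QMOOD attributes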
Example #4
    def do_refactoring(self):
        """

        Check preconditions and apply refactoring operation to source code

        Returns:

            result (boolean): The result status of the applied refactoring

        """

        logger.info(f"Running {self.name}")
        logger.info(f"Parameters {self.params}")
        try:
            res = self.main(**self.params)
            logger.debug(f"Executed refactoring with result {res}")
            return res
        except Exception as e:
            logger.error(f"Unexpected error in executing refactoring:\n {e}")
            return False
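
Because do_refactoring returns False on failure instead of re-raising, a caller can skip a failed operation without aborting the whole sequence. A hypothetical usage sketch (StubOperation is a stand-in, not a project class):

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class StubOperation:
    """Hypothetical stand-in for a refactoring operation object."""

    def __init__(self, name, ok):
        self.name = name
        self._ok = ok

    def do_refactoring(self):
        return self._ok


sequence = [StubOperation("Move Method", True), StubOperation("Move Field", False)]
applied = 0
for op in sequence:
    if op.do_refactoring():
        applied += 1
    else:
        logger.warning(f"Skipping failed refactoring {op.name}")
logger.info(f"Applied {applied}/{len(sequence)} operations")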
def main():
    # Define search algorithms
    algorithms = list()
    # 1: GA
    algorithm = GA(
        pop_size=config.POPULATION_SIZE,
        sampling=SudoRandomInitialization(),
        # crossover=AdaptiveSinglePointCrossover(prob=0.8),
        crossover=get_crossover("real_k_point", n_points=2),
        mutation=BitStringMutation(),
        eliminate_duplicates=RefactoringSequenceDuplicateElimination()
    )
    algorithms.append(algorithm)

    # 2: NSGA II
    algorithm = NSGA2(pop_size=config.POPULATION_SIZE,
                      sampling=SudoRandomInitialization(),
                      # crossover=AdaptiveSinglePointCrossover(prob=0.8),
                      crossover=get_crossover("real_k_point", n_points=2),
                      mutation=BitStringMutation(),
                      eliminate_duplicates=RefactoringSequenceDuplicateElimination()
                      )
    algorithms.append(algorithm)

    # 3: NSGA III
    # Todo: Ask for best practices in determining ref_dirs
    ref_dirs = get_reference_directions("energy", 8, 90, seed=1)
    algorithm = NSGA3(ref_dirs=ref_dirs,
                      pop_size=config.POPULATION_SIZE,
                      sampling=SudoRandomInitialization(),
                      # crossover=AdaptiveSinglePointCrossover(prob=0.8),
                      crossover=get_crossover("real_k_point", n_points=2),
                      mutation=BitStringMutation(),
                      eliminate_duplicates=RefactoringSequenceDuplicateElimination()
                      )
    algorithms.append(algorithm)

    # Define problems
    problems = list()
    problems.append(
        ProblemSingleObjective(n_refactorings_lowerbound=config.LOWER_BAND, n_refactorings_upperbound=config.UPPER_BAND)
    )
    problems.append(
        ProblemMultiObjective(n_refactorings_lowerbound=config.LOWER_BAND, n_refactorings_upperbound=config.UPPER_BAND)
    )
    problems.append(
        ProblemManyObjective(n_refactorings_lowerbound=config.LOWER_BAND, n_refactorings_upperbound=config.UPPER_BAND)
    )

    # Do optimization for various problems with various algorithms
    res = minimize(problem=problems[2],
                   algorithm=algorithms[2],
                   termination=('n_gen', config.MAX_ITERATIONS),
                   seed=1,
                   verbose=True)
    logger.info("** FINISHED **")

    logger.info("Best Individual:")
    logger.info(res.X)
    logger.info("Objective Values:")
    logger.info(res.F)

    logger.info("==================")
    logger.info("Other Solutions:")
    for ind in res.opt:
        logger.info(ind.X)
        logger.info(ind.F)
        logger.info("==================")

    logger.info(f"Start Time: {res.start_time}")
    logger.info(f"End Time: {res.end_time}")
    logger.info(f"Execution Time in Seconds: {res.exec_time}")
    def _evaluate(self,
                  x,
                  out,
                  *args,
                  **kwargs):
        """
        This method iterates over an Individual, executes the refactoring operations sequentially,
        and computes quality attributes for the refactored version of the program as objectives of the search.

        Args:
            x (Individual): x is an instance of Individual (i.e., a list of refactoring operations)

        """
        # Stage 0: Git restore
        logger.debug("Executing git restore.")
        git_restore(config.PROJECT_PATH)
        update_understand_database(config.UDB_PATH)
        # Stage 1: Execute all refactoring operations in the sequence x
        logger.debug(f"Reached Individual with Size {len(x[0])}")
        for refactoring_operation in x[0]:
            refactoring_operation.do_refactoring()
            # Update Understand DB
            update_understand_database(config.UDB_PATH)

        # Stage 2: Computing quality attributes
        qmood = DesignQualityAttributes(udb_path=config.UDB_PATH)
        o1 = qmood.reusability
        o2 = qmood.understandability
        o3 = qmood.flexibility
        o4 = qmood.functionality
        o5 = qmood.effectiveness
        o6 = qmood.extendability
        del qmood
        o7 = testability_main(config.UDB_PATH)
        o8 = modularity_main(config.UDB_PATH)

        logger.info(f"Reusability Score: {o1}")
        logger.info(f"Understandability Score: {o2}")
        logger.info(f"Flexibility Score: {o3}")
        logger.info(f"Functionality Score: {o4}")
        logger.info(f"Effectiveness Score: {o5}")
        logger.info(f"Extendability Score: {o6}")
        logger.info(f"Testability Score: {o7}")
        logger.info(f"Modularity Score: {o8}")

        # Stage 3: Marshal objectives into vector
        out["F"] = np.array([-1 * o1, -1 * o2, -1 * o3, -1 * o4, -1 * o5, -1 * o6, -1 * o7, -1 * o8, ], dtype=float)
Example #7
def log_project_info(reset_=True, design_metrics_path=None, quality_attributes_path=None,
                     generation=0, testability_verbose=True, testability_log_path=None):
    """

    Logging project metrics and information

    """

    if reset_:
        reset_project()
    if quality_attributes_path is None:
        quality_attributes_path = os.path.join(config.PROJECT_LOG_DIR, 'quality_attrs_initial_values.csv')
    if design_metrics_path is None:
        design_metrics_path = os.path.join(config.PROJECT_LOG_DIR, 'design_metrics.csv')

    design_quality_attribute = DesignQualityAttributes(config.UDB_PATH)
    avg_, sum_ = design_quality_attribute.average_sum
    predicted_testability = testability_main(
        config.UDB_PATH,
        initial_value=config.CURRENT_METRICS.get("TEST", 1.0),
        verbose=testability_verbose,
        log_path=testability_log_path
    )
    mdg_modularity = modularity_main(
        config.UDB_PATH,
        initial_value=config.CURRENT_METRICS.get("MODULE", 1.0)
    )

    design_metrics = {
        "DSC": [design_quality_attribute.DSC],
        "NOH": [design_quality_attribute.NOH],
        "ANA": [design_quality_attribute.ANA],
        "MOA": [design_quality_attribute.MOA],
        "DAM": [design_quality_attribute.DAM],
        "CAMC": [design_quality_attribute.CAMC],
        "CIS": [design_quality_attribute.CIS],
        "NOM": [design_quality_attribute.NOM],
        "DCC": [design_quality_attribute.DCC],
        "MFA": [design_quality_attribute.MFA],
        "NOP": [design_quality_attribute.NOP]
    }

    quality_objectives = {
        "generation": [generation],
        "reusability": [design_quality_attribute.reusability],
        "understandability": [design_quality_attribute.understandability],
        "flexibility": [design_quality_attribute.flexibility],
        "functionality": [design_quality_attribute.functionality],
        "effectiveness": [design_quality_attribute.effectiveness],
        "extendability": [design_quality_attribute.extendability],
        "testability": [predicted_testability],
        "modularity": [mdg_modularity],
    }

    logger.info('QMOOD design metrics (N):')
    logger.info(design_metrics)

    logger.info('Objectives:')
    logger.info(quality_objectives)

    logger.info('QMOOD quality attributes sum:')
    logger.info(sum_)
    logger.info('QMOOD quality attributes mean:')
    logger.info(avg_)

    df_quality_attributes = pd.DataFrame(data=quality_objectives)
    if os.path.exists(quality_attributes_path):
        df = pd.read_csv(quality_attributes_path, index_col=False)
        df_result = pd.concat([df, df_quality_attributes], ignore_index=True)
        df_result.to_csv(quality_attributes_path, index=False)
    else:
        df_quality_attributes.to_csv(quality_attributes_path, index=False)

    df_design_metrics = pd.DataFrame(data=design_metrics)
    if os.path.exists(design_metrics_path):
        df = pd.read_csv(design_metrics_path, index_col=False)
        df_results = pd.concat([df, df_design_metrics], ignore_index=True)
        # df = df.append(df_design_metrics, ignore_index=True)
        df_results.to_csv(design_metrics_path, index=False)
    else:
        df_design_metrics.to_csv(design_metrics_path, index=False)
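
The two append-or-create blocks above follow the same pattern; a small sketch that factors it into a helper (the helper name and signature are illustrative, not from the project):

import os

import pandas as pd


def append_csv(df: pd.DataFrame, path: str) -> None:
    # Append to an existing CSV, or create it on first write.
    if os.path.exists(path):
        old = pd.read_csv(path, index_col=False)
        df = pd.concat([old, df], ignore_index=True)
    df.to_csv(path, index=False)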
Example #8
    def notify(self, algorithm, **kwargs):
        # self.data["best"].append(algorithm.pop.get("F").min())
        logger.info(f'Generation #{algorithm.n_gen + config.NGEN} was finished:')
        # logger.info(f'Best solution:')
        # logger.info(f'{algorithm.pop.get("F")}')
        # logger.info(f'Pareto-front solutions:')
        # logger.info(f'{algorithm.pf}')

        X, F, CV, G = algorithm.opt.get("X", "F", "CV", "G")
        logger.info('Optimum solutions:')
        logger.info(f'{F}')

        # Log evolved population at end of each generation
        generation_log_path = os.path.join(config.PROJECT_LOG_DIR, 'generations_logs')
        if not os.path.exists(generation_log_path):
            os.makedirs(generation_log_path)

        generation_endof_date_time = config.dt.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
        population_log_file_path = os.path.join(
            generation_log_path,
            f'pop_gen{algorithm.n_gen + config.NGEN}_{generation_endof_date_time}.json'
        )
        pop_opt_log_file_path = os.path.join(
            generation_log_path,
            f'pop_opt_gen{algorithm.n_gen + config.NGEN}_{generation_endof_date_time}.json'
        )

        pop_opt_objective_value_log = os.path.join(
            config.PROJECT_LOG_DIR,
            f'{config.PROJECT_NAME}_objectives_log_{config.global_execution_start_time}.csv'
        )

        population_trimmed = []
        for chromosome in algorithm.pop:
            chromosome_new = []
            for gene_ in chromosome.X[0]:
                chromosome_new.append((gene_.name, gene_.params))
            population_trimmed.append(chromosome_new)

        with open(population_log_file_path, 'w', encoding='utf-8') as fp:
            json.dump(population_trimmed, fp, indent=4)

        population_trimmed = []
        objective_values_content = ''
        for chromosome in algorithm.opt:
            chromosome_new = []
            for gene_ in chromosome.X[0]:
                chromosome_new.append((gene_.name, gene_.params))
            population_trimmed.append(chromosome_new)

            objective_values_content += f'{algorithm.n_gen + config.NGEN},'
            for gene_objective_ in chromosome.F:
                objective_values_content += f'{gene_objective_},'
            objective_values_content += '\n'

        with open(pop_opt_log_file_path, mode='w', encoding='utf-8') as fp:
            json.dump(population_trimmed, fp, indent=4)

        if not os.path.exists(pop_opt_objective_value_log):
            writing_mode = 'w'
        else:
            writing_mode = 'a'
        with open(pop_opt_objective_value_log, mode=writing_mode, encoding='utf-8') as fp:
            fp.write(objective_values_content)

        logger.info('-' * 100)
        logger.info(' ')
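
The notify hook above fires once per generation when the callback object is passed to minimize (as done in Example #11). A minimal registration sketch, assuming the pymoo >= 0.5 Callback base class:

from pymoo.core.callback import Callback


class MinimalLogCallback(Callback):
    def notify(self, algorithm, **kwargs):
        # pymoo invokes notify after every generation
        print(f"generation {algorithm.n_gen} finished")


# Usage: res = minimize(problem, algorithm, callback=MinimalLogCallback(), ...)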
Example #9
    def _do(self, problem, X, **kwargs):
        """

        For population X

        Args:

            problem (Problem): An instance of pymoo Problem class to be optimized.

            X (np.array): Population

        """

        # The input has the following shape: (n_parents, n_matings, n_var)
        _, n_matings, n_var = X.shape

        # The output has the shape (n_offsprings, n_matings, n_var).
        # Because the number of parents and offspring are equal, it keeps the shape of X.
        Y = np.full_like(X, None, dtype=object)

        # for each mating provided
        for k in range(n_matings):
            # get the first and the second parent (a and b are instances of Individual)
            a, b = X[0, k, 0], X[1, k, 0]

            len_min = min(len(a), len(b))
            # Guard the randint bounds so they stay valid for short sequences
            cross_point_1 = random.randint(1, max(1, int(len_min * 0.30)))
            cross_point_2 = random.randint(min(int(len_min * 0.70), len_min - 1), len_min - 1)
            if random.random() < 0.5:
                cross_point_final = cross_point_1
            else:
                cross_point_final = cross_point_2
            logger.info(f'cross_point_final: {cross_point_final}')
            offspring_a = []
            offspring_b = []
            for i in range(0, cross_point_final):
                offspring_a.append(deepcopy(a[i]))
                offspring_b.append(deepcopy(b[i]))

            for i in range(cross_point_final, len_min):
                offspring_a.append(deepcopy(b[i]))
                offspring_b.append(deepcopy(a[i]))

            if len(b) > len(a):
                for i in range(len(a), len(b)):
                    offspring_a.append(deepcopy(b[i]))
            else:
                for i in range(len(b), len(a)):
                    offspring_b.append(deepcopy(a[i]))

            # Join offsprings to offspring population Y
            Y[0, k, 0], Y[1, k, 0] = offspring_a, offspring_b

        return Y
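
A toy trace of the crossover above (plain Python, no pymoo): genes before the cut point stay with their parent, genes after it are swapped up to the shorter length, and the longer parent's tail extends the first offspring so the pair keeps both original lengths.

from copy import deepcopy

a = ['r1', 'r2', 'r3', 'r4']        # parent a: 4 refactoring operations
b = ['s1', 's2', 's3', 's4', 's5']  # parent b: 5 refactoring operations
cut = 2                             # assume the sampled cross point is 2

len_min = min(len(a), len(b))
off_a = [deepcopy(g) for g in a[:cut]] + [deepcopy(g) for g in b[cut:len_min]]
off_b = [deepcopy(g) for g in b[:cut]] + [deepcopy(g) for g in a[cut:len_min]]
off_a += [deepcopy(g) for g in b[len_min:]]  # longer parent's tail
assert off_a == ['r1', 'r2', 's3', 's4', 's5']
assert off_b == ['s1', 's2', 'r3', 'r4']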
Example #10
    def _evaluate(self, x, out, *args, **kwargs):
        """

        This method iterates over a population, executes the refactoring operations in each individual sequentially,
        and computes quality attributes for the refactored version of the program as objectives of the search.
        By default, elementwise_evaluation is set to False, which implies that _evaluate receives a set of solutions.

        Args:

            x (Population): x is a matrix where each row is an individual, and each column a variable.\
            We have one variable of type list (Individual) ==> x.shape = (len(Population), 1)


        """

        objective_values = []
        for k, individual_ in enumerate(x):
            # Stage 0: Git restore
            logger.debug("Executing git restore.")
            git_restore(config.PROJECT_PATH)
            logger.debug("Updating understand database after git restore.")
            update_understand_database(config.UDB_PATH)

            # Stage 1: Execute all refactoring operations in the sequence x
            logger.debug(f"Reached an Individual with size {len(individual_[0])}")
            for refactoring_operation in individual_[0]:
                res = refactoring_operation.do_refactoring()
                # Update Understand DB
                logger.debug(f"Updating understand database after {refactoring_operation.name}.")
                update_understand_database(config.UDB_PATH)

            # Stage 2: Computing quality attributes (in parallel or sequentially)
            arr = Array('d', range(self.n_obj))
            if self.evaluate_in_parallel:
                # Stage 2 (parallel mode): Computing quality attributes
                p1 = Process(target=calc_qmood_objectives, args=(arr,))
                if self.n_obj == 8:
                    p2 = Process(target=calc_testability_objective, args=(config.UDB_PATH, arr,))
                    p3 = Process(target=calc_modularity_objective, args=(config.UDB_PATH, arr,))
                    for p in (p1, p2, p3):
                        p.start()
                    for p in (p1, p2, p3):
                        p.join()
                else:
                    p1.start()
                    p1.join()
            else:
                # Stage 2 (sequential mode): Computing quality attributes
                qmood_quality_attributes = DesignQualityAttributes(udb_path=config.UDB_PATH)
                arr[0] = qmood_quality_attributes.reusability
                arr[1] = qmood_quality_attributes.understandability
                arr[2] = qmood_quality_attributes.flexibility
                arr[3] = qmood_quality_attributes.functionality
                arr[4] = qmood_quality_attributes.effectiveness
                arr[5] = qmood_quality_attributes.extendability
                if self.n_obj == 8:
                    arr[6] = testability_main(config.UDB_PATH, initial_value=config.CURRENT_METRICS.get("TEST", 1.0))
                    arr[7] = modularity_main(config.UDB_PATH, initial_value=config.CURRENT_METRICS.get("MODULE", 1.0))

                if self.verbose_design_metrics:
                    design_metrics = {
                        "DSC": [qmood_quality_attributes.DSC],
                        "NOH": [qmood_quality_attributes.NOH],
                        "ANA": [qmood_quality_attributes.ANA],
                        "MOA": [qmood_quality_attributes.MOA],
                        "DAM": [qmood_quality_attributes.DAM],
                        "CAMC": [qmood_quality_attributes.CAMC],
                        "CIS": [qmood_quality_attributes.CIS],
                        "NOM": [qmood_quality_attributes.NOM],
                        "DCC": [qmood_quality_attributes.DCC],
                        "MFA": [qmood_quality_attributes.MFA],
                        "NOP": [qmood_quality_attributes.NOP]
                    }
                    self.log_design_metrics(design_metrics)

                del qmood_quality_attributes

            # Stage 3: Marshal objectives into vector
            objective_values.append([-1 * i for i in arr])
            logger.info(f"Objective values for individual {k}: {[i for i in arr]}")

        # Stage 4: Marshal all objectives into out dictionary
        out['F'] = np.array(objective_values, dtype=float)
Example #11
def main():
    """

    Optimization module main driver

    """

    # Define initialization objects
    initializer_class = SmellInitialization if config.WARM_START else RandomInitialization
    initializer_object = initializer_class(
        udb_path=config.UDB_PATH,
        population_size=config.POPULATION_SIZE,
        lower_band=config.LOWER_BAND,
        upper_band=config.UPPER_BAND
    )

    # -------------------------------------------
    # Define optimization problems
    problems = list()  # 0: Genetic (Single), 1: NSGA-II (Multi), 2: NSGA-III (Many) objectives problems
    problems.append(
        ProblemSingleObjective(
            n_objectives=config.NUMBER_OBJECTIVES,
            n_refactorings_lowerbound=config.LOWER_BAND,
            n_refactorings_upperbound=config.UPPER_BAND,
            evaluate_in_parallel=False,
        )
    )
    problems.append(
        ProblemMultiObjective(
            n_objectives=config.NUMBER_OBJECTIVES,
            n_refactorings_lowerbound=config.LOWER_BAND,
            n_refactorings_upperbound=config.UPPER_BAND,
            evaluate_in_parallel=False,
        )
    )
    problems.append(
        ProblemManyObjective(
            n_objectives=config.NUMBER_OBJECTIVES,
            n_refactorings_lowerbound=config.LOWER_BAND,
            n_refactorings_upperbound=config.UPPER_BAND,
            evaluate_in_parallel=False,
            verbose_design_metrics=True,
        )
    )

    # Define search algorithms
    algorithms = list()
    # 1: GA
    alg1 = GA(
        pop_size=config.POPULATION_SIZE,
        sampling=PopulationInitialization(initializer_object),
        crossover=AdaptiveSinglePointCrossover(prob=config.CROSSOVER_PROBABILITY),
        # crossover=get_crossover("real_k_point", n_points=2),
        mutation=BitStringMutation(prob=config.MUTATION_PROBABILITY, initializer=initializer_object),
        eliminate_duplicates=ElementwiseDuplicateElimination(cmp_func=is_equal_2_refactorings_list),
        n_gen=config.NGEN,
    )
    algorithms.append(alg1)

    # 2: NSGA-II
    alg2 = NSGA2(
        pop_size=config.POPULATION_SIZE,
        sampling=PopulationInitialization(initializer_object),
        crossover=AdaptiveSinglePointCrossover(prob=config.CROSSOVER_PROBABILITY),
        # crossover=get_crossover("real_k_point", n_points=2),
        mutation=BitStringMutation(prob=config.MUTATION_PROBABILITY, initializer=initializer_object),
        eliminate_duplicates=ElementwiseDuplicateElimination(cmp_func=is_equal_2_refactorings_list),
        n_gen=config.NGEN,
    )
    algorithms.append(alg2)

    # 3: NSGA-III
    # pop_size must be equal to or larger than the number of reference directions
    number_of_references_points = config.POPULATION_SIZE - int(config.POPULATION_SIZE * 0.20)
    ref_dirs = get_reference_directions(
        'energy',  # algorithm
        config.NUMBER_OBJECTIVES,  # number of objectives
        number_of_references_points,  # number of reference directions
        seed=1
    )
    alg3 = NSGA3(
        ref_dirs=ref_dirs,
        pop_size=config.POPULATION_SIZE,  # 200
        sampling=PopulationInitialization(initializer_object),
        selection=TournamentSelection(func_comp=binary_tournament),
        crossover=AdaptiveSinglePointCrossover(prob=config.CROSSOVER_PROBABILITY),
        # crossover=get_crossover("real_k_point", n_points=2),
        mutation=BitStringMutation(prob=config.MUTATION_PROBABILITY, initializer=initializer_object),
        eliminate_duplicates=ElementwiseDuplicateElimination(cmp_func=is_equal_2_refactorings_list),
        n_gen=config.NGEN,
    )
    algorithms.append(alg3)

    # Termination of algorithms
    my_termination = MultiObjectiveDefaultTermination(
        x_tol=None,
        cv_tol=None,
        f_tol=0.0015,
        nth_gen=5,
        n_last=5,
        n_max_gen=config.MAX_ITERATIONS,  # about 1000 - 1400
        n_max_evals=1e6
    )

    # Do optimization for various problems with various algorithms
    res = minimize(
        problem=problems[config.PROBLEM],
        algorithm=algorithms[config.PROBLEM],
        termination=my_termination,
        seed=1,
        verbose=False,
        copy_algorithm=True,
        copy_termination=True,
        save_history=False,
        callback=LogCallback(),
    )
    # np.save('checkpoint', res.algorithm)

    # Log results
    logger.info(f"***** Algorithm was finished in {res.algorithm.n_gen + config.NGEN} generations *****")
    logger.info(" ")
    logger.info("============ time information ============")
    logger.info(f"Start time: {datetime.fromtimestamp(res.start_time).strftime('%Y-%m-%d %H:%M:%S')}")
    logger.info(f"End time: {datetime.fromtimestamp(res.end_time).strftime('%Y-%m-%d %H:%M:%S')}")
    logger.info(f"Execution time in seconds: {res.exec_time}")
    logger.info(f"Execution time in minutes: {res.exec_time / 60}")
    logger.info(f"Execution time in hours: {res.exec_time / (60 * 60)}")
    # logger.info(f"Number of generations: {res.algorithm.n_gen}")
    # logger.info(f"Number of generations", res.algorithm.termination)

    # Log optimum solutions
    logger.info("============ All opt solutions ============")
    for i, ind in enumerate(res.opt):
        logger.info(f'Opt refactoring sequence {i}:')
        logger.info(ind.X)
        logger.info(f'Opt refactoring sequence corresponding objectives vector {i}:')
        logger.info(ind.F)
        logger.info("-" * 75)

    # Log best refactorings
    logger.info("============ Best refactoring sequences (a set of non-dominated solutions) ============")
    for i, ind in enumerate(res.X):
        logger.info(f'Best refactoring sequence {i}:')
        logger.info(ind)
        logger.info("-" * 75)
    logger.info("============ Best objective values (a set of non-dominated solutions) ============")
    for i, ind_objective in enumerate(res.F):
        logger.info(f'Best refactoring sequence corresponding objectives vector {i}:')
        logger.info(ind_objective)
        logger.info("-" * 75)

    # Save best refactorings
    population_trimmed = []
    objective_values_content = ''
    for chromosome in res.X:
        chromosome_new = []
        if config.PROBLEM == 0:  # i.e., single objective problem
            for gene_ in chromosome:
                chromosome_new.append((gene_.name, gene_.params))
        else:
            for gene_ in chromosome[0]:
                chromosome_new.append((gene_.name, gene_.params))
        population_trimmed.append(chromosome_new)

    for objective_vector in res.F:
        objective_values_content += f'{res.algorithm.n_gen + config.NGEN},'
        if config.PROBLEM == 0:
            objective_values_content += f'{objective_vector},'
        else:
            for objective_ in objective_vector:
                objective_values_content += f'{objective_},'
        objective_values_content += '\n'

    best_refactoring_sequences_path = os.path.join(
        config.PROJECT_LOG_DIR,
        f'best_refactoring_sequences_after_{res.algorithm.n_gen + config.NGEN}gens.json'
    )
    with open(best_refactoring_sequences_path, mode='w', encoding='utf-8') as fp:
        json.dump(population_trimmed, fp, indent=4)

    best_refactoring_sequences_objectives_path = os.path.join(
        config.PROJECT_LOG_DIR,
        f'best_refactoring_sequences_objectives_after_{res.algorithm.n_gen + config.NGEN}gens.csv'
    )
    with open(best_refactoring_sequences_objectives_path, mode='w', encoding='utf-8') as fp:
        fp.write(objective_values_content)

    try:
        pf = res.F
        # dm = HighTradeoffPoints()
        dm = get_decision_making("high-tradeoff")
        I = dm.do(pf)

        logger.info("============ High-tradeoff points refactoring sequences ============")
        for i, ind in enumerate(res.X[I]):
            logger.info(f'High tradeoff points refactoring sequence {i}:')
            logger.info(ind)
            logger.info("-" * 75)
        logger.info("============ High-tradeoff points objective values  ============")
        for i, ind_objective in enumerate(pf[I]):
            logger.info(f'High-tradeoff points refactoring sequence corresponding objectives vector {i}:')
            logger.info(ind_objective)
            logger.info("-" * 75)

        logger.info("High-tradeoff points mean:")
        logger.info(np.mean(pf[I], axis=0))
        logger.info("High-tradeoff points median:")
        logger.info(np.median(pf[I], axis=0))

        # Save high-tradeoff refactorings
        population_trimmed = []
        objective_values_content = ''
        for chromosome in res.X[I]:
            chromosome_new = []
            if config.PROBLEM == 0:  # i.e., single objective problem
                for gene_ in chromosome:
                    chromosome_new.append((gene_.name, gene_.params))
            else:
                for gene_ in chromosome[0]:
                    chromosome_new.append((gene_.name, gene_.params))
            population_trimmed.append(chromosome_new)

        for objective_vector in pf[I]:
            objective_values_content += f'{res.algorithm.n_gen + config.NGEN},'
            if config.PROBLEM == 0:
                objective_values_content += f'{objective_vector},'
            else:
                for objective_ in objective_vector:
                    objective_values_content += f'{objective_},'
            objective_values_content += '\n'

        high_tradeoff_path = os.path.join(
            config.PROJECT_LOG_DIR,
            f'high_tradeoff_points_refactoring_after_{res.algorithm.n_gen + config.NGEN}gens.json'
        )
        with open(high_tradeoff_path, mode='w', encoding='utf-8') as fp:
            json.dump(population_trimmed, fp, indent=4)

        high_tradeoff_path_objectives_path = os.path.join(
            config.PROJECT_LOG_DIR,
            f'high_tradeoff_points_after_{res.algorithm.n_gen + config.NGEN}gens.csv'
        )
        with open(high_tradeoff_path_objectives_path, mode='w', encoding='utf-8') as fp:
            fp.write(objective_values_content)

    except Exception:
        logger.error("No multi-optimal solutions (error in computing high tradeoff points)!")
Example #12

# CodART search-based refactoring module main driver
if __name__ == '__main__':
    config.log_experiment_info()
    logger.info('============ Objectives values before Refactoring ============')
    log_project_info(reset_=True)
    main()
Example #13
def main(source_class: str, source_package: str, target_class: str, target_package: str, field_name: str,
         udb_path: str, *args, **kwargs):
    """

    Move field main API

    """

    import_statement = None
    if source_package != target_package:
        import_statement = f"\nimport {target_package}.{target_class};"
    instance_name = target_class.lower() + "ByCodArt"
    db = und.open(udb_path)

    # Check if field is static
    field_ent = db.lookup(f"{source_package}.{source_class}.{field_name}", "Variable")
    if len(field_ent) == 0:
        logger.error(f"Entity not found with query: {source_package}.{source_class}.{field_name}.")
        db.close()
        return False

    if source_package == target_package and source_class == target_class:
        logger.error("Can not move to self.")
        db.close()
        return False

    field_ent = field_ent[0]
    is_static = field_ent.kindname() == STATIC

    if is_static:
        logger.warning("Field is static!")

    # Find usages
    usages = {}

    for ref in field_ent.refs("Setby, Useby"):
        file = ref.file().longname()
        if file in usages:
            usages[file].append(ref.line())
        else:
            usages[file] = [ref.line(), ]
    try:
        src_class_file = db.lookup(f"{source_package}.{source_class}.java")[0].longname()
        target_class_file = db.lookup(f"{target_package}.{target_class}.java")[0].longname()
    except IndexError:
        logger.error("This is a nested class.")
        logger.info(f"{source_package}.{source_class}.java")
        logger.info(f"{target_package}.{target_class}.java")
        db.close()
        return False

    db.close()

    # Check if there is a cycle
    listener = parse_and_walk(
        file_path=target_class_file,
        listener_class=CheckCycleListener,
        class_name=source_class,
    )

    if not listener.is_valid:
        logger.error(f"Can not move field because there is a cycle between {source_class}, {target_class}")
        # db.close()
        return False

    # Propagate Changes
    for file in usages.keys():
        parse_and_walk(
            file_path=file,
            listener_class=PropagateListener,
            has_write=True,
            field_name=field_name,
            new_name=f"{instance_name}.{field_name}",
            lines=usages[file],
        )

    # Do the cut and paste!
    # Cut
    listener = parse_and_walk(
        file_path=src_class_file,
        listener_class=CutFieldListener,
        has_write=True,
        class_name=target_class,
        instance_name=instance_name,
        field_name=field_name,
        is_static=is_static,
        import_statement=import_statement
    )

    field_text = listener.field_text

    # Paste
    parse_and_walk(
        file_path=target_class_file,
        listener_class=PasteFieldListener,
        has_write=True,
        field_text=field_text,
    )

    # db.close()
    return True
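
A hypothetical invocation of the move-field API above (all names and the .und path are placeholders; a real call needs an Understand database built for the target Java project):

ok = main(
    source_class="Customer",
    source_package="org.example.billing",
    target_class="Account",
    target_package="org.example.accounts",
    field_name="balance",
    udb_path="/path/to/project.und",  # placeholder path
)
print("refactoring applied" if ok else "preconditions failed")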
Example #14
def main(source_class: str, source_package: str, target_class: str,
         target_package: str, method_name: str, udb_path: str, *args,
         **kwargs):
    """


    """

    import_statement = None
    if source_package != target_package:
        import_statement = f"\nimport {target_package}.{target_class};"
    instance_name = target_class.lower() + "ByCodArt"
    db = und.open(udb_path)
    method_map, class_ent = get_source_class_map(db, source_class)
    if class_ent is None:
        logger.error("Class entity is None")
        db.close()
        return False

    # Strong overlay precondition
    # if class_ent.refs("Extend ~Implicit, ExtendBy, Implement"):
    #     logger.error("Class is in inheritance or implements an interface.")
    #     db.close()
    #     return False

    # Check if method is static
    method_ent = db.lookup(f"{source_package}.{source_class}.{method_name}",
                           "Method")
    if len(method_ent) >= 1:
        method_ent = method_ent[0]
    else:
        logger.error("Entity not found.")
        db.close()
        return False

    if method_ent.simplename() != method_name:
        logger.error("Can not move method duo to duplicated entities.")
        logger.info(f"{method_ent}, {method_ent.kindname()}")
        db.close()
        return False

    if source_package == target_package and source_class == target_class:
        logger.error("Can not move to self.")
        db.close()
        return False

    is_static = STATIC in method_ent.kindname()
    # Find usages
    usages = {}

    for ref in method_ent.refs("Callby"):
        file = ref.file().longname()
        if file in usages:
            usages[file].append(ref.line())
        else:
            usages[file] = [
                ref.line(),
            ]

    try:
        src_class_file = db.lookup(f"{source_package}.{source_class}.java",
                                   "File")[0].longname()
        target_class_file = db.lookup(f"{target_package}.{target_class}.java",
                                      "File")[0].longname()
    except IndexError:
        logger.error("This is a nested method.")
        logger.info(f"{source_package}.{source_class}.java")
        logger.info(f"{target_package}.{target_class}.java")
        db.close()
        return False

    db.close()

    # Check if there is a cycle
    listener = parse_and_walk(file_path=target_class_file,
                              listener_class=CheckCycleListener,
                              class_name=source_class)

    if not listener.is_valid:
        logger.error(
            f"Can not move method because there is a cycle between {source_class}, {target_class}"
        )
        # db.close()
        return False

    # Propagate Changes
    for file in usages.keys():
        public_class_name = os.path.basename(file).split(".")[0]
        is_in_target_class = public_class_name == target_class
        parse_and_walk(
            file_path=file,
            listener_class=PropagateListener,
            has_write=True,
            method_name=method_name,
            new_name=f"{instance_name}.{method_name}",
            lines=usages[file],
            is_in_target_class=is_in_target_class,
            method_map=method_map,
        )
    # Do the cut and paste!
    # Cut
    listener = parse_and_walk(
        file_path=src_class_file,
        listener_class=CutMethodListener,
        has_write=True,
        class_name=target_class,
        instance_name=instance_name,
        method_name=method_name,
        is_static=is_static,
        import_statement=import_statement,
    )

    method_text = listener.method_text

    # Paste
    listener = parse_and_walk(
        file_path=target_class_file,
        listener_class=PasteMethodListener,
        has_write=True,
        method_text=method_text,
        source_class=source_class,
        method_map=method_map,
        imports=listener.imports,
    )

    # Post-Paste: Reference Injection
    parse_and_walk(
        file_path=target_class_file,
        listener_class=ReferenceInjectorAndConstructorListener,
        has_write=True,
        method_text=method_text,
        source_class=source_class,
        method_map=method_map,
        imports=None,
        has_empty_cons=listener.has_empty_cons,
    )
    # db.close()
    return True